"""
Subagent Supervisor

Uses LangGraph's create_supervisor() for LLM-based routing between specialists.
"""
from typing import Any, List

from langchain_core.language_models import BaseChatModel
from langgraph.checkpoint.memory import InMemorySaver

from .subagent_config import SubAgentConfig
from .subagent_factory import SubAgentFactory

async def create_supervisor_workflow(all_tools: List[Any], llm: BaseChatModel, provider: str = "openai"):
    """
    Create a supervisor workflow that orchestrates specialized subagents.

    The supervisor uses LLM-based routing to delegate tasks to the most
    appropriate specialist agent.

    Args:
        all_tools: Full list of available MCP tools
        llm: Language model for both supervisor and subagents
        provider: LLM provider name ("openai", "anthropic", "huggingface")

    Returns:
        Compiled LangGraph workflow with supervisor
    """
    # Imported inside the function so the optional langgraph-supervisor
    # dependency is only needed when this workflow is actually built.
    from langgraph_supervisor import create_supervisor

    # Create the three specialist agents with provider-specific prompts
    print("[SUPERVISOR]: Creating specialist agents...")

    image_agent = await SubAgentFactory.create_subagent(
        "image_identifier", all_tools, llm, provider=provider
    )
    taxonomy_agent = await SubAgentFactory.create_subagent(
        "taxonomy_specialist", all_tools, llm, provider=provider
    )
    # The audio finder reuses the generalist configuration rather than a
    # dedicated audio profile.
    audio_finder_agent = await SubAgentFactory.create_subagent(
        "generalist", all_tools, llm, provider=provider
    )

    # Create supervisor with LLM-based routing and provider-specific prompt
    print("[SUPERVISOR]: Creating supervisor orchestrator...")

    # create_supervisor takes a list of agents as first positional argument
    workflow = create_supervisor(
        [image_agent, taxonomy_agent, audio_finder_agent],
        model=llm,
        prompt=SubAgentConfig.get_router_prompt(provider=provider)
    )

    # Compile with an in-memory checkpointer so conversation context
    # persists across turns within a single thread
    print("[SUPERVISOR]: Compiling workflow with memory...")
    return workflow.compile(checkpointer=InMemorySaver())
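

# --- Example usage (illustrative sketch, not part of the module's API) ---
# Assumes an OpenAI chat model and a populated MCP tool list; the model name,
# thread id, and empty tool list below are placeholders. Because the graph is
# compiled with a checkpointer, a thread_id must be passed in the config.
#
# import asyncio
# from langchain_openai import ChatOpenAI
#
# async def main():
#     llm = ChatOpenAI(model="gpt-4o")
#     workflow = await create_supervisor_workflow(all_tools=[], llm=llm)
#     config = {"configurable": {"thread_id": "demo"}}
#     result = await workflow.ainvoke(
#         {"messages": [{"role": "user", "content": "Identify this bird."}]},
#         config=config,
#     )
#     print(result["messages"][-1].content)
#
# if __name__ == "__main__":
#     asyncio.run(main())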