|
|
""" |
|
|
Agent creation and configuration. |
|
|
""" |
|
|
from typing import Optional |
|
|
from langchain.agents import create_agent |
|
|
from langchain_openai import ChatOpenAI |
|
|
from langgraph.checkpoint.memory import InMemorySaver |
|
|
|
|
|
from .config import AgentConfig |
|
|
from .prompts import get_prompt_for_agent_type, NUTHATCH_BIRDSCOPE_PROMPT |
|
|
from .mcp_clients import MCPClientManager |
|
|
|
|
|
|
|
|
class AgentFactory:
    """Factory for creating different types of bird classification agents."""

    @staticmethod
    async def create_classifier_agent(
        model_name: Optional[str] = None,
        temperature: Optional[float] = None,
        with_memory: bool = False
    ):
        """
        Create a basic bird classifier agent (Modal only).

        Args:
            model_name: LLM model to use (defaults to config)
            temperature: Model temperature (defaults to config)
            with_memory: Enable conversation memory

        Returns:
            Configured LangGraph agent
        """
        # Fail fast on missing/invalid configuration before any network work.
        AgentConfig.validate()

        # Discover the classifier tools exposed by the Modal MCP server.
        mcp_client = await MCPClientManager.create_classifier_client()
        mcp_tools = await MCPClientManager.get_tools(mcp_client)

        # Caller overrides win; otherwise fall back to configured defaults.
        chosen_model = model_name if model_name else AgentConfig.DEFAULT_MODEL
        chosen_temp = AgentConfig.OPENAI_TEMPERATURE if temperature is None else temperature
        llm = ChatOpenAI(model=chosen_model, temperature=chosen_temp)

        build_args = {
            "model": llm,
            "tools": mcp_tools,
            "system_prompt": get_prompt_for_agent_type("classifier")
        }
        if with_memory:
            # Opt-in conversation memory via an in-process checkpointer.
            build_args["checkpointer"] = InMemorySaver()

        print("[STATUS]: Creating LangGraph agent...")
        agent = create_agent(**build_args)
        print("[SUCCESS]: Agent ready!\n")
        return agent
|
|
|
|
|
@staticmethod |
|
|
async def create_multi_server_agent( |
|
|
model_name: Optional[str] = None, |
|
|
temperature: Optional[float] = None, |
|
|
with_memory: bool = True |
|
|
): |
|
|
""" |
|
|
Create agent with both Modal classifier and eBird tools. |
|
|
|
|
|
Args: |
|
|
model_name: LLM model to use (defaults to config) |
|
|
temperature: Model temperature (defaults to config) |
|
|
with_memory: Enable conversation memory (default: True) |
|
|
|
|
|
Returns: |
|
|
Configured LangGraph agent with all tools |
|
|
""" |
|
|
|
|
|
AgentConfig.validate() |
|
|
|
|
|
|
|
|
client = await MCPClientManager.create_multi_server_client() |
|
|
tools = await MCPClientManager.get_tools(client) |
|
|
|
|
|
|
|
|
model = ChatOpenAI( |
|
|
model=model_name or AgentConfig.DEFAULT_MODEL, |
|
|
temperature=temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE |
|
|
) |
|
|
|
|
|
|
|
|
system_prompt = get_prompt_for_agent_type("multi_server") |
|
|
|
|
|
|
|
|
agent_kwargs = { |
|
|
"model": model, |
|
|
"tools": tools, |
|
|
"system_prompt": system_prompt |
|
|
} |
|
|
|
|
|
|
|
|
if with_memory: |
|
|
agent_kwargs["checkpointer"] = InMemorySaver() |
|
|
|
|
|
print("[STATUS]: Creating multi-server LangGraph agent...") |
|
|
agent = create_agent(**agent_kwargs) |
|
|
|
|
|
print("[SUCCESS]: Agent ready with all tools!\n") |
|
|
return agent |
|
|
|
|
|
@staticmethod |
|
|
async def create_streaming_agent( |
|
|
model_name: Optional[str] = None, |
|
|
temperature: Optional[float] = None, |
|
|
system_prompt: Optional[str] = None, |
|
|
with_memory: bool = True |
|
|
): |
|
|
""" |
|
|
Create streaming multi-server agent with custom system prompt. |
|
|
|
|
|
Args: |
|
|
model_name: LLM model (default: gpt-4o-mini) |
|
|
temperature: Sampling temperature (default: 0) |
|
|
system_prompt: Custom system message |
|
|
with_memory: Enable conversation memory |
|
|
""" |
|
|
print("[STATUS]: Creating streaming agent...") |
|
|
|
|
|
|
|
|
model_name = model_name or AgentConfig.DEFAULT_MODEL |
|
|
temperature = temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE |
|
|
|
|
|
|
|
|
if system_prompt is None: |
|
|
system_prompt = """You are an expert bird identification assistant with access to: |
|
|
2. **Bird Classifier** - Identify birds from images with high accuracy |
|
|
2. **eBird Database** - Find recent sightings, hotspots, and species info |
|
|
|
|
|
**Your capabilities:** |
|
|
- Classify bird images and provide confidence scores |
|
|
- Find where birds have been spotted recently |
|
|
- Recommend birding locations |
|
|
- Answer questions about bird species and habitats |
|
|
|
|
|
**Response style:** |
|
|
- Be enthusiastic and educational |
|
|
- Always cite confidence scores for identifications |
|
|
- Provide actionable location recommendations |
|
|
- Format responses clearly with markdown |
|
|
|
|
|
Let's explore the amazing world of bids together! |
|
|
|
|
|
""" |
|
|
|
|
|
client = await MCPClientManager.create_multi_server_client() |
|
|
tools = await MCPClientManager.get_tools(client) |
|
|
|
|
|
|
|
|
model = ChatOpenAI( |
|
|
model=model_name, |
|
|
temperature=temperature, |
|
|
streaming=True, |
|
|
) |
|
|
|
|
|
|
|
|
agent_kwargs = { |
|
|
"model": model, |
|
|
"tools": tools, |
|
|
"system_prompt": system_prompt |
|
|
} |
|
|
|
|
|
|
|
|
if with_memory: |
|
|
agent_kwargs["checkpointer"] = InMemorySaver() |
|
|
|
|
|
print("[STATUS]: Creating LangGraph agent with streaming...") |
|
|
agent = create_agent(**agent_kwargs) |
|
|
|
|
|
print("[SUCCESS]: Streaming agent ready!\n") |
|
|
return agent |
|
|
|
|
|
@staticmethod |
|
|
async def create_streaming_agent_with_openai( |
|
|
model: str = "gpt-4o-mini", |
|
|
openai_key: str = None, |
|
|
temperature: Optional[float] = None, |
|
|
system_prompt: Optional[str] = None, |
|
|
with_memory: bool = True |
|
|
): |
|
|
""" |
|
|
Create streaming agent with OpenAI LLM using user-provided API key. |
|
|
|
|
|
Args: |
|
|
model: OpenAI model name (e.g., "gpt-4o-mini", "gpt-4o") |
|
|
openai_key: User's OpenAI key (required) |
|
|
temperature: Sampling temperature (0-2), defaults to config |
|
|
system_prompt: Custom system prompt |
|
|
with_memory: Enable conversation memory |
|
|
|
|
|
Returns: |
|
|
LangGraph agent with OpenAI LLM and MCP tools |
|
|
""" |
|
|
|
|
|
if not openai_key: |
|
|
raise ValueError("OpenAI key is required") |
|
|
|
|
|
print(f"[AGENT] Creating OpenAI agent with model: {model}") |
|
|
|
|
|
|
|
|
client = await MCPClientManager.create_multi_server_client() |
|
|
tools = await MCPClientManager.get_tools(client) |
|
|
|
|
|
|
|
|
llm = ChatOpenAI( |
|
|
model=model, |
|
|
api_key=openai_key, |
|
|
temperature=temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE, |
|
|
streaming=True |
|
|
) |
|
|
|
|
|
|
|
|
agent_kwargs = { |
|
|
"model": llm, |
|
|
"tools": tools, |
|
|
"system_prompt": system_prompt or "You are a helpful AI assistant." |
|
|
} |
|
|
|
|
|
if with_memory: |
|
|
agent_kwargs["checkpointer"] = InMemorySaver() |
|
|
|
|
|
agent = create_agent(**agent_kwargs) |
|
|
return agent |
|
|
|
|
|
@staticmethod |
|
|
async def create_streaming_agent_with_hf( |
|
|
model: str = "meta-llama/Llama-3.1-8B-Instruct", |
|
|
hf_token: str = None, |
|
|
temperature: Optional[float] = None, |
|
|
system_prompt: Optional[str] = None, |
|
|
with_memory: bool = True |
|
|
): |
|
|
""" |
|
|
Create streaming agent with HuggingFace Inference Providers. |
|
|
|
|
|
Uses HF's OpenAI-compatible router endpoint for full tool calling support. |
|
|
|
|
|
Args: |
|
|
model: HF model repo ID (e.g., "meta-llama/Llama-3.1-8B-Instruct") |
|
|
hf_token: User's HF API token (required) |
|
|
temperature: Sampling temperature (0-1), defaults to config |
|
|
system_prompt: Custom system prompt |
|
|
with_memory: Enable conversation memory |
|
|
|
|
|
Returns: |
|
|
LangGraph agent with HF LLM and MCP tools |
|
|
""" |
|
|
|
|
|
if not hf_token: |
|
|
raise ValueError("HuggingFace token is required") |
|
|
|
|
|
print(f"[AGENT] Creating HuggingFace agent with model: {model}") |
|
|
|
|
|
|
|
|
client = await MCPClientManager.create_multi_server_client() |
|
|
tools = await MCPClientManager.get_tools(client) |
|
|
|
|
|
|
|
|
|
|
|
llm = ChatOpenAI( |
|
|
base_url="https://router.huggingface.co/v1", |
|
|
api_key=hf_token, |
|
|
model=model, |
|
|
temperature=temperature if temperature is not None else AgentConfig.HF_TEMPERATURE, |
|
|
streaming=True |
|
|
) |
|
|
|
|
|
|
|
|
agent_kwargs = { |
|
|
"model": llm, |
|
|
"tools": tools, |
|
|
"system_prompt": system_prompt or "You are a helpful AI assistant." |
|
|
} |
|
|
|
|
|
if with_memory: |
|
|
agent_kwargs["checkpointer"] = InMemorySaver() |
|
|
|
|
|
agent = create_agent(**agent_kwargs) |
|
|
return agent |
|
|
|
|
|
@staticmethod |
|
|
async def create_subagent_orchestrator( |
|
|
model: str, |
|
|
api_key: str, |
|
|
provider: str, |
|
|
mode: str = "Single Agent (All Tools)" |
|
|
): |
|
|
""" |
|
|
Create agent using subagent architecture (always uses subagent system). |
|
|
|
|
|
Args: |
|
|
model: LLM model name |
|
|
api_key: API key for the provider |
|
|
provider: LLM provider ("openai" or "huggingface") |
|
|
mode: Agent mode (e.g., "Single Agent (All Tools)", "Specialized Subagents (3 Specialists)") |
|
|
|
|
|
Returns: |
|
|
Configured agent (single subagent or router workflow) |
|
|
""" |
|
|
from .subagent_config import SubAgentConfig |
|
|
from .subagent_router import create_router_agent |
|
|
from .subagent_factory import SubAgentFactory |
|
|
from langchain_openai import ChatOpenAI |
|
|
|
|
|
|
|
|
mode_config = SubAgentConfig.get_mode_config(mode) |
|
|
print(f"[AGENT]: Creating agent in '{mode}' mode") |
|
|
|
|
|
|
|
|
if provider == "huggingface": |
|
|
llm = ChatOpenAI( |
|
|
base_url="https://router.huggingface.co/v1", |
|
|
api_key=api_key, |
|
|
model=model, |
|
|
temperature=AgentConfig.HF_TEMPERATURE, |
|
|
streaming=True |
|
|
) |
|
|
else: |
|
|
llm = ChatOpenAI( |
|
|
model=model, |
|
|
api_key=api_key, |
|
|
temperature=AgentConfig.OPENAI_TEMPERATURE, |
|
|
streaming=True |
|
|
) |
|
|
|
|
|
|
|
|
client = await MCPClientManager.create_multi_server_client() |
|
|
tools = await MCPClientManager.get_tools(client) |
|
|
|
|
|
|
|
|
if mode_config["use_router"]: |
|
|
|
|
|
print(f"[AGENT]: Creating router with subagents: {mode_config['subagents']}") |
|
|
workflow = await create_router_agent(tools, llm) |
|
|
return workflow |
|
|
else: |
|
|
|
|
|
subagent_name = mode_config["subagents"][0] |
|
|
print(f"[AGENT]: Creating single subagent: {subagent_name}") |
|
|
agent = await SubAgentFactory.create_subagent(subagent_name, tools, llm) |
|
|
return agent |
|
|
|
|
|
|
|
|
async def create_bird_agent(**kwargs):
    """Backwards-compatible alias for :meth:`AgentFactory.create_classifier_agent`."""
    # Delegate straight to the factory; all keyword arguments pass through.
    return await AgentFactory.create_classifier_agent(**kwargs)
|
|
|
|
|
|
|
|
async def create_multi_agent(**kwargs):
    """Convenience alias for :meth:`AgentFactory.create_multi_server_agent`."""
    # Thin pass-through wrapper; all keyword arguments are forwarded as-is.
    return await AgentFactory.create_multi_server_agent(**kwargs)