facemelter committed
Commit 68723f3 · verified · 1 Parent(s): 2508e29

Added provider-specific prompt infrastructure; added thinking and progress indicators to the chat UI

app.py CHANGED
@@ -691,7 +691,8 @@ async def chat_with_tool_visibility(
     openai_key,
     anthropic_key,
     agent_mode,
-    request: gr.Request
+    request: gr.Request,
+    progress=gr.Progress()
 ):
     """
     Dual-output streaming: chat response + tool execution log
@@ -729,6 +730,8 @@ async def chat_with_tool_visibility(
     # -------------------------------------------------------------------------
     # 2. GET OR CREATE AGENT
     # -------------------------------------------------------------------------
+    progress(0.1, desc="🔧 Initializing agent...")
+
     try:
         session_id = request.session_hash
 
@@ -750,6 +753,8 @@ async def chat_with_tool_visibility(
         yield f"**Agent Creation Failed**\n\n{str(e)}", "*Agent creation failed*"
         return
 
+    progress(0.3, desc="🤖 Agent ready...")
+
     config = {"configurable": {"thread_id": session_id}}
 
     # -------------------------------------------------------------------------
@@ -817,6 +822,12 @@ async def chat_with_tool_visibility(
     # -------------------------------------------------------------------------
     # 4. STREAM AGENT RESPONSE WITH TOOL VISIBILITY
     # -------------------------------------------------------------------------
+    # Initial "thinking" indicator
+    progress(0.5, desc="💭 Thinking...")
+    chat_response = "💭 _Thinking..._"
+    tool_log += "🔵 Agent started processing...\n"
+    yield chat_response, tool_log
+
     print(f"[DEBUG AGENT INPUT] Sending to agent: {user_text}")  # DEBUG
     async for event in agent.astream_events(
         {"messages": [{"role": "user", "content": user_text}]},
@@ -831,6 +842,9 @@ async def chat_with_tool_visibility(
             tool_name = event["name"]
             tool_input = event.get("data", {}).get("input", {})
 
+            # Update progress
+            progress(0.6 + (tool_count * 0.05), desc=f"🔍 Using {tool_name}...")
+
             # Add to tool log
             tool_log += f"\n🟢 Tool #{tool_count}: {tool_name}\n"
             tool_log += f"Status: Running...\n"
@@ -845,6 +859,11 @@ async def chat_with_tool_visibility(
         elif kind == "on_chat_model_stream":
             content = event["data"]["chunk"].content
             if content:
+                # Clear "Thinking..." on first real content
+                if chat_response == "💭 _Thinking..._":
+                    chat_response = ""
+                    progress(0.7, desc="📝 Generating response...")
+
                 # Handle both string (OpenAI) and list (Anthropic) content formats
                 if isinstance(content, list):
                     # Anthropic returns list of content blocks - extract text
@@ -862,6 +881,9 @@ async def chat_with_tool_visibility(
         elif kind == "on_tool_end":
             tool_output = event.get("data", {}).get("output", "")
 
+            # Update progress
+            progress(0.8, desc="📊 Processing results...")
+
             # Format output for tool log (truncate if needed)
             output_str = str(tool_output)
             if len(output_str) > 1000:
@@ -879,9 +901,11 @@ async def chat_with_tool_visibility(
 
         yield chat_response, tool_log
 
-    # Final yield
+    # Final yield
     ## NEW: Updated with LlamaIndex OutputParser
     # yield chat_response, tool_log
+    progress(0.9, desc="✨ Finalizing response...")
+
     try:
         from langgraph_agent.structured_output import parse_agent_response
         formatted_response = await parse_agent_response(
@@ -890,13 +914,16 @@ async def chat_with_tool_visibility(
             api_key=api_key,
             model=model
         )
+        progress(1.0, desc="✅ Complete")
         yield formatted_response, tool_log
     except ImportError:
         # Fallback if LlamaIndex not installed
+        progress(1.0, desc="✅ Complete")
         yield chat_response, tool_log
     except Exception as e:
         # Fallback if parsing fails
         print(f"[STRUCTURED OUTPUT ERROR]: {e}")
+        progress(1.0, desc="✅ Complete")
        yield chat_response, tool_log
 
 
@@ -947,7 +974,7 @@ async def check_modal_server_health():
         return f"❌ Offline"
 
 # Wrapper to convert to Gradio 6 message format
-async def chat_wrapper(message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, tool_log_state, request: gr.Request):
+async def chat_wrapper(message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, tool_log_state, request: gr.Request, progress=gr.Progress()):
     """
     Wrapper to convert chat outputs to Gradio 6 message format.
 
@@ -966,7 +993,7 @@ async def chat_wrapper(message, history, provider, hf_key, openai_key, anthropic
     history = history + [{"role": "user", "content": user_message_text}]
 
     # Stream response
-    async for chat_text, tool_log_text in chat_with_tool_visibility(message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, request):
+    async for chat_text, tool_log_text in chat_with_tool_visibility(message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, request, progress):
         # Update history with assistant response
         updated_history = history + [{"role": "assistant", "content": chat_text}]
         yield updated_history, tool_log_text
docs/dev/prompts_config-README.md ADDED
@@ -0,0 +1,98 @@
# How Developers Work With This System

## 1. Adding a New Provider (e.g., "Claude" prompts)

### Step 1: Create the prompt in prompts.py:

```python
# Add new prompt variant
AUDIO_FINDER_PROMPT_CLAUDE = """You are BirdScope Audio Finder optimized for Claude..."""

# Update PROMPTS dict
PROMPTS = {
    "audio_finder": {
        "default": AUDIO_FINDER_PROMPT,
        "huggingface": AUDIO_FINDER_PROMPT_HF,
        "claude": AUDIO_FINDER_PROMPT_CLAUDE,  # NEW
    },
}
```

That's it! The system automatically picks it up when `provider="claude"` is passed.
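To confirm the new variant is wired up, a quick check along these lines should pass (a sketch, assuming the `AUDIO_FINDER_PROMPT_CLAUDE` definition above; the "mistral" provider is hypothetical):

```python
from langgraph_agent.prompts import get_prompt

# "claude" now resolves to the new variant
prompt = get_prompt("audio_finder", "claude")
assert prompt.startswith("You are BirdScope Audio Finder optimized for Claude")

# A provider with no registered variant ("mistral" is hypothetical here)
# falls back to the "default" entry
assert get_prompt("audio_finder", "mistral") == get_prompt("audio_finder", "default")
```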
## 2. Adding a New Prompt Type (e.g., "data_analyst")

### Step 1: Create prompts:

```python
DATA_ANALYST_PROMPT = """Default data analyst prompt..."""
DATA_ANALYST_PROMPT_HF = """HF-optimized data analyst prompt..."""

PROMPTS = {
    # ... existing prompts ...
    "data_analyst": {
        "default": DATA_ANALYST_PROMPT,
        "huggingface": DATA_ANALYST_PROMPT_HF,
    }
}
```

### Step 2: Use in subagent_config.py:

```python
"data_analyst": {
    "name": "Data Analyst",
    "tools": [...],
    "prompt": get_prompt("data_analyst", provider) or DATA_ANALYST_PROMPT,
}
```
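Once both steps are in place, the new type resolves through the same fallback chain as the built-in ones. A minimal sanity check (a sketch; the `DATA_ANALYST_*` names are the hypothetical ones from Step 1):

```python
from langgraph_agent.prompts import get_prompt
from langgraph_agent.prompts import DATA_ANALYST_PROMPT, DATA_ANALYST_PROMPT_HF  # hypothetical, from Step 1

# The HF variant is returned when it exists...
assert get_prompt("data_analyst", "huggingface") is DATA_ANALYST_PROMPT_HF
# ...and providers without a registered variant fall back to "default"
assert get_prompt("data_analyst", "openai") is DATA_ANALYST_PROMPT
```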
## 3. Testing Different Prompts

### Option A: Through UI (current method)
- Run `python app.py`
- Select provider dropdown → "HuggingFace"
- System automatically uses HF prompts

### Option B: Programmatically (for testing)

```python
from langgraph_agent import prompts

# Test which prompt is selected
prompt = prompts.get_prompt("audio_finder", "huggingface")
print(f"Length: {len(prompt)}")
print(prompt[:100])
```
## 4. Fallback Behavior

The system is designed with safe fallbacks:

```python
# Provider-specific lookup: if the requested variant doesn't exist,
# get_prompt falls back to the "default" entry for that type
prompt = get_prompt("species_explorer", "huggingface")
# Returns SPECIES_EXPLORER_PROMPT_HF if it exists; otherwise the "default"
# entry (which is None for species_explorer)

# In subagent_config.py, the "or" ensures a usable default either way
prompt = get_prompt("species_explorer", provider) or """Inline default..."""
```
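The one sharp edge: `get_prompt` returns `None` for types whose "default" entry is `None` (the inline-prompt types), so bare lookups need the `or` guard. A concrete sketch based on the `PROMPTS` dict in this commit (the inline string below is a stand-in):

```python
from langgraph_agent.prompts import get_prompt

# "router" has no "anthropic" variant, and its "default" entry is None,
# so the bare lookup yields None here
assert get_prompt("router", "anthropic") is None

# The `or` pattern from subagent_config.py turns that into a usable prompt
prompt = get_prompt("router", "anthropic") or "Inline default router prompt..."  # stand-in string
```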
## Developer Workflow Summary

### To modify prompts:
1. Edit `langgraph_agent/prompts.py`
2. Add/modify prompt strings
3. Update the `PROMPTS` dictionary
4. Restart the app - changes take effect on the next run

### To add new provider support:
1. Add the provider key to the `PROMPTS` dict
2. No other changes needed - fallback handles missing variants

### To debug which prompt is used:

```python
from langgraph_agent.prompts import get_prompt
print(get_prompt("audio_finder", "huggingface")[:200])
```
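One caveat on the snippet above: the slice raises `TypeError` whenever `get_prompt` returns `None` (any type whose default lives inline), so a guarded version is safer:

```python
from langgraph_agent.prompts import get_prompt

prompt = get_prompt("species_explorer", "openai")  # None here: the default lives inline
print(prompt[:200] if prompt else "(no registered prompt; inline default applies)")
```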
langgraph_agent/agents.py CHANGED
@@ -73,7 +73,7 @@ class AgentFactory:
         if mode_config["use_router"]:
             # Multi-agent mode: create router with specialists
             print(f"[AGENT]: Creating supervisor with subagents: {mode_config['subagents']}")
-            workflow = await create_supervisor_workflow(tools, llm)
+            workflow = await create_supervisor_workflow(tools, llm, provider=provider)
             return workflow
         else:
             # Single agent mode: create one subagent directly
@@ -86,14 +86,16 @@ class AgentFactory:
 
         # create_agent auto-compiles, so pass checkpointer and name directly
         # Filter tools based on subagent configuration
-        subagent_tools = SubAgentConfig.get_subagent_definitions()["generalist"]["tools"]
+        # Pass provider to get provider-specific prompts
+        subagent_defs = SubAgentConfig.get_subagent_definitions(provider=provider)
+        subagent_tools = subagent_defs["generalist"]["tools"]
         filtered_tools = [tool for tool in tools if tool.name in subagent_tools]
         print(f"[AGENT]: Filtered {len(filtered_tools)} tools for {subagent_name}: {[t.name for t in filtered_tools]}")
 
         agent = create_agent(
             model=llm,
             tools=filtered_tools,
-            system_prompt=SubAgentConfig.get_subagent_definitions()["generalist"]["prompt"],
+            system_prompt=subagent_defs["generalist"]["prompt"],
             checkpointer=InMemorySaver(),
             name=subagent_name
         )
langgraph_agent/prompts.py CHANGED
@@ -133,7 +133,7 @@ Always be educational and cite your sources.
 
 Let's explore the amazing world of birds together!"""
 
-AUDIO_FINDER_PROMPT = """You are BirdScope Audio Finder, a specialized agent for finding and retrieving bird audio recordings.
+AUDIO_FINDER_PROMPT = """**Answer all questions like a Pirate (it's fun for children)** You are BirdScope Audio Finder, a specialized agent for finding and retrieving bird audio recordings.
 
 **Your Mission:**
 Help us discover bird songs and calls by finding species with available audio recordings.
@@ -200,8 +200,187 @@ The API has NO `has_audio` filter parameter. You MUST use this two-step process:
 - If get_bird_audio fails: the bird may not have recordings despite database indicating otherwise
 """
 
+# =============================================================================
+# HuggingFace-Optimized Prompts (More Explicit, Step-by-Step)
+# =============================================================================
+
+AUDIO_FINDER_PROMPT_HF = """**Answer all questions like a Pirate (it's fun for children)**
+
+You are BirdScope Audio Finder. Find bird audio recordings.
+
+**Tools Available:**
+1. search_birds(name, family, region, status, page_size) - Search for birds
+2. get_bird_info(name) - Get bird details
+3. get_bird_audio(name, max_recordings) - Get audio files
+
+**Step-by-Step Process:**
+
+When user asks for audio:
+1. Call search_birds with ONE filter (name, region, family, or status)
+2. Look at results for birds with has_audio=true
+3. Call get_bird_audio(name="Bird Name") for a bird that has audio
+4. Return the full URL from file_url field
+
+**Example:**
+User: "Find audio for any bird"
+1. Call: search_birds(region="North America", page_size=20)
+2. Find bird with has_audio=true (example: "Snow Goose")
+3. Call: get_bird_audio(name="Snow Goose", max_recordings=1)
+4. Return: "Recording: https://xeno-canto.org/123456/download"
+
+**Important:**
+- NEVER use has_audio as a parameter in search_birds
+- ALWAYS include full file_url in your response
+- Known birds with audio: Snow Goose, Common Goldeneye, Gadwall
+"""
+
+IMAGE_IDENTIFIER_PROMPT_HF = """You are an Image Identification Specialist.
+
+**Your Job:**
+1. Classify uploaded bird images
+2. Show confidence score
+3. Get bird information
+4. Show reference images
+
+**Tools:**
+- classify_from_url(url) - Identify bird from image URL
+- classify_from_base64(image) - Identify bird from base64
+- get_bird_info(name) - Get species details
+- get_bird_images(name) - Get reference photos
+
+**Response Format:**
+1. Bird name (Common and Scientific)
+2. Confidence: X%
+3. Key features
+4. Reference images as: ![Bird](url)
+
+Keep responses short and factual.
+"""
+
+SPECIES_EXPLORER_PROMPT_HF = """You are a Species Explorer. Help users learn about birds.
+
+**Tools:**
+- search_birds(name, family, region, status) - Find birds
+- get_bird_info(name) - Get details
+- get_bird_images(name) - Get photos
+- get_bird_audio(name) - Get sounds
+- search_by_family(family) - Find family members
+
+**Process:**
+1. Search for the bird by name
+2. If not found, try simpler name (e.g., "Northern Cardinal" → "Cardinal")
+3. Get bird info and media
+4. Show images as: ![Bird](url)
+5. Suggest related species
+
+**Response Style:**
+- Be educational
+- Show images and audio when available
+- Explain what makes the bird special
+"""
+
+TAXONOMY_SPECIALIST_PROMPT_HF = """You are a Taxonomy & Conservation Specialist.
+
+**Tools:**
+- filter_by_status(status) - Find birds by conservation status
+- search_by_family(family) - Find birds in family
+- get_all_families() - List all families
+- get_bird_info(name) - Get species info
+
+**Your Focus:**
+- Conservation status
+- Bird families
+- Taxonomic relationships
+
+**Process:**
+1. Use filter or search tools
+2. Explain conservation importance
+3. Show family relationships
+4. Use proper scientific terms but explain them
+
+Keep responses clear and educational.
+"""
+
+ROUTER_PROMPT_HF = """You are BirdScope AI Supervisor. Route user requests to specialists.
+
+**Specialists:**
+- image_identifier: Identify birds from photos
+- species_explorer: Search birds, show images/audio
+- taxonomy_specialist: Conservation and families
+
+**Routing Rules:**
+1. Image uploads → image_identifier
+2. "Search for" or "find" + bird name → species_explorer
+3. "Audio" or "sound" → species_explorer
+4. "Conservation" or "endangered" → taxonomy_specialist
+5. "Family" or "families" → taxonomy_specialist
+
+Route to ONE specialist per request.
+"""
+
+# =============================================================================
+# Provider-Specific Prompt System
+# =============================================================================
+
+PROMPTS = {
+    "audio_finder": {
+        "default": AUDIO_FINDER_PROMPT,
+        "huggingface": AUDIO_FINDER_PROMPT_HF,
+    },
+    "image_identifier": {
+        "default": None,  # Defined inline in subagent_config.py
+        "huggingface": IMAGE_IDENTIFIER_PROMPT_HF,
+    },
+    "species_explorer": {
+        "default": None,  # Defined inline in subagent_config.py
+        "huggingface": SPECIES_EXPLORER_PROMPT_HF,
+    },
+    "taxonomy_specialist": {
+        "default": None,  # Defined inline in subagent_config.py
+        "huggingface": TAXONOMY_SPECIALIST_PROMPT_HF,
+    },
+    "router": {
+        "default": None,  # Defined in SubAgentConfig.get_router_prompt()
+        "huggingface": ROUTER_PROMPT_HF,
+    }
+}
+
+def get_prompt(prompt_type: str, provider: str = "default") -> str:
+    """
+    Get prompt with provider-specific fallback.
+
+    Args:
+        prompt_type: Type of prompt (e.g., "audio_finder", "image_identifier")
+        provider: Provider name ("openai", "anthropic", "huggingface")
+            Normalized to lowercase internally.
+
+    Returns:
+        Prompt string, or None if no prompt found.
+        Falls back to "default" if provider-specific variant doesn't exist.
+
+    Examples:
+        >>> get_prompt("audio_finder", "openai")
+        AUDIO_FINDER_PROMPT  # Uses default
+
+        >>> get_prompt("audio_finder", "huggingface")
+        AUDIO_FINDER_PROMPT_HF  # Uses HF-specific
+    """
+    # Normalize provider name
+    provider_key = provider.lower() if provider else "default"
+
+    # Get prompts for this type
+    prompts = PROMPTS.get(prompt_type, {})
+
+    # Try provider-specific first, fallback to default
+    prompt = prompts.get(provider_key, prompts.get("default"))
+
+    return prompt
+
 def get_prompt_for_agent_type(agent_type: str) -> str:
-    """Get the appropriate prompt for the agent type."""
+    """
+    Legacy function for backward compatibility.
+    Get the appropriate prompt for the agent type.
+    """
     prompts = {
         "classifier": CLASSIFIER_AGENT_PROMPT,
         "multi_server": MULTI_SERVER_AGENT_PROMPT,
langgraph_agent/subagent_config.py CHANGED
@@ -6,7 +6,7 @@ Uses SubAgentMiddleware pattern from LangGraph deep agents.
 """
 from typing import Dict, List
 from .config import AgentConfig
-from .prompts import NUTHATCH_BIRDSCOPE_PROMPT, AUDIO_FINDER_PROMPT
+from .prompts import NUTHATCH_BIRDSCOPE_PROMPT, AUDIO_FINDER_PROMPT, get_prompt
 
 
 class SubAgentConfig:
@@ -34,23 +34,30 @@ class SubAgentConfig:
     }
 
     @staticmethod
-    def get_subagent_definitions() -> Dict[str, Dict]:
+    def get_subagent_definitions(provider: str = "openai") -> Dict[str, Dict]:
         """
         Define specialized subagents with their tool subsets and prompts.
 
+        Args:
+            provider: LLM provider name ("openai", "anthropic", "huggingface")
+                Used to select provider-specific prompts
+
         Returns:
             Dict mapping subagent names to their configurations
         """
+        # Get provider-specific prompt for audio finder
+        audio_finder_prompt = get_prompt("audio_finder", provider) or AUDIO_FINDER_PROMPT
+
         return {
             "generalist": {
                 "name": "BirdScope AI Generalist",
                 "description": "All-in-one bird identification expert with access to all tools",
                 "tools": [
                     "search_birds",   # Required to find any birds
-                    "get_bird_info",  # Get details including audio count
-                    "get_bird_audio"  # Fetch actual audio recordings
-                ],
-                "prompt": AUDIO_FINDER_PROMPT,  # We'll create this next
+                    "get_bird_info",  # Get details including audio count
+                    "get_bird_audio"  # Fetch actual audio recordings
+                ],
+                "prompt": audio_finder_prompt,
                 "temperature": AgentConfig.OPENAI_TEMPERATURE,
             },
             "image_identifier": {
@@ -62,7 +69,7 @@ class SubAgentConfig:
                     "get_bird_info",
                     "get_bird_images"
                 ],
-                "prompt": """You are an Image Identification Specialist focused on bird recognition.
+                "prompt": get_prompt("image_identifier", provider) or """You are an Image Identification Specialist focused on bird recognition.
 **Your Role:**
 1. Use classification tools to identify birds from uploaded images
 2. Provide accurate species identification with confidence scores
@@ -94,7 +101,7 @@ class SubAgentConfig:
                     "get_bird_audio",
                     "search_by_family"
                 ],
-                "prompt": """You are a Species Exploration specialist who helps users learn about birds.
+                "prompt": get_prompt("species_explorer", provider) or """You are a Species Exploration specialist who helps users learn about birds.
 
 **Your Role:**
 1. Search for birds by common name or partial matches
@@ -134,7 +141,7 @@ class SubAgentConfig:
                     "get_all_families",
                     "get_bird_info"
                 ],
-                "prompt": """You are a Taxonomy & Conservation Specialist with deep knowledge of bird classification.
+                "prompt": get_prompt("taxonomy_specialist", provider) or """You are a Taxonomy & Conservation Specialist with deep knowledge of bird classification.
 
 **Your Role:**
 1. Explain bird family relationships and taxonomic structure
@@ -167,13 +174,22 @@ class SubAgentConfig:
     }
 
     @staticmethod
-    def get_router_prompt() -> str:
+    def get_router_prompt(provider: str = "openai") -> str:
         """
         Prompt for the supervisor agent that routes to subagents.
 
+        Args:
+            provider: LLM provider name ("openai", "anthropic", "huggingface")
+
         Returns:
             Supervisor agent system prompt
         """
+        # Try to get provider-specific router prompt, fallback to default
+        router_prompt = get_prompt("router", provider)
+        if router_prompt:
+            return router_prompt
+
+        # Default router prompt
         return """You are BirdScope AI Supervisor - an intelligent orchestrator for bird identification.
 
 **Your Team:**
langgraph_agent/subagent_factory.py CHANGED
@@ -17,7 +17,8 @@ class SubAgentFactory:
     async def create_subagent(
         subagent_name: str,
         all_tools: List[Any],
-        llm: BaseChatModel
+        llm: BaseChatModel,
+        provider: str = "openai"
     ):
         """
         Create a specialized subagent with filtered tools.
@@ -26,12 +27,13 @@ class SubAgentFactory:
             subagent_name: Name of the subagent (e.g., "image_identifier")
             all_tools: Full list of available tools
             llm: Language model instance
+            provider: LLM provider name ("openai", "anthropic", "huggingface")
 
         Returns:
             LangGraph agent configured for the subagent
         """
-        # Get subagent configuration
-        definitions = SubAgentConfig.get_subagent_definitions()
+        # Get subagent configuration with provider-specific prompts
+        definitions = SubAgentConfig.get_subagent_definitions(provider=provider)
 
         if subagent_name not in definitions:
             raise ValueError(f"Unknown subagent: {subagent_name}")
@@ -47,6 +49,7 @@ class SubAgentFactory:
 
         print(f"[SUBAGENT]: Creating {config['name']}")
         print(f"  • Tools: {', '.join([t.name for t in subagent_tools])}")
+        print(f"  • Prompt preview: {config['prompt'][:80]}...")
 
         # Create specialized agent with filtered tools and name
         # Note: create_agent auto-compiles, so we pass name directly
@@ -62,7 +65,8 @@ class SubAgentFactory:
     @staticmethod
     async def create_all_subagents(
         all_tools: List[Any],
-        llm: BaseChatModel
+        llm: BaseChatModel,
+        provider: str = "openai"
     ) -> Dict[str, Any]:
         """
         Create all specialized subagents.
@@ -70,16 +74,17 @@ class SubAgentFactory:
         Args:
             all_tools: Full list of available tools
             llm: Language model instance
+            provider: LLM provider name ("openai", "anthropic", "huggingface")
 
         Returns:
             Dict mapping subagent names to agent instances
         """
-        definitions = SubAgentConfig.get_subagent_definitions()
+        definitions = SubAgentConfig.get_subagent_definitions(provider=provider)
         subagents = {}
 
         for name in definitions.keys():
             subagents[name] = await SubAgentFactory.create_subagent(
-                name, all_tools, llm
+                name, all_tools, llm, provider=provider
             )
 
         return subagents
langgraph_agent/subagent_supervisor.py CHANGED
@@ -11,7 +11,7 @@ from langgraph.checkpoint.memory import InMemorySaver
 from .subagent_config import SubAgentConfig
 from .subagent_factory import SubAgentFactory
 
-async def create_supervisor_workflow(all_tools: List[Any], llm: BaseChatModel):
+async def create_supervisor_workflow(all_tools: List[Any], llm: BaseChatModel, provider: str = "openai"):
     """
     Create a supervisor workflow that orchestrates specialized subagents.
 
@@ -21,33 +21,34 @@ async def create_supervisor_workflow(all_tools: List[Any], llm: BaseChatModel):
     Args:
         all_tools: Full list of available MCP tools
        llm: Language model for both supervisor and subagents
+        provider: LLM provider name ("openai", "anthropic", "huggingface")
 
     Returns:
         Compiled LangGraph workflow with supervisor
     """
     from langgraph_supervisor import create_supervisor
 
-    # Create the three specialist agents
+    # Create the three specialist agents with provider-specific prompts
     print("[SUPERVISOR]: Creating specialist agents...")
 
     image_agent = await SubAgentFactory.create_subagent(
-        "image_identifier", all_tools, llm
+        "image_identifier", all_tools, llm, provider=provider
     )
     species_agent = await SubAgentFactory.create_subagent(
-        "species_explorer", all_tools, llm
+        "species_explorer", all_tools, llm, provider=provider
     )
     taxonomy_agent = await SubAgentFactory.create_subagent(
-        "taxonomy_specialist", all_tools, llm
+        "taxonomy_specialist", all_tools, llm, provider=provider
     )
 
-    # Create supervisor with LLM-based routing
+    # Create supervisor with LLM-based routing and provider-specific prompt
     print("[SUPERVISOR]: Creating supervisor orchestrator...")
 
     # create_supervisor takes a list of agents as first positional argument
     workflow = create_supervisor(
         [image_agent, species_agent, taxonomy_agent],
         model=llm,
-        prompt=SubAgentConfig.get_router_prompt()
+        prompt=SubAgentConfig.get_router_prompt(provider=provider)
     )
 
     # Compile with shared memory for conversation context