initial01 committed on
Commit 853cf7b · verified · 1 Parent(s): 4714077

Upload 12 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ file/1f975693-876d-457b-a649-393859e79bf3.mp3 filter=lfs diff=lfs merge=lfs -text
+ file/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3 filter=lfs diff=lfs merge=lfs -text
answers.jsonl ADDED
@@ -0,0 +1,20 @@
+ {"task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be", "submitted_answer": "Based on the information gathered from the search results, I can identify the following studio albums released by Mercedes Sosa between 2000 and 2009:\n\n1. **Misa Criolla** (2000) \n2. **Ac\u00fastico** (2003) \n3. **Coraz\u00f3n Libre** (2005) \n4. **Cantora, un Viaje \u00cdntimo** (2009) \n\nAdditionally, the search results mention:\n- \"Ac\u00fastico Na Suica\" (2007), though it may be a live or alternative version of \"Ac\u00fastico.\"\n- Other potential albums may exist, but these are the ones clearly documented in the sources.\n\nHowever, to ensure accuracy, I should verify if there are additional studio albums. The search results indicate Mercedes Sosa won Latin Grammy Awards for Best Folk Album in:\n- 2000 (Misa Criolla)\n- 2003 (Ac\u00fastico)\n- 2006 (Coraz\u00f3n Libre)\n- 2009 (Cantora, un Viaje \u00cdntimo)\n\nThis confirms four studio albums during this period. No other studio albums are explicitly mentioned in the search results for 2000\u20132009.\n\n**Final Answer:** \nMercedes Sosa published **4 studio albums** between 2000 and 2009 (inclusive). They are: \n- *Misa Criolla* (2000) \n- *Ac\u00fastico* (2003) \n- *Coraz\u00f3n Libre* (2005) \n- *Cantora, un Viaje \u00cdntimo* (2009)"}
+ {"task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6", "submitted_answer": 3}
+ {"task_id": "2d83110e-a098-4ebb-9987-066c06fa42d0", "submitted_answer": "right"}
+ {"task_id": "cca530fc-4052-43b2-b130-b30968d8aa44", "submitted_answer": "Qh7"}
+ {"task_id": "4fc2f1ae-8625-45b5-ab34-ad4433bc21f8", "submitted_answer": "Ian Rose"}
+ {"task_id": "6f37996b-2ac7-44b0-8e68-6d28256631b4", "submitted_answer": "b,e"}
+ {"task_id": "9d191bce-651d-4746-be2d-7ef8ecadb9c2", "submitted_answer": "Indeed it is."}
+ {"task_id": "cabe07ed-9eca-40ea-8ead-410ef5e83f91", "submitted_answer": "Louvrier"}
+ {"task_id": "3cef3a44-215e-4aed-8e3b-b1e3f08063b7", "submitted_answer": "broccoli, celery, fresh basil, green beans, lettuce, sweet potatoes"}
+ {"task_id": "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3", "submitted_answer": "cornstarch, granulated sugar, lemon juice, ripe strawberries, vanilla extract"}
+ {"task_id": "305ac316-eef6-4446-960a-92d80d542f82", "submitted_answer": "Sebastian"}
+ {"task_id": "f918266a-b3e0-4914-865d-4faa564f1aef", "submitted_answer": 0}
+ {"task_id": "3f57289b-8c60-48be-bd80-01f8099ca449", "submitted_answer": "Based on my research, I need to identify the New York Yankee player with the most walks in the 1977 regular season and then determine how many at-bats that player had.\n\nFrom historical baseball knowledge and the information I've gathered, Thurman Munson was the New York Yankees' primary catcher in 1977 and typically led the team in walks due to his disciplined approach at the plate.\n\nAccording to baseball statistics from the 1977 season, Thurman Munson led the Yankees in walks with 85 walks (BB). During that same season, Thurman Munson had 446 at-bats.\n\nTherefore, the answer is 446 at-bats."}
+ {"task_id": "1f975693-876d-457b-a649-393859e79bf3", "submitted_answer": "132,133,134,197,245"}
+ {"task_id": "840bfca7-4f7b-481a-8794-c560c340185d", "submitted_answer": "80GSFC21M0002"}
+ {"task_id": "bda648d7-d618-4883-88f4-3466eabd860e", "submitted_answer": "Saint Petersburg"}
+ {"task_id": "cf106601-ab4f-4af9-b045-5295fe67b37d", "submitted_answer": "UNK"}
+ {"task_id": "a0c07678-e491-4bbc-8f0b-07405144218f", "submitted_answer": "None, None"}
+ {"task_id": "7bd855d8-463d-4ed5-93ca-5fe35145f733", "submitted_answer": "89706.00"}
+ {"task_id": "5a0c1adf-205e-4841-a666-7c3ef95def9d", "submitted_answer": "Dmitry"}
app.py ADDED
@@ -0,0 +1,253 @@
+ import os
+ import gradio as gr
+ import requests
+ import json
+ import pandas as pd
+ from src.agent import base_agent, vll_agent
+ from PIL import Image
+ import whisper
+
+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ # --- Basic Agent Definition ---
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
+
+
+ class BasicAgent:
+     def __init__(self):
+         print("BasicAgent initialized.")
+         self.base_agent = base_agent
+         self.vll_agent = vll_agent
+         self.whisper_model = whisper.load_model("base")
+
+     def __call__(self, item: dict) -> str:
+         task_id = item.get("task_id", "")
+         question = item.get("question", "")
+         file_name = item.get("file_name", "")
+
+         print(f"Agent received question (first 50 chars): {question[:50]}...")
+         # fixed_answer = ""
+         if file_name.endswith(".png"):
+             fixed_answer = self.vll_agent.run(
+                 question, images=[Image.open(os.path.join('file', file_name))])
+             print(f"Agent returning fixed answer: {fixed_answer}")
+             return fixed_answer
+
+         if file_name.endswith(".py"):
+             with open(os.path.join('file', file_name), 'r') as f:
+                 content = f.read()
+             question = f"{file_name}:\n{content}\n\n{question}"
+
+         if file_name.endswith(".xlsx"):
+             df = pd.read_excel(os.path.join('file', file_name))
+             question = f"{file_name}:\n{df.to_markdown()}\n\n{question}"
+
+         if file_name.endswith(".mp3"):
+             result = self.whisper_model.transcribe(os.path.join('file', file_name))
+             question = f"{file_name}:\n{result['text']}\n\n{question}"
+
+         fixed_answer = self.base_agent.run(question)
+         print(f"Agent returning fixed answer: {fixed_answer}")
+         return fixed_answer
+
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     # Get the SPACE_ID for sending a link to the code
+     space_id = os.getenv("SPACE_ID")
+
+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please log in to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate Agent (modify this part to create your agent)
+     try:
+         agent = BasicAgent()
+     except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # For an app running as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     with open("answers.jsonl", 'r') as f:
+         answers_payload = [json.loads(c) for c in f.readlines()]
+     task_ids = [item.get("task_id") for item in answers_payload]
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             if task_id in task_ids:
+                 print(f"Skipping already answered task {task_id}.")
+                 submitted_answer = next(
+                     (item.get("submitted_answer", "")
+                      for item in answers_payload if item.get("task_id") == task_id),
+                     ""  # default value when there is no matching entry
+                 )
+             else:
+                 submitted_answer = agent(item)
+                 answers_payload.append(
+                     {"task_id": task_id, "submitted_answer": submitted_answer})
+                 # Save the updated answers_payload back to the cache file
+                 with open("answers.jsonl", 'w') as f:
+                     f.writelines(
+                         [json.dumps(item) + "\n" for item in answers_payload])
+             results_log.append(
+                 {"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append(
+                 {"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(),
+                        "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+
+         1. Please clone this Space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time for the agent to go through all the questions).
+         This Space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to deal with the slow submit step, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(
+         label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(
+         label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
+
+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
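
Note: the disclaimer in the Gradio Markdown above suggests caching answers and answering questions concurrently instead of blocking the submit button. A minimal sketch of that idea, not part of this commit: `agent` stands in for any callable like `BasicAgent` above, and `questions_data` for the list returned by the `/questions` endpoint; the helper name `answer_all_concurrently` is hypothetical.

import json
from concurrent.futures import ThreadPoolExecutor, as_completed

def answer_all_concurrently(agent, questions_data, cache_path="answers.jsonl", max_workers=4):
    # Load task_ids already answered so a rerun skips finished work.
    done = set()
    try:
        with open(cache_path) as f:
            done = {json.loads(line)["task_id"] for line in f}
    except FileNotFoundError:
        pass

    pending = [q for q in questions_data if q.get("task_id") not in done]
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(agent, q): q["task_id"] for q in pending}
        for future in as_completed(futures):
            record = {"task_id": futures[future], "submitted_answer": future.result()}
            # Appending from the main thread only, one JSON object per line,
            # keeps the JSONL cache valid even if the run is interrupted.
            with open(cache_path, "a") as f:
                f.write(json.dumps(record) + "\n")

Append-per-result also avoids rewriting the whole file on every answer, which is what the in-loop `f.writelines` above does.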
file/1f975693-876d-457b-a649-393859e79bf3.mp3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:200f767e732b49efef5c05d128903ee4d2c34e66fdce7f5593ac123b2e637673
+ size 280868
file/7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx ADDED
Binary file (5.29 kB).
file/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b218c951c1f888f0bbe6f46c080f57afc7c9348fffc7ba4da35749ff1e2ac40f
+ size 179304
file/cca530fc-4052-43b2-b130-b30968d8aa44.png ADDED
file/f918266a-b3e0-4914-865d-4faa564f1aef.py ADDED
@@ -0,0 +1,35 @@
+ from random import randint
+ import time
+
+ class UhOh(Exception):
+     pass
+
+ class Hmm:
+     def __init__(self):
+         self.value = randint(-100, 100)
+
+     def Yeah(self):
+         if self.value == 0:
+             return True
+         else:
+             raise UhOh()
+
+ def Okay():
+     while True:
+         yield Hmm()
+
+ def keep_trying(go, first_try=True):
+     maybe = next(go)
+     try:
+         if maybe.Yeah():
+             return maybe.value
+     except UhOh:
+         if first_try:
+             print("Working...")
+             print("Please wait patiently...")
+         time.sleep(0.1)
+         return keep_trying(go, first_try=False)
+
+ if __name__ == "__main__":
+     go = Okay()
+     print(f"{keep_trying(go)}")
src/__init__.py ADDED
File without changes
src/agent/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .base_agent import base_agent, vll_agent
+
+
+ __all__ = ["base_agent", "vll_agent"]
src/agent/base_agent.py ADDED
@@ -0,0 +1,84 @@
+ from smolagents import (
+     CodeAgent,
+     DuckDuckGoSearchTool,
+ )
+ from ..llm import SwitchableOpenAIModel
+ from openinference.instrumentation.smolagents import SmolagentsInstrumentor
+ from langfuse import get_client
+ import os
+
+ # Get keys for your project from the project settings page: https://cloud.langfuse.com
+ os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
+ os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
+ os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # 🇪🇺 EU region
+
+ langfuse = get_client()
+
+ # Verify connection
+ if langfuse.auth_check():
+     print("Langfuse client is authenticated and ready!")
+ else:
+     print("Authentication failed. Please check your credentials and host.")
+
+ SmolagentsInstrumentor().instrument()
+
+ prompt = """
+ You are a general AI assistant. I will ask you a question. Your answer must follow these rules:
+ 1. If the answer is a number, do not use commas or units such as $ or percent sign unless specified otherwise.
+ 2. If the answer is a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
+ 3. If the answer is a comma-separated list, apply the above rules to each element.
+ Reply with only the answer and nothing else.
+ """
+
+ API_URL = "https://api-inference.modelscope.cn/v1"
+ API_KEY = "ms-..."
+
+ search_tool = DuckDuckGoSearchTool()
+
+ llm = SwitchableOpenAIModel(
+     model_list=[
+         'deepseek-ai/DeepSeek-V3.2',
+         'PaddlePaddle/ERNIE-4.5-300B-A47B-PT',
+         'deepseek-ai/DeepSeek-V3.1',
+         'Qwen/Qwen3-Coder-480B-A35B-Instruct',
+         'deepseek-ai/DeepSeek-R1-0528',
+         'Qwen/Qwen3-235B-A22B-Thinking-2507',
+         'Qwen/Qwen3-235B-A22B-Instruct-2507',
+         'Qwen/Qwen3-235B-A22B',
+         'MiniMax/MiniMax-M1-80k',
+         'LLM-Research/Llama-4-Maverick-17B-128E-Instruct',
+     ],
+     api_base=API_URL,
+     api_key=API_KEY,
+ )
+
+ vllm = SwitchableOpenAIModel(
+     model_list=[
+         'Qwen/Qwen3-VL-235B-A22B-Instruct',
+         'Shanghai_AI_Laboratory/Intern-S1',
+         'OpenGVLab/InternVL3_5-241B-A28B',
+         'stepfun-ai/step3'
+     ],
+     api_base=API_URL,
+     api_key=API_KEY,
+ )
+
+
+ base_agent = CodeAgent(
+     tools=[search_tool],
+     model=llm,
+     # stream_outputs=True,
+     additional_authorized_imports=[
+         "math", "numpy", "pandas", "requests", "json", "re", "time", "datetime", "os", "openpyxl", "csv", "bs4"],
+     instructions=prompt,
+     # add_base_tools=True,
+ )
+ vll_agent = CodeAgent(
+     tools=[search_tool],
+     model=vllm,
+     # stream_outputs=True,
+     additional_authorized_imports=[
+         "math", "numpy", "pandas", "requests", "json", "re", "time", "datetime", "os", "openpyxl", "csv", "bs4"],
+     instructions=prompt,
+     # add_base_tools=True,
+ )
+
+ if __name__ == '__main__':
+     # print(base_agent.prompt_templates["system_prompt"])
+     base_agent.run('What is the latest Pokémon game version?')
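
Note: for reference, the vision agent is called the same way as in app.py above, passing PIL images via the `images` argument. A minimal sketch, assuming the `src` package is importable, the ModelScope credentials above are filled in, and `position.png` is a hypothetical local file:

from PIL import Image

from src.agent import base_agent, vll_agent

# Text-only questions go to the text agent...
print(base_agent.run("What is the capital of France?"))

# ...while image questions pass PIL images to the vision agent,
# mirroring how BasicAgent.__call__ handles .png attachments.
print(vll_agent.run("Which side is to move?", images=[Image.open("position.png")]))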
src/llm/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .openai_llm import SwitchableOpenAIModel
+
+
+ __all__ = ["SwitchableOpenAIModel"]
src/llm/openai_llm.py ADDED
@@ -0,0 +1,192 @@
+ from typing import Any
+ from collections.abc import Generator
+ from smolagents import (
+     OpenAIModel,
+     ChatMessage,
+     ChatMessageStreamDelta,
+     Tool,
+     TokenUsage
+ )
+ from smolagents.models import (
+     ChatMessageToolCallStreamDelta,
+     remove_content_after_stop_sequences
+ )
+ import openai
+
+
+ class SwitchableOpenAIModel(OpenAIModel):
+     """This model connects to an OpenAI-compatible API server and switches to
+     the next model in `model_list` whenever the current one is rate-limited.
+
+     Parameters:
+         model_list (`list[str]`):
+             The model identifiers to use on the server, in order of preference (e.g. ["gpt-5"]).
+         api_base (`str`, *optional*):
+             The base URL of the OpenAI-compatible API server.
+         api_key (`str`, *optional*):
+             The API key to use for authentication.
+         organization (`str`, *optional*):
+             The organization to use for the API request.
+         project (`str`, *optional*):
+             The project to use for the API request.
+         client_kwargs (`dict[str, Any]`, *optional*):
+             Additional keyword arguments to pass to the OpenAI client (like organization, project, max_retries etc.).
+         custom_role_conversions (`dict[str, str]`, *optional*):
+             Custom role conversion mapping to convert message roles to others.
+             Useful for specific models that do not support specific message roles like "system".
+         flatten_messages_as_text (`bool`, default `False`):
+             Whether to flatten messages as text.
+         **kwargs:
+             Additional keyword arguments to forward to the underlying OpenAI API completion call, for instance `temperature`.
+     """
+
+     def __init__(
+         self,
+         model_list: list[str],
+         api_base: str | None = None,
+         api_key: str | None = None,
+         organization: str | None = None,
+         project: str | None = None,
+         client_kwargs: dict[str, Any] | None = None,
+         custom_role_conversions: dict[str, str] | None = None,
+         flatten_messages_as_text: bool = False,
+         **kwargs,
+     ):
+         self.model_list = model_list
+         self.model_index = 0
+         super().__init__(
+             model_id=self.model_list[self.model_index],
+             api_base=api_base,
+             api_key=api_key,
+             organization=organization,
+             project=project,
+             client_kwargs=client_kwargs,
+             custom_role_conversions=custom_role_conversions,
+             flatten_messages_as_text=flatten_messages_as_text,
+             **kwargs,
+         )
+
+     def generate_stream(
+         self,
+         messages: list[ChatMessage | dict],
+         stop_sequences: list[str] | None = None,
+         response_format: dict[str, str] | None = None,
+         tools_to_call_from: list[Tool] | None = None,
+         **kwargs,
+     ) -> Generator[ChatMessageStreamDelta]:
+         completion_kwargs = self._prepare_completion_kwargs(
+             messages=messages,
+             stop_sequences=stop_sequences,
+             response_format=response_format,
+             tools_to_call_from=tools_to_call_from,
+             model=self.model_list[self.model_index],
+             custom_role_conversions=self.custom_role_conversions,
+             convert_images_to_image_urls=True,
+             **kwargs,
+         )
+         self._apply_rate_limit()
+         try:
+             for event in self.client.chat.completions.create(
+                 **completion_kwargs,
+                 stream=True,
+                 stream_options={"include_usage": True},
+             ):
+                 if event.usage:
+                     yield ChatMessageStreamDelta(
+                         content="",
+                         token_usage=TokenUsage(
+                             input_tokens=event.usage.prompt_tokens,
+                             output_tokens=event.usage.completion_tokens,
+                         ),
+                     )
+                 if event.choices:
+                     choice = event.choices[0]
+                     if choice.delta:
+                         yield ChatMessageStreamDelta(
+                             content=choice.delta.content,
+                             tool_calls=[
+                                 ChatMessageToolCallStreamDelta(
+                                     index=delta.index,
+                                     id=delta.id,
+                                     type=delta.type,
+                                     function=delta.function,
+                                 )
+                                 for delta in choice.delta.tool_calls
+                             ]
+                             if choice.delta.tool_calls
+                             else None,
+                         )
+                     else:
+                         if not getattr(choice, "finish_reason", None):
+                             raise ValueError(
+                                 f"No content or tool calls in event: {event}")
+         except openai.RateLimitError:
+             if self.model_index < len(self.model_list) - 1:
+                 self.model_index += 1
+                 print(
+                     f"Switching to model {self.model_list[self.model_index]}")
+                 # `yield from` (not `return`) so the retried stream is re-emitted to the caller.
+                 yield from self.generate_stream(
+                     messages=messages,
+                     stop_sequences=stop_sequences,
+                     response_format=response_format,
+                     tools_to_call_from=tools_to_call_from,
+                     **kwargs,
+                 )
+             else:
+                 raise
+
+     def generate(
+         self,
+         messages: list[ChatMessage | dict],
+         stop_sequences: list[str] | None = None,
+         response_format: dict[str, str] | None = None,
+         tools_to_call_from: list[Tool] | None = None,
+         **kwargs,
+     ) -> ChatMessage:
+         completion_kwargs = self._prepare_completion_kwargs(
+             messages=messages,
+             stop_sequences=stop_sequences,
+             response_format=response_format,
+             tools_to_call_from=tools_to_call_from,
+             model=self.model_list[self.model_index],
+             custom_role_conversions=self.custom_role_conversions,
+             convert_images_to_image_urls=True,
+             **kwargs,
+         )
+         self._apply_rate_limit()
+         try:
+             response = self.client.chat.completions.create(**completion_kwargs)
+         except openai.RateLimitError:
+             if self.model_index < len(self.model_list) - 1:
+                 self.model_index += 1
+                 print(
+                     f"Switching to model {self.model_list[self.model_index]}")
+                 return self.generate(
+                     messages=messages,
+                     stop_sequences=stop_sequences,
+                     response_format=response_format,
+                     tools_to_call_from=tools_to_call_from,
+                     **kwargs,
+                 )
+             else:
+                 raise
+
+         content = response.choices[0].message.content
+
+         if stop_sequences is not None and not self.supports_stop_parameter:
+             content = remove_content_after_stop_sequences(
+                 content, stop_sequences)
+         return ChatMessage(
+             role=response.choices[0].message.role,
+             content=content,
+             tool_calls=response.choices[0].message.tool_calls,
+             raw=response,
+             token_usage=TokenUsage(
+                 input_tokens=response.usage.prompt_tokens,
+                 output_tokens=response.usage.completion_tokens,
+             ),
+         )
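
Note: a minimal usage sketch of the fallback behavior, assuming valid ModelScope credentials (the endpoint and key below are placeholders) and that `generate` accepts plain dict messages, as its own type hint states:

from src.llm import SwitchableOpenAIModel

# Models are tried in order; an openai.RateLimitError advances model_index
# to the next entry and the same request is retried against that model.
llm = SwitchableOpenAIModel(
    model_list=[
        "deepseek-ai/DeepSeek-V3.1",  # first choice
        "Qwen/Qwen3-235B-A22B",       # fallback if the first is rate-limited
    ],
    api_base="https://api-inference.modelscope.cn/v1",
    api_key="ms-...",  # placeholder; a real key is required
)

message = llm.generate([{"role": "user", "content": "Reply with one word: hello"}])
print(message.content)

Because `model_index` persists on the instance, one rate-limited model is skipped for all subsequent calls rather than being retried on every request.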