WaelDahech committed on
Commit
18b9371
·
1 Parent(s): 40fb629

add openai req

Browse files
Files changed (1) hide show
  1. app.py +46 -233
app.py CHANGED
@@ -1,254 +1,67 @@
# --- Imports (deduplicated: `import os` and the smolagents names appeared twice) ---
import os

import gradio as gr
import requests
import inspect
import pandas as pd
import yaml  # kept for the optional prompt-template loading below

from dotenv import load_dotenv
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    InferenceClientModel,
    OpenAIServerModel,
    Tool,
    load_tool,
    tool,
)

from my_tools import tools_list

# --- Constants ---
# Scoring endpoint for the HF Agents course (unit 4).
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Optional YAML prompt templates (left disabled in the original):
# with open("prompts.yaml", 'r') as stream:
#     prompt_templates = yaml.safe_load(stream)

# 1) Load .env so OPENAI_API_KEY can live in a local file (no-op if absent).
load_dotenv()

# 2) Grab the key from the environment; None here means the model call will fail later.
openai_api_key = os.getenv("OPENAI_API_KEY")

# 3) Build the smolagents model wrapper around OpenAI's chat endpoint.
model = OpenAIServerModel(
    model_id="gpt-4.1",                    # or "gpt-3.5-turbo", etc.
    api_base="https://api.openai.com/v1",  # OpenAI's standard endpoint
    api_key=openai_api_key,                # secret key from the environment
)

# 4) CodeAgent powered by that model, with web search available.
OpenAIAgent = CodeAgent(
    tools=[DuckDuckGoSearchTool()],  # any tools you want
    model=model,
    # you can allow extra imports if needed:
    # additional_authorized_imports=["requests", "bs4"],
)

# A second agent backed by a free Inference-API model; currently has no tools.
MyAgent = CodeAgent(
    model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"),
    tools=[],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=None,
)
60
 
61
class BasicAgent:
    """Placeholder agent: logs the incoming question and returns a canned reply."""

    def __init__(self):
        # Announce construction so the Space logs show the agent was created.
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Return the fixed default answer regardless of *question*."""
        preview = question[:50]
        print(f"Agent received question (first 50 chars): {preview}...")
        answer = "This is a default answer."
        print(f"Agent returning fixed answer: {answer}")
        return answer
69
-
70
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
# NOTE(review): this class is byte-for-byte identical to the BasicAgent
# defined just above and silently shadows it — keep only one of the two.
class BasicAgent:
    # Placeholder agent that always returns the same fixed answer.
    def __init__(self):
        print("BasicAgent initialized.")
    def __call__(self, question: str) -> str:
        # Log a short preview of the question, then return the canned reply.
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer
79
-
80
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent on them, submits all answers,
    and displays the results.

    Returns a (status_message, results_dataframe_or_None) pair for the
    gradio outputs.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # used to build a public link to this Space's code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Get the agent.
    # BUGFIX: OpenAIAgent is already a CodeAgent *instance* built at import
    # time; the original code called it like a class (`OpenAIAgent()`),
    # which raises a TypeError at runtime.
    try:
        agent = OpenAIAgent
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face space, this link points
    # toward your codebase (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            # BUGFIX: smolagents CodeAgent exposes `.run(task)`, not
            # `__call__`; cast to str so the payload is JSON-serializable.
            submitted_answer = str(agent.run(question_text))
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # Keep going: one failing task shouldn't abort the whole run.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
199
-
200
-
201
# --- Build Gradio Interface using Blocks ---
# Component creation order defines the page layout, so do not reorder.
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    # Static instructions / disclaimer shown at the top of the page.
    gr.Markdown(
        """
**Instructions:**

1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

---
**Disclaimers:**
Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
"""
    )

    # OAuth login; supplies the gr.OAuthProfile that run_and_submit_all reads.
    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No explicit `inputs`: gradio injects the OAuth profile argument itself.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
231
-
232
- if __name__ == "__main__":
233
- print("\n" + "-"*30 + " App Starting " + "-"*30)
234
- # Check for SPACE_HOST and SPACE_ID at startup for information
235
- space_host_startup = os.getenv("SPACE_HOST")
236
- space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
237
-
238
- if space_host_startup:
239
- print(f"✅ SPACE_HOST found: {space_host_startup}")
240
- print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
241
- else:
242
- print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
243
-
244
- if space_id_startup: # Print repo URLs if SPACE_ID is found
245
- print(f"✅ SPACE_ID found: {space_id_startup}")
246
- print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
247
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
248
- else:
249
- print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
250
-
251
- print("-"*(60 + len(" App Starting ")) + "\n")
252
-
253
- print("Launching Gradio Interface for Basic Agent Evaluation...")
254
- demo.launch(debug=True, share=False)
 
1
# --- Imports (grouped: stdlib, third-party, local) ---
import datetime
import requests
import yaml

import pytz
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    InferenceClientModel,
    Tool,
    load_tool,
    tool,
)

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# Below is an example of a tool that does nothing. Amaze us with your creativity !

# Tool instance the agent must keep so it can emit its final answer.
final_answer = FinalAnswerTool()

# NOTE(review): name has a typo ("promot"); kept unchanged for backward
# compatibility. This string is currently unused — the agent below takes its
# templates from prompts.yaml instead — so wire it in or remove it.
system_promot = """
You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
"""

# Import tool from Hub

# Load the prompt templates shipped with the Space.
# `encoding` pinned so the load is not platform-locale dependent.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
29
 
 
 
 
 
 
30
 
 
 
31
 
32
# === Tool Definitions ===


# Registry of custom tools the agent may call; each entry names a callable
# inside the local `mytools` package.
# NOTE(review): smolagents' `Tool` is normally subclassed or produced via the
# `@tool` decorator; its constructor does not document `name=`/`entry_point=`
# keywords. Confirm this form actually builds usable tools with the installed
# smolagents version, or these instantiations will fail at import time.
TOOL_REGISTRY = [
    Tool(name="wikipedia_search", entry_point="mytools.wikipedia_search.call"),
    Tool(name="youtube_transcript", entry_point="mytools.youtube_transcript.call"),
    Tool(name="video_frame_analyzer", entry_point="mytools.video_frame_analyzer.call"),
    Tool(name="string_manipulator", entry_point="mytools.string_manipulator.call"),
    Tool(name="vision_chess_engine", entry_point="mytools.vision_chess_engine.call"),
    Tool(name="table_parser", entry_point="mytools.table_parser.call"),
    Tool(name="libretext_fetcher", entry_point="mytools.libretext_fetcher.call"),
    Tool(name="audio_transcriber", entry_point="mytools.audio_transcriber.call"),
    Tool(name="botanical_classifier", entry_point="mytools.botanical_classifier.call"),
    Tool(name="imdb_lookup", entry_point="mytools.imdb_lookup.call"),
    Tool(name="excel_reader", entry_point="mytools.excel_reader.call"),
    Tool(name="competition_db", entry_point="mytools.competition_db.call"),
    Tool(name="japanese_baseball_api", entry_point="mytools.japanese_baseball_api.call"),
]
50
 
51
+
52
# Free Hugging Face Inference-API model used to drive the agent.
model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"

# Main CodeAgent: final-answer tool plus the custom registry, configured with
# the prompt templates loaded from prompts.yaml.
agent = CodeAgent(
    model=InferenceClientModel(model_id=model_id),
    tools=[final_answer, *TOOL_REGISTRY],  # add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Start the chat UI; blocking call, so this must be the last statement.
GradioUI(agent).launch(share=False)