AliA1997 committed on
Commit
da83164
Β·
1 Parent(s): 72c50d7

See if final assignment works.

Browse files
Files changed (6) hide show
  1. README.md +8 -9
  2. app.py +200 -62
  3. init_agent.py +132 -0
  4. metadata copy.jsonl +0 -0
  5. metadata.jsonl +0 -0
  6. requirements.txt +11 -0
README.md CHANGED
@@ -1,16 +1,15 @@
1
  ---
2
- title: FinalAssignment AliA
3
- emoji: πŸ’¬
4
- colorFrom: yellow
5
- colorTo: purple
6
  sdk: gradio
7
- sdk_version: 5.42.0
8
  app_file: app.py
9
  pinned: false
10
  hf_oauth: true
11
- hf_oauth_scopes:
12
- - inference-api
13
- short_description: Final Assignment for the AI Agents Course
14
  ---
15
 
16
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
1
  ---
2
+ title: AI Agents Final Assignment - AliA
3
+ emoji: πŸ•΅πŸ»β€β™‚οΈ
4
+ colorFrom: indigo
5
+ colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 5.25.2
8
  app_file: app.py
9
  pinned: false
10
  hf_oauth: true
11
+ # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
12
+ hf_oauth_expiration_minutes: 480
 
13
  ---
14
 
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,70 +1,208 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
-
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
16
  """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- messages.extend(history)
22
-
23
- messages.append({"role": "user", "content": message})
24
-
25
- response = ""
26
-
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
 
 
63
  with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
 
 
 
 
 
 
 
 
68
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import gradio as gr
3
+ import requests
4
+ import inspect
5
+ from typing import Optional, Any
6
+ import pandas as pd
7
+ from init_agent import build_workflow
8
+ from langchain_core.messages import HumanMessage
9
+
10
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
    """Thin wrapper around the LangGraph workflow built in init_agent.py.

    The workflow is compiled once at construction time and reused for every
    question so the (expensive) graph build happens only once per run.
    """

    # Compiled LangGraph workflow; Any because the compiled-graph type is
    # owned by langgraph, not this module.
    workflow: Optional[Any]

    def __init__(self):
        print("BasicAgent initialized.")
        self.workflow = build_workflow()

    def __call__(self, question: str) -> str:
        """Run the workflow on *question* and return the final answer text."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Bug fix: the original sent a hard-coded test prompt
        # ("What does this code do?: var a = 10; var b = 20;") instead of the
        # actual question, so every task received the same answer.
        workflow_response = self.workflow.invoke({
            "messages": [HumanMessage(content=question)],
            # Seed value only — the classify node recomputes the label.
            "classification": "not coding",
            # None here: the workflow nodes lazily construct the agent.
            "ai_agent": None,
        })
        # The last message in the state is the assistant's reply.
        return workflow_response["messages"][-1].content
34
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # Gradio injects *profile* automatically (no `inputs=` needed on the
    # button) because the parameter is annotated with gr.OAuthProfile.
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        # Without a login there is no username to submit under — bail early.
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    # results_log keeps every item (including failures) for the UI table;
    # answers_payload only keeps successful answers for submission.
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # A single failing task should not abort the whole run.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        # Try to surface the server's error detail, falling back to raw text.
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
153
 
154
+
155
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    # Required for gr.OAuthProfile injection into run_and_submit_all.
    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No `inputs=` here: gradio supplies the OAuth profile automatically
    # based on run_and_submit_all's parameter annotation.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
185
 
186
if __name__ == "__main__":
    # Startup diagnostics: report which Space environment variables are set
    # so the log shows whether we are running on HF Spaces or locally.
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    # debug=True surfaces tracebacks in the Space logs; share=False because a
    # Space already exposes a public URL.
    demo.launch(debug=True, share=False)
init_agent.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from transformers import pipeline
3
+
4
+ from typing import Annotated, TypedDict, Optional, Any
5
+ from langgraph.graph import StateGraph, START, END
6
+ from langgraph.graph.message import add_messages
7
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
8
+
9
+ from langchain_core.messages import AnyMessage, HumanMessage
10
+ from langchain_community.tools import DuckDuckGoSearchRun
11
+
12
+ from langchain_core.tools import Tool
13
+
14
+ hf_token = os.environ.et
15
+
16
def init_classifier():
    """Build and return a zero-shot classifier used to route questions."""
    return pipeline(
        "zero-shot-classification",
        model='cross-encoder/nli-distilroberta-base',
    )
19
+
20
+
21
class CurrentAgent():
    """Holds the currently-active HF inference endpoint and chat wrapper."""
    # Endpoint/chat pair; both are replaced wholesale by update_llm().
    current_llm: HuggingFaceEndpoint
    current_chat: ChatHuggingFace
    def __init__(self):
        # Default general-purpose model; code questions switch models later
        # via update_llm(). Uses the module-level hf_token.
        self.current_llm = HuggingFaceEndpoint(
            repo_id="Qwen/Qwen3-VL-8B-Instruct",
            huggingfacehub_api_token=hf_token
        )
        # NOTE(review): verify that ChatHuggingFace accepts a `tools=` kwarg in
        # the installed langchain_huggingface version — bind_tools() is the
        # documented way to attach tools to a chat model.
        self.current_chat = ChatHuggingFace(llm=self.current_llm, verbose=True, tools=[DuckDuckGoSearchRun()])

    def update_llm(self, model_id: str, hf_token):
        # Swap in a new endpoint for *model_id*. The hf_token parameter shadows
        # the module-level name of the same spelling.
        self.current_llm = HuggingFaceEndpoint(
            repo_id=model_id,
            huggingfacehub_api_token=hf_token
        )
        self.current_chat = ChatHuggingFace(llm=self.current_llm, verbose=True, tools=[DuckDuckGoSearchRun()])
37
+
38
+
39
# Define a custom agent state:
class AgentState(TypedDict):
    # Lazily-created model holder; None until a workflow node builds one.
    ai_agent: Optional[CurrentAgent]
    # Routing label produced by classify(): "coding" or "not coding".
    classification: str
    # Conversation history; add_messages appends new messages on update
    # instead of replacing the list.
    messages: Annotated[list[AnyMessage], add_messages]
44
+
45
+
46
+ tools = [DuckDuckGoSearchRun()]
47
+
48
+
49
def classify(state: AgentState) -> AgentState:
    """Label the latest message as coding/not-coding and ensure an agent exists."""
    latest_text = state['messages'][-1].content
    # Zero-shot classification over the two routing labels.
    result = init_classifier()(latest_text, ["coding", "not coding"])
    top_label = result['labels'][0]
    top_score = result['scores'][0]

    # Lazily construct the model holder on first use.
    if state['ai_agent'] is None:
        state['ai_agent'] = CurrentAgent()

    # Only route to the code assistant when the classifier is confident.
    is_coding = top_label == 'coding' and top_score > 0.6
    new_classification = 'coding' if is_coding else 'not coding'

    return {
        "ai_agent": state['ai_agent'],
        "classification": new_classification,
        "messages": state['messages'],
    }
70
+
71
def general_assistant(state: AgentState) -> AgentState:
    """Answer a non-coding question with the currently-configured chat model."""
    agent = state['ai_agent']
    if agent is None:
        agent = CurrentAgent()
        state['ai_agent'] = agent

    # Single assistant reply; add_messages appends it to the history.
    reply = agent.current_chat.invoke(state['messages'])

    return {
        "ai_agent": agent,
        "classification": state['classification'],
        "messages": [reply],
    }
84
+
85
def code_assistant(state: AgentState) -> AgentState:
    """Answer a coding question after switching to the coder model."""
    agent = state['ai_agent']
    if agent is None:
        agent = CurrentAgent()
        state['ai_agent'] = agent

    # Swap the endpoint to the code-specialised model before invoking.
    agent.update_llm('Qwen/Qwen2.5-Coder-32B-Instruct', hf_token)
    reply = agent.current_chat.invoke(state['messages'])

    return {
        "ai_agent": agent,
        "classification": state['classification'],
        "messages": [reply],
    }
99
+
100
def route(state: AgentState):
    """Return the name of the next node based on the classification label."""
    return "code_assistant" if state['classification'] == "coding" else "general_assistant"
106
+
107
+
108
+
109
def build_workflow() -> Any:
    """Compile the agent graph: classify, then branch to one assistant node."""
    builder = StateGraph(AgentState)

    # Nodes.
    builder.add_node("classify", classify)
    builder.add_node("general_assistant", general_assistant)
    builder.add_node("code_assistant", code_assistant)

    # Edges: every run starts with classification ...
    builder.add_edge(START, "classify")
    # ... then branches on the label route() extracts from the state.
    builder.add_conditional_edges(
        "classify",
        route,
        {
            "general_assistant": "general_assistant",
            "code_assistant": "code_assistant",
        },
    )
    # Both assistants terminate the graph.
    builder.add_edge("general_assistant", END)
    builder.add_edge("code_assistant", END)

    return builder.compile()
metadata copy.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
metadata.jsonl ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ requests
3
+ transformers
4
+ torch
5
+ langgraph
6
+ langchain
7
+ langchain_core
8
+ langchain_community
9
+ langchain_huggingface
10
+ langchain_tools
11
+ huggingface-hub