HollowVoice commited on
Commit
ddba08c
·
1 Parent(s): 88cabcd

Code is now running and getting one answer correct

Browse files
.gitignore ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # Ruff stuff:
171
+ .ruff_cache/
172
+
173
+ # PyPI configuration file
174
+ .pypirc
.langgraph_api/.langgraph_checkpoint.1.pckl ADDED
Binary file (24.6 kB). View file
 
.langgraph_api/.langgraph_checkpoint.2.pckl ADDED
Binary file (5.23 kB). View file
 
.langgraph_api/.langgraph_checkpoint.3.pckl ADDED
Binary file (13.9 kB). View file
 
.langgraph_api/.langgraph_ops.pckl ADDED
Binary file (14.1 kB). View file
 
.langgraph_api/.langgraph_retry_counter.pckl ADDED
Binary file (117 Bytes). View file
 
.langgraph_api/store.pckl ADDED
Binary file (6 Bytes). View file
 
.langgraph_api/store.vectors.pckl ADDED
Binary file (6 Bytes). View file
 
agent.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from langchain_core.messages import SystemMessage, HumanMessage
4
+ from langchain_openai import AzureChatOpenAI
5
+
6
+ from langgraph.graph import START, StateGraph, MessagesState
7
+ from langgraph.prebuilt import tools_condition, ToolNode
8
+ from langchain_core.runnables import RunnableConfig
9
+
10
+
11
+ load_dotenv()
12
+
13
+
14
+ # --- TOOLS ---
15
def add(a: int, b: int) -> int:
    """Return the sum of two integers.

    Args:
        a: first addend.
        b: second addend.

    Returns:
        The integer sum ``a + b``.
    """
    total = a + b
    return total
23
+
24
+
25
def multiply(a: int, b: int) -> int:
    """Return the product of two integers.

    Args:
        a: first factor.
        b: second factor.

    Returns:
        The integer product ``a * b``.
    """
    product = a * b
    return product
33
+
34
+
35
+ tools = [add, multiply]
36
+
37
+
38
+ # --- GRAPH ---
39
+ # This function allows us to use the web interface to test the graph
40
def make_graph(config: RunnableConfig):
    """Factory entry point used by the LangGraph dev web UI.

    The *config* argument is required by the LangGraph factory interface but
    is not used here; the graph is always built the same way.
    """
    return create_graph()
43
+
44
+
45
+ # This function is used to create the graph
46
def create_graph():
    """Build and compile the tool-calling agent graph.

    Reads the Azure OpenAI connection settings from the environment
    (``AZURE_ENDPOINT_LLM``, ``AZURE_API_KEY_LLM``, ``AZURE_API_VERSION_LLM``,
    ``AZURE_DEPLOYMENT_LLM``), binds the math tools to the LLM, and wires an
    assistant node to a ToolNode so the model can loop through tool calls
    until it produces a final answer.

    Returns:
        The compiled LangGraph graph, ready for ``invoke``.

    Raises:
        ValueError: if any required ``AZURE_*_LLM`` environment variable is
            unset or empty.
    """
    # Azure OpenAI connection settings. Fail fast with a clear message when
    # something is missing instead of surfacing an opaque client error later.
    settings = {
        "AZURE_ENDPOINT_LLM": os.environ.get("AZURE_ENDPOINT_LLM"),
        "AZURE_API_KEY_LLM": os.environ.get("AZURE_API_KEY_LLM"),
        "AZURE_API_VERSION_LLM": os.environ.get("AZURE_API_VERSION_LLM"),
        "AZURE_DEPLOYMENT_LLM": os.environ.get("AZURE_DEPLOYMENT_LLM"),
    }
    missing = [name for name, value in settings.items() if not value]
    if missing:
        raise ValueError(
            "Missing required environment variables: " + ", ".join(missing)
        )

    # Initialize LLM and bind the tools so the model can emit tool calls.
    llm = AzureChatOpenAI(
        azure_deployment=settings["AZURE_DEPLOYMENT_LLM"],
        api_version=settings["AZURE_API_VERSION_LLM"],
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=settings["AZURE_API_KEY_LLM"],
        azure_endpoint=settings["AZURE_ENDPOINT_LLM"],
    )
    llm_with_tools = llm.bind_tools(tools)

    # System message steering the model toward short, strictly formatted answers.
    system_prompt_txt = "You are a general AI assistant that uses tools to answer questions. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."

    sys_msg = SystemMessage(system_prompt_txt)

    # Node: prepend the system prompt to the running conversation on every turn.
    def assistant(state: MessagesState):
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    # Build graph
    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges(
        "assistant",
        # If the latest message (result) from assistant is a tool call ->
        # tools_condition routes to tools.
        # If the latest message (result) from assistant is not a tool call ->
        # tools_condition routes to END.
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    # Compile graph
    graph = builder.compile()

    return graph
93
+
94
+
95
+ # You can run this script directly to test the graph
96
+ # Alternatively in a commandprompt run "langgraph dev" and it will allow you to interact with the graph in a web ui
97
# You can run this script directly to test the graph
# Alternatively in a commandprompt run "langgraph dev" and it will allow you to interact with the graph in a web ui
if __name__ == "__main__":
    # Build the graph once, then run each smoke-test question through it.
    graph = create_graph()
    for question in ("What is an elephant? ", "What is 10+10?"):
        result = graph.invoke({"messages": [HumanMessage(content=question)]})
        for msg in result["messages"]:
            msg.pretty_print()
app.py CHANGED
@@ -3,32 +3,43 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
 
11
  # --- Basic Agent Definition ---
12
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
13
  class BasicAgent:
14
  def __init__(self):
 
 
15
  print("BasicAgent initialized.")
 
16
  def __call__(self, question: str) -> str:
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
 
22
- def run_and_submit_all( profile: gr.OAuthProfile | None):
 
 
 
 
 
 
 
23
  """
24
  Fetches all questions, runs the BasicAgent on them, submits all answers,
25
  and displays the results.
26
  """
27
  # --- Determine HF Space Runtime URL and Repo URL ---
28
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
29
 
30
  if profile:
31
- username= f"{profile.username}"
32
  print(f"User logged in: {username}")
33
  else:
34
  print("User not logged in.")
@@ -55,16 +66,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
55
  response.raise_for_status()
56
  questions_data = response.json()
57
  if not questions_data:
58
- print("Fetched questions list is empty.")
59
- return "Fetched questions list is empty or invalid format.", None
60
  print(f"Fetched {len(questions_data)} questions.")
61
  except requests.exceptions.RequestException as e:
62
  print(f"Error fetching questions: {e}")
63
  return f"Error fetching questions: {e}", None
64
  except requests.exceptions.JSONDecodeError as e:
65
- print(f"Error decoding JSON response from questions endpoint: {e}")
66
- print(f"Response text: {response.text[:500]}")
67
- return f"Error decoding server response for questions: {e}", None
68
  except Exception as e:
69
  print(f"An unexpected error occurred fetching questions: {e}")
70
  return f"An unexpected error occurred fetching questions: {e}", None
@@ -81,18 +92,36 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
81
  continue
82
  try:
83
  submitted_answer = agent(question_text)
84
- answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
 
 
 
 
 
 
 
86
  except Exception as e:
87
- print(f"Error running agent on task {task_id}: {e}")
88
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
 
 
 
 
 
89
 
90
  if not answers_payload:
91
  print("Agent did not produce any answers to submit.")
92
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
93
 
94
- # 4. Prepare Submission
95
- submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
 
 
 
 
96
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
97
  print(status_update)
98
 
@@ -162,20 +191,19 @@ with gr.Blocks() as demo:
162
 
163
  run_button = gr.Button("Run Evaluation & Submit All Answers")
164
 
165
- status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
 
166
  # Removed max_rows=10 from DataFrame constructor
167
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
168
 
169
- run_button.click(
170
- fn=run_and_submit_all,
171
- outputs=[status_output, results_table]
172
- )
173
 
174
  if __name__ == "__main__":
175
- print("\n" + "-"*30 + " App Starting " + "-"*30)
176
  # Check for SPACE_HOST and SPACE_ID at startup for information
177
  space_host_startup = os.getenv("SPACE_HOST")
178
- space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
179
 
180
  if space_host_startup:
181
  print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -183,14 +211,18 @@ if __name__ == "__main__":
183
  else:
184
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
185
 
186
- if space_id_startup: # Print repo URLs if SPACE_ID is found
187
  print(f"✅ SPACE_ID found: {space_id_startup}")
188
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
189
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
 
 
190
  else:
191
- print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
 
192
 
193
- print("-"*(60 + len(" App Starting ")) + "\n")
194
 
195
  print("Launching Gradio Interface for Basic Agent Evaluation...")
196
- demo.launch(debug=True, share=False)
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from agent import create_graph
7
+ from langchain_core.messages import HumanMessage
8
+
9
 
10
  # (Keep Constants as is)
11
  # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
 
14
+
15
  # --- Basic Agent Definition ---
16
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
17
class BasicAgent:
    """Wraps the compiled LangGraph agent behind a simple callable interface."""

    def __init__(self):
        # Build the graph once up front; it is reused for every question.
        self.graph = create_graph()

        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        result = self.graph.invoke({"messages": [HumanMessage(content=question)]})
        # The last message in the conversation holds the model's final reply.
        answer = result["messages"][-1].content
        print(f"Agent returning answer: {answer}")
        return answer
31
+
32
+
33
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
34
  """
35
  Fetches all questions, runs the BasicAgent on them, submits all answers,
36
  and displays the results.
37
  """
38
  # --- Determine HF Space Runtime URL and Repo URL ---
39
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
40
 
41
  if profile:
42
+ username = f"{profile.username}"
43
  print(f"User logged in: {username}")
44
  else:
45
  print("User not logged in.")
 
66
  response.raise_for_status()
67
  questions_data = response.json()
68
  if not questions_data:
69
+ print("Fetched questions list is empty.")
70
+ return "Fetched questions list is empty or invalid format.", None
71
  print(f"Fetched {len(questions_data)} questions.")
72
  except requests.exceptions.RequestException as e:
73
  print(f"Error fetching questions: {e}")
74
  return f"Error fetching questions: {e}", None
75
  except requests.exceptions.JSONDecodeError as e:
76
+ print(f"Error decoding JSON response from questions endpoint: {e}")
77
+ print(f"Response text: {response.text[:500]}")
78
+ return f"Error decoding server response for questions: {e}", None
79
  except Exception as e:
80
  print(f"An unexpected error occurred fetching questions: {e}")
81
  return f"An unexpected error occurred fetching questions: {e}", None
 
92
  continue
93
  try:
94
  submitted_answer = agent(question_text)
95
+ answers_payload.append(
96
+ {"task_id": task_id, "submitted_answer": submitted_answer}
97
+ )
98
+ results_log.append(
99
+ {
100
+ "Task ID": task_id,
101
+ "Question": question_text,
102
+ "Submitted Answer": submitted_answer,
103
+ }
104
+ )
105
  except Exception as e:
106
+ print(f"Error running agent on task {task_id}: {e}")
107
+ results_log.append(
108
+ {
109
+ "Task ID": task_id,
110
+ "Question": question_text,
111
+ "Submitted Answer": f"AGENT ERROR: {e}",
112
+ }
113
+ )
114
 
115
  if not answers_payload:
116
  print("Agent did not produce any answers to submit.")
117
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
118
 
119
+ # 4. Prepare Submission
120
+ submission_data = {
121
+ "username": username.strip(),
122
+ "agent_code": agent_code,
123
+ "answers": answers_payload,
124
+ }
125
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
126
  print(status_update)
127
 
 
191
 
192
  run_button = gr.Button("Run Evaluation & Submit All Answers")
193
 
194
+ status_output = gr.Textbox(
195
+ label="Run Status / Submission Result", lines=5, interactive=False
196
+ )
197
  # Removed max_rows=10 from DataFrame constructor
198
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
199
 
200
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
201
 
202
  if __name__ == "__main__":
203
+ print("\n" + "-" * 30 + " App Starting " + "-" * 30)
204
  # Check for SPACE_HOST and SPACE_ID at startup for information
205
  space_host_startup = os.getenv("SPACE_HOST")
206
+ space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
207
 
208
  if space_host_startup:
209
  print(f"✅ SPACE_HOST found: {space_host_startup}")
 
211
  else:
212
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
213
 
214
+ if space_id_startup: # Print repo URLs if SPACE_ID is found
215
  print(f"✅ SPACE_ID found: {space_id_startup}")
216
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
217
+ print(
218
+ f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
219
+ )
220
  else:
221
+ print(
222
+ "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
223
+ )
224
 
225
+ print("-" * (60 + len(" App Starting ")) + "\n")
226
 
227
  print("Launching Gradio Interface for Basic Agent Evaluation...")
228
+ demo.launch(debug=True, share=False)
langgraph.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dockerfile_lines": [],
3
+ "graphs": {
4
+ "agent": "./agent.py:create_graph"
5
+ },
6
+ "env": "./.env",
7
+ "python_version": "3.12",
8
+ "dependencies": ["."]
9
+ }
requirements.txt CHANGED
@@ -1,4 +1,9 @@
1
  gradio
2
  requests
3
  gradio[oauth]
4
- python-dotenv
 
 
 
 
 
 
1
  gradio
2
  requests
3
  gradio[oauth]
4
+ python-dotenv
5
+ langgraph
6
+ langchain-core
7
+ langchain-community
8
+ langchain-openai
9
+ langgraph-cli[inmem]