Gabe committed on
Commit
9e3bc51
·
1 Parent(s): d52c683

Add full langgraph agent with all tools

Browse files
Files changed (2) hide show
  1. app.py +306 -21
  2. requirements.txt +14 -1
app.py CHANGED
@@ -3,21 +3,302 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
13
  class BasicAgent:
 
14
  def __init__(self):
15
- print("BasicAgent initialized.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  def __call__(self, question: str) -> str:
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
@@ -26,27 +307,29 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
26
  """
27
  # --- Determine HF Space Runtime URL and Repo URL ---
28
  space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
29
-
30
  if profile:
31
  username= f"{profile.username}"
32
  print(f"User logged in: {username}")
33
  else:
34
  print("User not logged in.")
35
  return "Please Login to Hugging Face with the button.", None
36
-
37
  api_url = DEFAULT_API_URL
38
  questions_url = f"{api_url}/questions"
39
  submit_url = f"{api_url}/submit"
40
 
41
  # 1. Instantiate Agent ( modify this part to create your agent)
 
42
  try:
43
  agent = BasicAgent()
44
  except Exception as e:
45
  print(f"Error instantiating agent: {e}")
46
  return f"Error initializing agent: {e}", None
 
 
47
  # In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
48
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
- print(agent_code)
50
 
51
  # 2. Fetch Questions
52
  print(f"Fetching questions from: {questions_url}")
@@ -73,20 +356,27 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
73
  results_log = []
74
  answers_payload = []
75
  print(f"Running agent on {len(questions_data)} questions...")
76
- for item in questions_data:
 
 
 
 
77
  task_id = item.get("task_id")
78
  question_text = item.get("question")
79
  if not task_id or question_text is None:
80
  print(f"Skipping item with missing task_id or question: {item}")
81
  continue
 
 
82
  try:
83
  submitted_answer = agent(question_text)
84
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
86
  except Exception as e:
87
  print(f"Error running agent on task {task_id}: {e}")
88
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
89
-
90
  if not answers_payload:
91
  print("Agent did not produce any answers to submit.")
92
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
@@ -139,33 +429,27 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
139
  results_df = pd.DataFrame(results_log)
140
  return status_message, results_df
141
 
142
-
143
  # --- Build Gradio Interface using Blocks ---
144
  with gr.Blocks() as demo:
145
  gr.Markdown("# Basic Agent Evaluation Runner")
146
  gr.Markdown(
147
  """
148
  **Instructions:**
149
-
150
  1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
151
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
152
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
153
-
154
  ---
155
  **Disclaimers:**
156
  Once you click the "Submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
157
  This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the delay of the submit button, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
158
  """
159
  )
160
-
161
  gr.LoginButton()
162
-
163
  run_button = gr.Button("Run Evaluation & Submit All Answers")
164
-
165
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
166
  # Removed max_rows=10 from DataFrame constructor
167
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
168
-
169
  run_button.click(
170
  fn=run_and_submit_all,
171
  outputs=[status_output, results_table]
@@ -173,24 +457,25 @@ with gr.Blocks() as demo:
173
 
174
  if __name__ == "__main__":
175
  print("\n" + "-"*30 + " App Starting " + "-"*30)
 
176
  # Check for SPACE_HOST and SPACE_ID at startup for information
177
  space_host_startup = os.getenv("SPACE_HOST")
178
  space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
179
-
180
  if space_host_startup:
181
  print(f"✅ SPACE_HOST found: {space_host_startup}")
182
  print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
183
  else:
184
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
185
-
186
  if space_id_startup: # Print repo URLs if SPACE_ID is found
187
  print(f"✅ SPACE_ID found: {space_id_startup}")
188
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
189
  print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
190
  else:
191
  print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
192
-
193
  print("-"*(60 + len(" App Starting ")) + "\n")
194
-
195
  print("Launching Gradio Interface for Basic Agent Evaluation...")
196
- demo.launch(debug=True, share=False)
 
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ import io
7
+ import contextlib
8
+ from typing import TypedDict, Annotated
9
+ import torch
10
+
11
+ # --- Multimodal & Web Tool Imports ---
12
+ from transformers import pipeline
13
+ from youtube_transcript_api import YouTubeTranscriptApi
14
+ import requests
15
+ from bs4 import BeautifulSoup
16
+
17
+ # --- LangChain & LangGraph Imports ---
18
+ from langgraph.graph.message import add_messages
19
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, ToolMessage
20
+ from langgraph.prebuilt import ToolNode
21
+ from langgraph.graph import START, StateGraph
22
+ from langgraph.prebuilt import tools_condition
23
+ from langchain_huggingface import ChatHuggingFace
24
+ from langchain_huggingface import HuggingFaceEndpoint
25
+ from langchain_community.tools import DuckDuckGoSearchRun
26
+ from langchain_core.tools import tool
27
 
28
  # (Keep Constants as is)
29
  # --- Constants ---
30
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
31
 
32
+ # --- Initialize ASR Pipeline (for Audio Tool) ---
33
+ # Load the model once when the app starts for efficiency
34
+ try:
35
+ asr_pipeline = pipeline(
36
+ "automatic-speech-recognition",
37
+ model="openai/whisper-base",
38
+ torch_dtype=torch.float16, # Use float16 for faster inference
39
+ device_map="auto" # Use GPU if available
40
+ )
41
+ print("✅ ASR (Whisper) pipeline loaded successfully.")
42
+ except Exception as e:
43
+ print(f"⚠️ Warning: Could not load ASR pipeline. Audio tool will not work. Error: {e}")
44
+ asr_pipeline = None
45
+
46
+ # --- Tool Definitions ---
47
+
48
+ @tool
49
+ def search_tool(query: str) -> str:
50
+ """Calls DuckDuckGo search and returns the results."""
51
+ print(f"--- Calling Search Tool with query: {query} ---")
52
+ try:
53
+ search = DuckDuckGoSearchRun()
54
+ return search.run(query)
55
+ except Exception as e:
56
+ return f"Error running search: {e}"
57
+
58
+ @tool
59
+ def code_interpreter(code: str) -> str:
60
+ """
61
+ Executes a string of Python code and returns its stdout, stderr, and any error.
62
+ Use this for calculations, data manipulation, or any other Python operation.
63
+ The code runs in a sandboxed environment.
64
+ Note: 'pandas' and 'openpyxl' are available.
65
+ """
66
+ print(f"--- Calling Code Interpreter with code:\n{code}\n---")
67
+ output_stream = io.StringIO()
68
+ error_stream = io.StringIO()
69
+
70
+ try:
71
+ # Use contextlib to redirect stdout and stderr
72
+ with contextlib.redirect_stdout(output_stream), contextlib.redirect_stderr(error_stream):
73
+ # Execute the code. Provide 'pd' (pandas) in the globals
74
+ exec(code, {"pd": pd}, {})
75
+
76
+ stdout = output_stream.getvalue()
77
+ stderr = error_stream.getvalue()
78
+
79
+ if stderr:
80
+ return f"Error: {stderr}\nStdout: {stdout}"
81
+ return f"Success:\n{stdout}"
82
+
83
+ except Exception as e:
84
+ # Capture any exception during exec
85
+ return f"Execution failed with error: {str(e)}"
86
+
87
+ @tool
88
+ def read_file(path: str) -> str:
89
+ """Reads the content of a file at the specified path."""
90
+ print(f"--- Calling Read File Tool at path: {path} ---")
91
+ try:
92
+ with open(path, 'r', encoding='utf-8') as f:
93
+ return f.read()
94
+ except Exception as e:
95
+ return f"Error reading file {path}: {str(e)}"
96
+
97
+ @tool
98
+ def write_file(path: str, content: str) -> str:
99
+ """Writes the given content to a file at the specified path."""
100
+ print(f"--- Calling Write File Tool at path: {path} ---")
101
+ try:
102
+ # Ensure the directory exists
103
+ os.makedirs(os.path.dirname(path), exist_ok=True)
104
+
105
+ with open(path, 'w', encoding='utf-8') as f:
106
+ f.write(content)
107
+ return f"Successfully wrote to file {path}."
108
+ except Exception as e:
109
+ return f"Error writing to file {path}: {str(e)}"
110
+
111
+ @tool
112
+ def list_directory(path: str = ".") -> str:
113
+ """Lists the contents of a directory at the specified path."""
114
+ print(f"--- Calling List Directory Tool at path: {path} ---")
115
+ try:
116
+ files = os.listdir(path)
117
+ return "\n".join(files) if files else "Directory is empty."
118
+ except Exception as e:
119
+ return f"Error listing directory {path}: {str(e)}"
120
+
121
+ @tool
122
+ def audio_transcription_tool(file_path: str) -> str:
123
+ """
124
+ Transcribes an audio file (like .mp3 or .wav) and returns the text.
125
+ """
126
+ print(f"--- Calling Audio Transcription Tool at path: {file_path} ---")
127
+ if not asr_pipeline:
128
+ return "Error: Audio transcription pipeline is not available."
129
+ try:
130
+ if not os.path.exists(file_path):
131
+ # GAIA questions might provide relative paths, so we check
132
+ if os.path.exists(os.path.basename(file_path)):
133
+ file_path = os.path.basename(file_path)
134
+ else:
135
+ return f"Error: File not found at {file_path}"
136
+
137
+ # The pipeline handles file loading
138
+ transcription = asr_pipeline(file_path)
139
+ print("--- Transcription Complete ---")
140
+ return transcription["text"]
141
+ except Exception as e:
142
+ return f"Error during audio transcription: {str(e)}"
143
+
144
+ @tool
145
+ def get_youtube_transcript(video_url: str) -> str:
146
+ """
147
+ Fetches the transcript for a given YouTube video URL.
148
+ """
149
+ print(f"--- Calling YouTube Transcript Tool for URL: {video_url} ---")
150
+ try:
151
+ # Extract video ID from URL
152
+ video_id = video_url.split("v=")[1].split("&")[0]
153
+ transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
154
+
155
+ # Combine all transcript parts into one string
156
+ full_transcript = " ".join([item["text"] for item in transcript_list])
157
+ print("--- Transcript Fetched ---")
158
+ return full_transcript
159
+ except Exception as e:
160
+ return f"Error fetching YouTube transcript: {str(e)}"
161
+
162
+ @tool
163
+ def scrape_web_page(url: str) -> str:
164
+ """
165
+ Fetches the full text content of a given web page URL.
166
+ """
167
+ print(f"--- Calling Web Scraper Tool for URL: {url} ---")
168
+ try:
169
+ response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
170
+ response.raise_for_status() # Raise an error for bad responses
171
+
172
+ soup = BeautifulSoup(response.text, 'html.parser')
173
+
174
+ # Remove script/style tags
175
+ for script in soup(["script", "style", "nav", "footer", "aside"]):
176
+ script.extract()
177
+
178
+ text = soup.get_text()
179
+
180
+ # Clean up whitespace
181
+ lines = (line.strip() for line in text.splitlines())
182
+ chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
183
+ text = '\n'.join(chunk for chunk in chunks if chunk)
184
+ print("--- Web Page Scraped ---")
185
+ return text[:8000] # Return first 8000 chars to avoid overload
186
+ except Exception as e:
187
+ return f"Error scraping web page: {str(e)}"
188
+
189
+ # --- End of Tool Definitions ---
190
+
191
+
192
+ # --- LangGraph Agent State ---
193
+ class AgentState(TypedDict):
194
+ messages: Annotated[list[AnyMessage], add_messages]
195
+
196
+
197
  # --- Basic Agent Definition ---
198
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
199
  class BasicAgent:
200
+
201
  def __init__(self):
202
+ print("BasicAgent (LangGraph) initialized.")
203
+
204
+ # 1. Get API Token from Space Secrets
205
+ # Go to your Space's Settings -> Secrets and add HUGGINGFACEHUB_API_TOKEN
206
+ HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
207
+ if not HUGGINGFACEHUB_API_TOKEN:
208
+ raise ValueError("HUGGINGFACEHUB_API_TOKEN secret is not set! Please add it to your Space secrets.")
209
+
210
+ # 2. Initialize Tools
211
+ self.tools = [
212
+ search_tool,
213
+ code_interpreter,
214
+ read_file,
215
+ write_file,
216
+ list_directory,
217
+ audio_transcription_tool,
218
+ get_youtube_transcript,
219
+ scrape_web_page
220
+ ]
221
+
222
+ # 3. Initialize the LLM
223
+ # We wrap HuggingFaceEndpoint in ChatHuggingFace for LangChain compatibility
224
+ llm = HuggingFaceEndpoint(
225
+ repo_id="HuggingFaceH4/zephyr-7b-beta", # A good, fast model for tool use
226
+ # repo_id="Qwen/Qwen2.5-Coder-32B-Instruct", # Your chosen model
227
+ huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
228
+ max_new_tokens=1500,
229
+ temperature=0.1,
230
+ )
231
+ chat_llm = ChatHuggingFace(llm=llm)
232
+
233
+ # 4. Bind tools to the LLM
234
+ self.llm_with_tools = chat_llm.bind_tools(self.tools)
235
+
236
+ # 5. Define the Agent Node
237
+ def agent_node(state: AgentState):
238
+ print("--- Running Agent Node ---")
239
+ ai_message = self.llm_with_tools.invoke(state["messages"])
240
+ print(f"AI Message: {ai_message.pretty_repr()}")
241
+ return {"messages": [ai_message]}
242
+
243
+ # 6. Define the Tool Node
244
+ tool_node = ToolNode(self.tools)
245
+
246
+ # 7. Create the Graph
247
+ graph_builder = StateGraph(AgentState)
248
+
249
+ # Add the nodes
250
+ graph_builder.add_node("agent", agent_node)
251
+ graph_builder.add_node("tools", tool_node)
252
+
253
+ # Define the edges
254
+ graph_builder.add_edge(START, "agent")
255
+
256
+ # Add the conditional edge
257
+ graph_builder.add_conditional_edges(
258
+ "agent",
259
+ tools_condition,
260
+ {
261
+ "tools": "tools",
262
+ "__end__": "__end__",
263
+ },
264
+ )
265
+ graph_builder.add_edge("tools", "agent")
266
+
267
+ # 8. Compile the graph and store it
268
+ self.graph = graph_builder.compile()
269
+ print("Graph compiled successfully with all tools.")
270
+
271
  def __call__(self, question: str) -> str:
272
  print(f"Agent received question (first 50 chars): {question[:50]}...")
273
+
274
+ # Prepare the input for the graph
275
+ graph_input = {"messages": [HumanMessage(content=question)]}
276
+
277
+ final_answer = ""
278
+
279
+ # Stream the graph's execution
280
+ try:
281
+ # We use stream_mode="values" to get the full state at each step
282
+ for event in self.graph.stream(graph_input, stream_mode="values"):
283
+ last_message = event["messages"][-1]
284
+
285
+ # Update the final answer with the latest AI message
286
+ if isinstance(last_message, AIMessage):
287
+ if last_message.content:
288
+ print(f"AI: {last_message.content[:200]}...")
289
+ final_answer = last_message.content
290
+ elif isinstance(last_message, ToolMessage):
291
+ print(f"Tool Result: {last_message.content[:200]}...")
292
+
293
+ print(f"Agent returning final answer: {final_answer}")
294
+ return final_answer
295
+
296
+ except Exception as e:
297
+ print(f"Error running agent graph: {e}")
298
+ return f"AGENT ERROR: {e}"
299
+
300
+
301
+ # --- (Original Template Code Starts Here) ---
302
 
303
  def run_and_submit_all( profile: gr.OAuthProfile | None):
304
  """
 
307
  """
308
  # --- Determine HF Space Runtime URL and Repo URL ---
309
  space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
310
  if profile:
311
  username= f"{profile.username}"
312
  print(f"User logged in: {username}")
313
  else:
314
  print("User not logged in.")
315
  return "Please Login to Hugging Face with the button.", None
316
+
317
  api_url = DEFAULT_API_URL
318
  questions_url = f"{api_url}/questions"
319
  submit_url = f"{api_url}/submit"
320
 
321
  # 1. Instantiate Agent ( modify this part to create your agent)
322
+ print("Initializing agent...")
323
  try:
324
  agent = BasicAgent()
325
  except Exception as e:
326
  print(f"Error instantiating agent: {e}")
327
  return f"Error initializing agent: {e}", None
328
+ print("Agent initialized successfully.")
329
+
330
  # In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
331
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
332
+ print(f"Agent code URL: {agent_code}")
333
 
334
  # 2. Fetch Questions
335
  print(f"Fetching questions from: {questions_url}")
 
356
  results_log = []
357
  answers_payload = []
358
  print(f"Running agent on {len(questions_data)} questions...")
359
+
360
+ # Set a limit for testing. Remove '[:question_limit]' for the full submission.
361
+ # question_limit = 10
362
+
363
+ for item in questions_data: # [:question_limit]: # Using limit here
364
  task_id = item.get("task_id")
365
  question_text = item.get("question")
366
  if not task_id or question_text is None:
367
  print(f"Skipping item with missing task_id or question: {item}")
368
  continue
369
+
370
+ print(f"\n--- Running Task {task_id} ---")
371
  try:
372
  submitted_answer = agent(question_text)
373
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
374
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
375
+ print(f"--- Task {task_id} Complete ---")
376
  except Exception as e:
377
  print(f"Error running agent on task {task_id}: {e}")
378
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
379
+
380
  if not answers_payload:
381
  print("Agent did not produce any answers to submit.")
382
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
429
  results_df = pd.DataFrame(results_log)
430
  return status_message, results_df
431
 
 
432
  # --- Build Gradio Interface using Blocks ---
433
  with gr.Blocks() as demo:
434
  gr.Markdown("# Basic Agent Evaluation Runner")
435
  gr.Markdown(
436
  """
437
  **Instructions:**
 
438
  1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
439
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
440
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
 
441
  ---
442
  **Disclaimers:**
443
  Once you click the "Submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
444
  This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the delay of the submit button, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
445
  """
446
  )
 
447
  gr.LoginButton()
 
448
  run_button = gr.Button("Run Evaluation & Submit All Answers")
 
449
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
450
  # Removed max_rows=10 from DataFrame constructor
451
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
452
+
453
  run_button.click(
454
  fn=run_and_submit_all,
455
  outputs=[status_output, results_table]
 
457
 
458
  if __name__ == "__main__":
459
  print("\n" + "-"*30 + " App Starting " + "-"*30)
460
+
461
  # Check for SPACE_HOST and SPACE_ID at startup for information
462
  space_host_startup = os.getenv("SPACE_HOST")
463
  space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
464
+
465
  if space_host_startup:
466
  print(f"✅ SPACE_HOST found: {space_host_startup}")
467
  print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
468
  else:
469
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
470
+
471
  if space_id_startup: # Print repo URLs if SPACE_ID is found
472
  print(f"✅ SPACE_ID found: {space_id_startup}")
473
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
474
  print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
475
  else:
476
  print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
477
+
478
  print("-"*(60 + len(" App Starting ")) + "\n")
 
479
  print("Launching Gradio Interface for Basic Agent Evaluation...")
480
+ demo.launch(debug=True, share=False)
481
+
requirements.txt CHANGED
@@ -1,2 +1,15 @@
1
  gradio
2
- requests
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  gradio
2
+ requests
3
+ pandas
4
+ langchain
5
+ langgraph
6
+ langchain-huggingface
7
+ langchain-community
8
+ duckduckgo-search
9
+ torch
10
+ transformers
11
+ torchaudio
12
+ librosa
13
+ youtube-transcript-api
14
+ beautifulsoup4
15
+ openpyxl