mujtabarizvi committed on
Commit
3bf855f
·
verified ·
1 Parent(s): 370af5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +242 -54
app.py CHANGED
@@ -3,32 +3,214 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
6
 
7
- # (Keep Constants as is)
8
- # --- Constants ---
9
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- # --- Basic Agent Definition ---
12
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
- class BasicAgent:
14
- def __init__(self):
15
- print("BasicAgent initialized.")
16
  def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
 
22
- def run_and_submit_all( profile: gr.OAuthProfile | None):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  """
24
- Fetches all questions, runs the BasicAgent on them, submits all answers,
25
  and displays the results.
26
  """
27
- # --- Determine HF Space Runtime URL and Repo URL ---
28
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
29
-
30
  if profile:
31
- username= f"{profile.username}"
32
  print(f"User logged in: {username}")
33
  else:
34
  print("User not logged in.")
@@ -38,36 +220,39 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
38
  questions_url = f"{api_url}/questions"
39
  submit_url = f"{api_url}/submit"
40
 
41
- # 1. Instantiate Agent ( modify this part to create your agent)
42
  try:
43
- agent = BasicAgent()
 
 
 
 
 
 
44
  except Exception as e:
45
  print(f"Error instantiating agent: {e}")
46
  return f"Error initializing agent: {e}", None
47
- # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
48
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
- print(agent_code)
50
 
51
  # 2. Fetch Questions
52
  print(f"Fetching questions from: {questions_url}")
53
  try:
54
- response = requests.get(questions_url, timeout=15)
55
  response.raise_for_status()
56
  questions_data = response.json()
57
  if not questions_data:
58
- print("Fetched questions list is empty.")
59
- return "Fetched questions list is empty or invalid format.", None
60
  print(f"Fetched {len(questions_data)} questions.")
61
  except requests.exceptions.RequestException as e:
62
  print(f"Error fetching questions: {e}")
63
  return f"Error fetching questions: {e}", None
64
  except requests.exceptions.JSONDecodeError as e:
65
- print(f"Error decoding JSON response from questions endpoint: {e}")
66
- print(f"Response text: {response.text[:500]}")
67
- return f"Error decoding server response for questions: {e}", None
68
- except Exception as e:
69
- print(f"An unexpected error occurred fetching questions: {e}")
70
- return f"An unexpected error occurred fetching questions: {e}", None
71
 
72
  # 3. Run your Agent
73
  results_log = []
@@ -80,18 +265,20 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
80
  print(f"Skipping item with missing task_id or question: {item}")
81
  continue
82
  try:
 
83
  submitted_answer = agent(question_text)
84
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
86
  except Exception as e:
87
- print(f"Error running agent on task {task_id}: {e}")
88
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
89
 
90
  if not answers_payload:
91
  print("Agent did not produce any answers to submit.")
92
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
93
 
94
- # 4. Prepare Submission
95
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
96
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
97
  print(status_update)
@@ -99,7 +286,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
99
  # 5. Submit
100
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
101
  try:
102
- response = requests.post(submit_url, json=submission_data, timeout=60)
103
  response.raise_for_status()
104
  result_data = response.json()
105
  final_status = (
@@ -139,30 +326,28 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
139
  results_df = pd.DataFrame(results_log)
140
  return status_message, results_df
141
 
142
-
143
- # --- Build Gradio Interface using Blocks ---
144
  with gr.Blocks() as demo:
145
- gr.Markdown("# Basic Agent Evaluation Runner")
146
  gr.Markdown(
147
  """
148
  **Instructions:**
149
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
150
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
151
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
 
152
  ---
153
  **Disclaimers:**
154
- Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
155
- This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
 
 
156
  """
157
  )
158
-
159
  gr.LoginButton()
160
-
161
  run_button = gr.Button("Run Evaluation & Submit All Answers")
162
-
163
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
164
- # Removed max_rows=10 from DataFrame constructor
165
- results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
166
 
167
  run_button.click(
168
  fn=run_and_submit_all,
@@ -171,24 +356,27 @@ with gr.Blocks() as demo:
171
 
172
  if __name__ == "__main__":
173
  print("\n" + "-"*30 + " App Starting " + "-"*30)
174
- # Check for SPACE_HOST and SPACE_ID at startup for information
175
  space_host_startup = os.getenv("SPACE_HOST")
176
- space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
177
-
178
  if space_host_startup:
179
  print(f"✅ SPACE_HOST found: {space_host_startup}")
180
  print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
181
  else:
182
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
183
-
184
- if space_id_startup: # Print repo URLs if SPACE_ID is found
185
  print(f"✅ SPACE_ID found: {space_id_startup}")
186
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
187
  print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
188
  else:
189
  print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
190
 
191
- print("-"*(60 + len(" App Starting ")) + "\n")
 
 
 
 
 
192
 
193
- print("Launching Gradio Interface for Basic Agent Evaluation...")
 
194
  demo.launch(debug=True, share=False)
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
import re  # For parsing LLM output

# --- HF Inference API for LLM ---
# FIX: `huggingface_hub` exposes no `HfInference` class.  The inference client
# that provides the `text_generation()` method used by the agent below is
# `InferenceClient`.  Importing the non-existent name raised ImportError at
# startup, which made the whole Space unusable.
from huggingface_hub import InferenceClient
# You can choose a different model, but make sure it's good at instruction following and ReAct-style prompting.
# Zephyr-7B-beta or Mistral-7B-Instruct are good choices available on the free inference API.
# Starling-LM-7B-beta is also excellent if available and performant enough.
LLM_MODEL = "HuggingFaceH4/zephyr-7b-beta"  # or "mistralai/Mistral-7B-Instruct-v0.2"
# Ensure you have a Hugging Face token set in your space's secrets if using certain models,
# though many popular ones work without it for basic inference.
# Name: HF_TOKEN, Value: your_hf_token_here (read access is usually enough for inference)
try:
    hf_token = os.getenv("HF_TOKEN")  # may be None; many public models allow anonymous inference
    llm_client = InferenceClient(model=LLM_MODEL, token=hf_token)
except Exception as e:
    print(f"Error initializing InferenceClient: {e}")
    llm_client = None  # checked later; the app degrades to an explicit error message
23
+
24
+ # --- Tools ---
25
+ # 1. Search Tool (using DuckDuckGo)
26
+ from duckduckgo_search import DDGS
27
+
28
+ def search_tool(query: str) -> str:
29
+ """
30
+ Searches the web using DuckDuckGo for a given query and returns the top results.
31
+ Args:
32
+ query (str): The search query.
33
+ Returns:
34
+ str: A string containing the search results.
35
+ """
36
+ print(f"Tool: search_tool, Query: {query}")
37
+ try:
38
+ with DDGS() as ddgs:
39
+ results = ddgs.text(query, max_results=3) # Get top 3 results
40
+ if results:
41
+ return "\n".join([f"Title: {r['title']}\nSnippet: {r['body']}\nURL: {r['href']}" for r in results])
42
+ else:
43
+ return "No results found for your query."
44
+ except Exception as e:
45
+ print(f"Error in search_tool: {e}")
46
+ return f"Error performing search: {str(e)}"
47
+
48
# 2. Calculator Tool
import ast  # stdlib parser used for safe arithmetic evaluation (replaces raw eval)

# Names and callables the calculator is allowed to resolve.  This mirrors the
# environment the old eval()-based version exposed (sqrt and pi only).
_CALC_NAMES = {"pi": 3.1415926535}
_CALC_FUNCS = {"sqrt": lambda x: x ** 0.5}

def _eval_math_node(node):
    """Recursively evaluate one node of a parsed arithmetic expression.

    Permits numeric literals, the binary operators + - * / // % **, unary
    +/-, the constant `pi`, and the function `sqrt`.  Anything else raises
    ValueError so the caller can report a clean error string.
    """
    if isinstance(node, ast.Constant):
        if isinstance(node.value, (int, float)):
            return node.value
        raise ValueError(f"unsupported constant {node.value!r}")
    if isinstance(node, ast.Name):
        try:
            return _CALC_NAMES[node.id]
        except KeyError:
            raise ValueError(f"unknown name '{node.id}'") from None
    if isinstance(node, ast.UnaryOp) and isinstance(node.op, (ast.UAdd, ast.USub)):
        operand = _eval_math_node(node.operand)
        return -operand if isinstance(node.op, ast.USub) else +operand
    if isinstance(node, ast.BinOp):
        left = _eval_math_node(node.left)
        right = _eval_math_node(node.right)
        binops = {
            ast.Add: lambda a, b: a + b,
            ast.Sub: lambda a, b: a - b,
            ast.Mult: lambda a, b: a * b,
            ast.Div: lambda a, b: a / b,
            ast.FloorDiv: lambda a, b: a // b,
            ast.Mod: lambda a, b: a % b,
            ast.Pow: lambda a, b: a ** b,
        }
        op_type = type(node.op)
        if op_type in binops:
            return binops[op_type](left, right)
        raise ValueError(f"unsupported operator {op_type.__name__}")
    if isinstance(node, ast.Call):
        if (isinstance(node.func, ast.Name) and node.func.id in _CALC_FUNCS
                and not node.keywords):
            args = [_eval_math_node(a) for a in node.args]
            return _CALC_FUNCS[node.func.id](*args)
        raise ValueError("unsupported function call")
    raise ValueError(f"unsupported syntax: {type(node).__name__}")

def calculator_tool(expression: str) -> str:
    """
    Calculates the result of a mathematical expression.
    Args:
        expression (str): The mathematical expression to evaluate (e.g., "2+2", "100*3.14/4").
                          Supports + - * / // % ** and parentheses, plus sqrt(x) and pi.
    Returns:
        str: The result of the calculation or an error message.
    """
    print(f"Tool: calculator_tool, Expression: {expression}")
    try:
        # FIX: the previous version passed LLM-produced text to eval() behind a
        # character check that was dead code (it ended in `pass`).  A restricted
        # eval can still be escaped; walking an AST whitelist cannot execute
        # anything that is not explicitly allowed above.
        tree = ast.parse(expression, mode="eval")
        result = _eval_math_node(tree.body)
        return str(result)
    except Exception as e:
        print(f"Error in calculator_tool: {e}")
        return f"Error calculating: {str(e)}. Ensure the expression is valid math."
80
+
81
# --- Agent Definition ---
class ReActAgent:
    """A minimal ReAct (Thought -> Action -> Observation) loop agent.

    Each iteration sends the question plus the accumulated scratchpad to the
    LLM, then either returns the text after "Final Answer:" or executes the
    tool named in an "Action: tool[input]" line and appends the observation.
    """

    def __init__(self, llm_client, tools: dict, max_iterations: int = 7):
        """Build the agent and its ReAct prompt.

        Args:
            llm_client: client exposing `text_generation(prompt, ...)`.
            tools (dict): tool name -> callable taking and returning str; each
                tool's docstring (via `inspect.getdoc`) is injected into the
                prompt as its description.
            max_iterations (int): cap on Thought/Action/Observation rounds.

        Raises:
            ValueError: if `llm_client` is None (e.g. client init failed).
        """
        print("ReActAgent initialized.")
        if llm_client is None:
            raise ValueError("LLM client not initialized. Check HF_TOKEN and model availability.")
        self.llm = llm_client
        self.tools = tools
        self.max_iterations = max_iterations
        self.stop_pattern = "Final Answer:"

        # Construct tool descriptions for the prompt
        self.tool_descriptions = "\n".join([
            f"- {name}: {inspect.getdoc(func)}"
            for name, func in tools.items()
        ])
        self.tool_names = ", ".join(tools.keys())

        # This is the core ReAct prompt template.
        # NOTE: the f-string interpolates tool info now; {question} and
        # {scratchpad} are left as plain placeholders for .format() later.
        self.react_prompt_template = inspect.cleandoc(f"""
        You are a helpful and observant AI assistant. Your goal is to answer the following question accurately.
        You must use a step-by-step thinking process (Thought, Action, Observation).

        Available tools:
        {self.tool_descriptions}

        Use the following format:
        Question: the input question you must answer
        Thought: You should always think about what to do.
        Action: The action to take, should be one of [{self.tool_names}]. The input to the tool is between the brackets. For example: search_tool[query] or calculator_tool[expression].
        Observation: The result of the action.
        ... (this Thought/Action/Observation sequence can repeat up to {self.max_iterations} times)
        Thought: I now know the final answer.
        Final Answer: The final answer to the original input question.

        Begin!
        """) + "\nQuestion: {question}\n{scratchpad}"

    def run_llm(self, prompt: str) -> str:
        """Call the LLM once; return stripped text or an error string.

        Errors are returned (not raised) so the caller's loop can decide how
        to proceed.
        """
        try:
            # print(f"\n--- LLM Prompt ---\n{prompt}\n--- End LLM Prompt ---")
            # Parameters for the LLM call
            # `max_new_tokens` is important to give the LLM enough space to think and provide an answer.
            # `temperature` can be low for more deterministic ReAct steps.
            # `stop_sequences` can help control generation if the model supports it well.
            response = self.llm.text_generation(
                prompt,
                max_new_tokens=512,  # Increased to allow for longer thought processes
                temperature=0.2,  # Lower for more factual/less creative ReAct steps
                do_sample=True,  # Required if temperature is not 1.0
                # stop_sequences=["Observation:", "\nThought:", self.stop_pattern] # Helps stop at logical points
                # Using stop_sequences can be tricky and model-dependent. Simpler to parse output.
            )
            # print(f"--- LLM Raw Response ---\n{response}\n--- End LLM Raw Response ---")
            return response.strip()
        except Exception as e:
            print(f"Error during LLM call: {e}")
            return f"Error generating response: {str(e)}"

    def __call__(self, question: str) -> str:
        """Run the ReAct loop on `question` and return the agent's answer.

        Returns the text after "Final Answer:" when found; otherwise, after
        `max_iterations` rounds, falls back to the last "Thought:" seen in the
        scratchpad or a fixed failure message.
        """
        print(f"ReActAgent received question (first 100 chars): {question[:100]}...")

        scratchpad = ""
        current_prompt = self.react_prompt_template.format(question=question, scratchpad=scratchpad)

        for i in range(self.max_iterations):
            print(f"\nIteration {i+1}")
            llm_output = self.run_llm(current_prompt)

            if not llm_output:  # Handle cases where LLM returns empty or error
                print("LLM returned empty or error, stopping.")
                return "Agent Error: LLM failed to respond."

            scratchpad += llm_output + "\n"  # Add LLM's entire unfiltered output to scratchpad

            # Check for Final Answer
            final_answer_match = re.search(r"Final Answer:\s*(.*)", llm_output, re.DOTALL | re.IGNORECASE)
            if final_answer_match:
                answer = final_answer_match.group(1).strip()
                print(f"Found Final Answer: {answer}")
                return answer

            # Parse Action
            # Regex to capture: Action: tool_name[input]
            action_match = re.search(r"Action:\s*([a-zA-Z_0-9]+)\[(.*?)\]", llm_output, re.DOTALL)
            if action_match:
                tool_name = action_match.group(1).strip()
                tool_input = action_match.group(2).strip()

                if tool_name in self.tools:
                    print(f"Executing Tool: {tool_name}, Input: {tool_input}")
                    try:
                        observation = self.tools[tool_name](tool_input)
                    except Exception as e:
                        observation = f"Error executing tool {tool_name}: {e}"
                    print(f"Observation: {observation[:200]}...")  # Print truncated observation
                    scratchpad += f"Observation: {observation}\n"
                else:
                    print(f"Unknown tool: {tool_name}")
                    scratchpad += f"Observation: Error - Unknown tool '{tool_name}'. Available tools: {self.tool_names}\n"
            else:
                # If no action, it might be just a thought, or malformed. Add the thought to scratchpad.
                # Or it might be the LLM directly trying to answer without "Final Answer:"
                # We assume the LLM is trying to continue the thought process or has given up.
                print("No valid action found in LLM output for this iteration.")
                # If the LLM isn't producing actions, it might be stuck or directly answering.
                # We will let the loop continue, hoping it recovers or hits max_iterations/Final Answer.
                # If it's a malformed output that isn't a Final Answer, it will just be added to scratchpad.

            current_prompt = self.react_prompt_template.format(question=question, scratchpad=scratchpad)

        print("Max iterations reached. Returning current scratchpad or best guess.")
        # If max iterations reached without "Final Answer:", try to extract a plausible answer from the last thought
        # or just return a message. This is a fallback.
        last_thought_match = re.findall(r"Thought:\s*(.*)", scratchpad, re.IGNORECASE)
        if last_thought_match:
            return f"Max iterations reached. Last thought: {last_thought_match[-1].strip()}"
        return "Agent failed to find an answer within the iteration limit."
200
+
201
+
202
+ # --- Constants (from template) ---
203
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
204
+
205
+ # --- Main Execution Logic (from template, modified to use ReActAgent) ---
206
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
207
  """
208
+ Fetches all questions, runs the ReActAgent on them, submits all answers,
209
  and displays the results.
210
  """
211
+ space_id = os.getenv("SPACE_ID")
 
 
212
  if profile:
213
+ username = f"{profile.username}"
214
  print(f"User logged in: {username}")
215
  else:
216
  print("User not logged in.")
 
220
  questions_url = f"{api_url}/questions"
221
  submit_url = f"{api_url}/submit"
222
 
223
+ # 1. Instantiate Agent
224
  try:
225
+ available_tools = {
226
+ "search_tool": search_tool,
227
+ "calculator_tool": calculator_tool,
228
+ }
229
+ if llm_client is None: # Check if llm_client was initialized
230
+ return "LLM Client could not be initialized. Check logs and HF_TOKEN.", None
231
+ agent = ReActAgent(llm_client=llm_client, tools=available_tools)
232
  except Exception as e:
233
  print(f"Error instantiating agent: {e}")
234
  return f"Error initializing agent: {e}", None
235
+
236
+ agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Code not available (SPACE_ID not set)"
237
+ print(f"Agent code link: {agent_code}")
238
 
239
  # 2. Fetch Questions
240
  print(f"Fetching questions from: {questions_url}")
241
  try:
242
+ response = requests.get(questions_url, timeout=20) # Increased timeout
243
  response.raise_for_status()
244
  questions_data = response.json()
245
  if not questions_data:
246
+ print("Fetched questions list is empty.")
247
+ return "Fetched questions list is empty or invalid format.", None
248
  print(f"Fetched {len(questions_data)} questions.")
249
  except requests.exceptions.RequestException as e:
250
  print(f"Error fetching questions: {e}")
251
  return f"Error fetching questions: {e}", None
252
  except requests.exceptions.JSONDecodeError as e:
253
+ print(f"Error decoding JSON response from questions endpoint: {e}")
254
+ print(f"Response text: {response.text[:500]}")
255
+ return f"Error decoding server response for questions: {e}", None
 
 
 
256
 
257
  # 3. Run your Agent
258
  results_log = []
 
265
  print(f"Skipping item with missing task_id or question: {item}")
266
  continue
267
  try:
268
+ print(f"\n--- Processing Task ID: {task_id}, Question: {question_text[:100]}... ---")
269
  submitted_answer = agent(question_text)
270
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
271
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
272
+ print(f"Agent answer for task {task_id}: {submitted_answer[:100]}...")
273
  except Exception as e:
274
+ print(f"Error running agent on task {task_id}: {e}")
275
+ results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
276
 
277
  if not answers_payload:
278
  print("Agent did not produce any answers to submit.")
279
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
280
 
281
+ # 4. Prepare Submission
282
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
283
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
284
  print(status_update)
 
286
  # 5. Submit
287
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
288
  try:
289
+ response = requests.post(submit_url, json=submission_data, timeout=120) # Increased timeout for submission
290
  response.raise_for_status()
291
  result_data = response.json()
292
  final_status = (
 
326
  results_df = pd.DataFrame(results_log)
327
  return status_message, results_df
328
 
329
+ # --- Build Gradio Interface using Blocks (from template) ---
 
330
  with gr.Blocks() as demo:
331
+ gr.Markdown("# ReAct Agent Evaluation Runner (GAIA Modified)")
332
  gr.Markdown(
333
  """
334
  **Instructions:**
335
+ 1. This Space implements a ReAct (Reasoning-Action) agent using an LLM from the Hugging Face Inference API.
336
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
337
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
338
+ 4. The agent uses a search tool (DuckDuckGo) and a calculator tool.
339
  ---
340
  **Disclaimers:**
341
+ * LLM responses can be slow, and running through all questions will take time.
342
+ * The agent's performance depends heavily on the chosen LLM and the quality of its ReAct prompting.
343
+ * You may need to set an `HF_TOKEN` in your Space secrets if you use a gated model or encounter rate limits.
344
+ * The calculator tool uses `eval()` which has security implications if not carefully managed. For this specific benchmark it is a common simplification.
345
  """
346
  )
 
347
  gr.LoginButton()
 
348
  run_button = gr.Button("Run Evaluation & Submit All Answers")
 
349
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
350
+ results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True) # Removed max_rows
 
351
 
352
  run_button.click(
353
  fn=run_and_submit_all,
 
356
 
357
if __name__ == "__main__":
    # Startup diagnostics: report which Space environment variables are set
    # and whether the LLM client came up, then launch the Gradio UI.
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")
    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
    if space_id_startup:
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    # Warn loudly if the module-level LLM client failed to initialize; the
    # agent cannot answer anything without it.
    if llm_client is None:
        print("⚠️ LLM Client (HfInference) was not initialized. The agent will not work.")
        print(" Please check if you need to set the HF_TOKEN secret in your Space settings,")
        print(f" and ensure the model '{LLM_MODEL}' is accessible via the Inference API.")
    else:
        print(f"✅ LLM Client initialized with model: {LLM_MODEL}")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for ReAct Agent Evaluation...")
    demo.launch(debug=True, share=False)