AlexDGenu committed on
Commit
3528f7a
·
1 Parent(s): 88624d5

Refactor SmolAgent to utilize Hugging Face's InferenceClient for direct model inference, simplifying response handling and improving error reporting. Update requirements to remove smolagents package.

Browse files
Files changed (2) hide show
  1. app.py +28 -30
  2. requirements.txt +1 -3
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from smolagents import CodeAgent, InferenceClientModel
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
@@ -20,44 +20,42 @@ class SmolAgent:
20
  if not hf_token:
21
  raise ValueError("Hugging Face token not found. Please set HF_TOKEN environment variable in HF Spaces settings.")
22
 
23
- # 3. Initialize the SmolLM model
24
- model = InferenceClientModel(
25
- model_id="HuggingFaceTB/SmolLM-1.7B-Instruct",
26
  token=hf_token,
27
  )
28
-
29
- # 4. Replace your current BasicAgent with a smolagents.CodeAgent
30
- self._agent = CodeAgent(
31
- tools=[],
32
- model=model,
33
- instructions=SYSTEM_PROMPT,
34
- )
35
- print("SmolAgent initialized.")
36
 
37
  def __call__(self, question: str) -> str:
38
- print(f"Agent received question (first 50 chars): {question[:50]}...")
 
39
  try:
40
- # The system prompt is already set, just pass the question.
41
- full_response = self._agent.run(question)
42
- print(f"Agent full response: {full_response}")
43
-
44
- # Parse the response to extract the final answer
45
- if "FINAL ANSWER:" in full_response:
46
- # Split and get the part after "FINAL ANSWER:"
47
- final_answer_part = full_response.split("FINAL ANSWER:")[1]
48
- # Remove brackets if they exist and strip whitespace
 
 
 
 
49
  final_answer = final_answer_part.strip()
50
  if final_answer.startswith('[') and final_answer.endswith(']'):
51
  final_answer = final_answer[1:-1]
52
-
53
- print(f"Agent returning parsed answer: {final_answer}")
54
  return final_answer
55
  else:
56
- print("Warning: 'FINAL ANSWER:' not found in response. Returning full response.")
57
- return full_response
58
-
59
  except Exception as e:
60
- print(f"Error running agent: {e}")
 
 
61
  return f"AGENT ERROR: {e}"
62
 
63
 
@@ -189,14 +187,14 @@ with gr.Blocks() as demo:
189
  """
190
  **Instructions:**
191
 
192
- 1. This space uses SmolLM-1.7B-Instruct model with smolagents for question answering.
193
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
194
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
195
 
196
  ---
197
  **Model Information:**
198
  - Using: HuggingFaceTB/SmolLM-1.7B-Instruct
199
- - Framework: smolagents CodeAgent
200
  - No additional tools (pure reasoning)
201
 
202
  **Disclaimers:**
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from huggingface_hub import InferenceClient
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
 
20
  if not hf_token:
21
  raise ValueError("Hugging Face token not found. Please set HF_TOKEN environment variable in HF Spaces settings.")
22
 
23
+ self.client = InferenceClient(
24
+ model="HuggingFaceTB/SmolLM-1.7B-Instruct",
 
25
  token=hf_token,
26
  )
27
+ print("SmolAgent initialized with direct inference client.")
 
 
 
 
 
 
 
28
 
29
  def __call__(self, question: str) -> str:
30
+ prompt = f"{SYSTEM_PROMPT}\n\nQuestion: {question}\n\nAnswer:"
31
+ print(f"\n🪐 Running on question:\n{question}\n")
32
  try:
33
+ response = self.client.text_generation(
34
+ prompt,
35
+ max_new_tokens=100,
36
+ temperature=0.1,
37
+ stop=["\n"],
38
+ )
39
+ cleaned_response = response.strip()
40
+ print(f" Raw model response:\n{response}\n")
41
+ print(f"✅ Cleaned response to submit:\n{cleaned_response}\n")
42
+
43
+ # Parse the response to extract the final answer if it follows the template
44
+ if "FINAL ANSWER:" in cleaned_response:
45
+ final_answer_part = cleaned_response.split("FINAL ANSWER:")[1]
46
  final_answer = final_answer_part.strip()
47
  if final_answer.startswith('[') and final_answer.endswith(']'):
48
  final_answer = final_answer[1:-1]
49
+ print(f"✅ Extracted final answer: {final_answer}")
 
50
  return final_answer
51
  else:
52
+ print(f"⚠️ No 'FINAL ANSWER:' found, returning cleaned response")
53
+ return cleaned_response
54
+
55
  except Exception as e:
56
+ import traceback
57
+ traceback.print_exc()
58
+ print(f"❌ AGENT ERROR: {e}")
59
  return f"AGENT ERROR: {e}"
60
 
61
 
 
187
  """
188
  **Instructions:**
189
 
190
+ 1. This space uses SmolLM-1.7B-Instruct model with direct inference for question answering.
191
  2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
192
  3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
193
 
194
  ---
195
  **Model Information:**
196
  - Using: HuggingFaceTB/SmolLM-1.7B-Instruct
197
+ - Framework: Direct InferenceClient (optimized for single-line answers)
198
  - No additional tools (pure reasoning)
199
 
200
  **Disclaimers:**
requirements.txt CHANGED
@@ -1,6 +1,4 @@
1
- smolagents
2
  huggingface_hub
3
  gradio
4
  requests
5
- pandas
6
- duckduckgo-search
 
 
1
  huggingface_hub
2
  gradio
3
  requests
4
+ pandas