lp128396 committed on
Commit
f61972d
·
verified ·
1 Parent(s): 816610e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -10,17 +10,20 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  class HybridAgent:
12
  def __init__(self):
 
13
  self.llm = pipeline("text-generation", model="google/flan-t5-small")
14
-
15
  def __call__(self, question: str, task_id: str = None) -> str:
16
- print("=== Running on task:", task_id)
 
17
  print("Question:", question)
18
 
 
19
  if task_id:
20
  resp = requests.get(f"{DEFAULT_API_URL}/files/{task_id}")
21
  if resp.status_code == 200:
22
  file_content = resp.text
23
- print("File content preview:", file_content[:200])
24
  try:
25
  df = pd.read_csv(StringIO(file_content))
26
  q_lower = question.lower()
@@ -30,14 +33,16 @@ class HybridAgent:
30
  return str(df.iloc[:, 0].sum())
31
  return df.head().to_string(index=False)
32
  except Exception as e:
33
- print("Error reading CSV, returning raw:", e)
34
  return file_content.strip()
35
 
 
36
  prompt = f"Answer this question briefly and only with the final answer:\n{question}"
37
  result = self.llm(prompt, max_length=50)[0]["generated_text"]
 
38
  cleaned = result.replace("Final Answer:", "").replace("Answer:", "").strip()
39
- print("Raw LLM response:", result)
40
- print("Cleaned answer:", cleaned)
41
  return cleaned
42
 
43
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
10
 
11
  class HybridAgent:
12
  def __init__(self):
13
+ # Load LLM for text QA
14
  self.llm = pipeline("text-generation", model="google/flan-t5-small")
15
+
16
  def __call__(self, question: str, task_id: str = None) -> str:
17
+ # Debug logs to inspect behavior
18
+ print("=== Task ID:", task_id)
19
  print("Question:", question)
20
 
21
+ # Handle file-based questions
22
  if task_id:
23
  resp = requests.get(f"{DEFAULT_API_URL}/files/{task_id}")
24
  if resp.status_code == 200:
25
  file_content = resp.text
26
+ print("File preview:", file_content[:200])
27
  try:
28
  df = pd.read_csv(StringIO(file_content))
29
  q_lower = question.lower()
 
33
  return str(df.iloc[:, 0].sum())
34
  return df.head().to_string(index=False)
35
  except Exception as e:
36
+ print("CSV reading error:", e)
37
  return file_content.strip()
38
 
39
+ # Fallback to LLM for text questions
40
  prompt = f"Answer this question briefly and only with the final answer:\n{question}"
41
  result = self.llm(prompt, max_length=50)[0]["generated_text"]
42
+ # Remove unwanted prefixes
43
  cleaned = result.replace("Final Answer:", "").replace("Answer:", "").strip()
44
+ print("Raw response:", result)
45
+ print("Clean answer:", cleaned)
46
  return cleaned
47
 
48
  def run_and_submit_all( profile: gr.OAuthProfile | None):