lp128396 committed on
Commit
816610e
·
verified ·
1 Parent(s): a248fdb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -26
app.py CHANGED
@@ -1,55 +1,44 @@
1
  import torch
2
  import os
3
- import gradio as gr
4
  import requests
5
- import inspect
6
  import pandas as pd
 
7
  from transformers import pipeline
8
 
9
-
10
- # (Keep Constants as is)
11
- # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
 
14
- # --- Basic Agent Definition ---
15
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
16
class HybridAgent:
    """Agent that answers file-based questions with pandas and text-only
    questions with a small language model."""

    def __init__(self):
        # BUGFIX: flan-t5 is a seq2seq (encoder-decoder) model, so it must run
        # under the "text2text-generation" pipeline; the causal
        # "text-generation" pipeline rejects T5-class models at load time.
        self.llm = pipeline("text2text-generation", model="google/flan-t5-small")

    def __call__(self, question: str, task_id: str = None) -> str:
        """
        Decide how to answer:
        - If there is a file, fetch it and try to compute the answer with pandas
        - Otherwise, use the language model
        """
        # --- Step 1: Handle file-based questions ---
        if task_id:
            # timeout so a hung scoring server cannot block the agent forever
            resp = requests.get(f"{DEFAULT_API_URL}/files/{task_id}", timeout=30)
            if resp.status_code == 200:
                file_content = resp.text
                try:
                    # BUGFIX: StringIO was used below without ever being
                    # imported, raising NameError on every CSV task. A local
                    # import keeps this block self-contained.
                    from io import StringIO

                    # Try reading it as CSV
                    df = pd.read_csv(StringIO(file_content))

                    # Very simple heuristics on the first column
                    if "average" in question.lower() or "mean" in question.lower():
                        return str(df.iloc[:, 0].mean())
                    if "sum" in question.lower() or "total" in question.lower():
                        return str(df.iloc[:, 0].sum())

                    # Otherwise just return first few rows (fallback)
                    return df.head().to_string(index=False)
                except Exception:
                    # If not CSV or parsing fails, just give the raw file text
                    return file_content.strip()

        # --- Step 2: Handle text-only questions with LLM ---
        prompt = f"Answer this question briefly and only with the final answer:\n{question}"
        result = self.llm(prompt, max_length=50)[0]["generated_text"]
        return result.strip()
52
-
 
 
53
 
54
  def run_and_submit_all( profile: gr.OAuthProfile | None):
55
  """
 
1
  import torch
2
  import os
 
3
  import requests
4
+ import gradio as gr
5
  import pandas as pd
6
+ from io import StringIO
7
  from transformers import pipeline
8
 
 
 
 
9
# Base URL of the Agents-course Unit 4 scoring service; used below to fetch
# per-task files via GET {DEFAULT_API_URL}/files/{task_id}.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
 
 
11
class HybridAgent:
    """Agent that answers file-based questions with pandas and text-only
    questions with a small language model.

    Call with ``agent(question, task_id)``; returns the answer as a string.
    """

    def __init__(self):
        # BUGFIX: flan-t5 is a seq2seq (encoder-decoder) model, so it must run
        # under the "text2text-generation" pipeline; the causal
        # "text-generation" pipeline rejects T5-class models at load time.
        self.llm = pipeline("text2text-generation", model="google/flan-t5-small")

    def __call__(self, question: str, task_id: str = None) -> str:
        """Answer *question*, preferring a file-based answer when *task_id* is given.

        - Fetches the task file from the scoring API; when it parses as CSV,
          answers simple average/sum questions on the first column, otherwise
          returns a preview of the table (or the raw text if not CSV).
        - Falls back to the language model for text-only questions.
        """
        print("=== Running on task:", task_id)
        print("Question:", question)

        if task_id:
            # timeout so a hung scoring server cannot block the agent forever
            resp = requests.get(f"{DEFAULT_API_URL}/files/{task_id}", timeout=30)
            if resp.status_code == 200:
                file_content = resp.text
                print("File content preview:", file_content[:200])
                try:
                    df = pd.read_csv(StringIO(file_content))
                    q_lower = question.lower()
                    if "average" in q_lower or "mean" in q_lower:
                        return str(df.iloc[:, 0].mean())
                    if "sum" in q_lower or "total" in q_lower:
                        return str(df.iloc[:, 0].sum())
                    # fallback: show a small preview of the table
                    return df.head().to_string(index=False)
                except Exception as e:
                    print("Error reading CSV, returning raw:", e)
                    return file_content.strip()

        prompt = f"Answer this question briefly and only with the final answer:\n{question}"
        # max_new_tokens replaces the deprecated max_length kwarg and bounds
        # only the generated continuation, not prompt + output combined.
        result = self.llm(prompt, max_new_tokens=50)[0]["generated_text"]
        cleaned = result.replace("Final Answer:", "").replace("Answer:", "").strip()
        print("Raw LLM response:", result)
        print("Cleaned answer:", cleaned)
        return cleaned
42
 
43
  def run_and_submit_all( profile: gr.OAuthProfile | None):
44
  """