pmeyhoefer committed on
Commit
a00dcc4
·
verified ·
1 Parent(s): a7c132d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -40
app.py CHANGED
@@ -3,20 +3,19 @@ import gradio as gr
3
  import requests
4
  import pandas as pd
5
  from smolagents import CodeAgent, InferenceClientModel
6
- from huggingface_hub import HfFolder
7
 
8
  # --- Constants ---
9
- # API-URL deines Spaces (mit /api)
10
- DEFAULT_API_URL = "https://pmeyhoefer-final-assignment-template.hf.space/api"
11
- # Modell-ID und HF-Token
12
  MODEL_ID = os.getenv("SMOL_MODEL_ID", "meta-llama/Llama-3.3-70B-Instruct")
13
- HF_TOKEN = HfFolder.get_token()
14
 
15
  # --- Agent-Implementierung mit smolagents ---
16
  class BasicAgent:
17
  def __init__(self):
18
- if not HF_TOKEN:
19
- raise ValueError("Kein HF_HUB_TOKEN gesetzt! Lege ihn in den Space-Secrets an. ")
20
  # InferenceClientModel initialisieren
21
  self.model = InferenceClientModel(
22
  model_id=MODEL_ID,
@@ -37,21 +36,23 @@ class BasicAgent:
37
 
38
  # --- Evaluation & Submission ---
39
  def run_and_submit_all(profile: gr.OAuthProfile | None):
 
40
  if not profile:
41
  return "Bitte logge dich zuerst bei Hugging Face ein.", None
42
  username = profile.username
43
  space_id = os.getenv("SPACE_ID")
44
 
 
45
  questions_url = f"{DEFAULT_API_URL}/questions"
46
- submit_url = f"{DEFAULT_API_URL}/submit"
47
 
48
- # Agent instanziieren
49
  try:
50
  agent = BasicAgent()
51
  except Exception as e:
52
  return f"Fehler beim Initialisieren des Agents: {e}", None
53
 
54
- # Fragen abrufen
55
  try:
56
  resp = requests.get(questions_url, timeout=15)
57
  resp.raise_for_status()
@@ -59,26 +60,33 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
59
  except Exception as e:
60
  return f"Fehler beim Abrufen der Fragen: {e}", None
61
 
62
- # Antworten generieren
63
  records = []
64
  answers = []
65
  for item in questions:
66
- task_id = item.get("task_id")
67
- question_text = item.get("question") or item.get("instruction", "")
68
- if not task_id or not question_text:
69
  continue
70
- ans = agent(question_text)
71
- answers.append({"task_id": task_id, "submitted_answer": ans})
72
- records.append({"Task ID": task_id, "Question": question_text, "Antwort": ans})
 
 
 
 
 
 
 
73
 
74
  if not answers:
75
  return "Der Agent hat keine Antworten produziert.", pd.DataFrame(records)
76
 
77
- # Submission
78
  submission = {
79
- "username": username.strip(),
80
  "agent_code": f"https://huggingface.co/spaces/{space_id}/tree/main",
81
- "answers": answers
82
  }
83
  try:
84
  resp = requests.post(submit_url, json=submission, timeout=60)
@@ -87,7 +95,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
87
  status = (
88
  f"Erfolgreich eingereicht!\n"
89
  f"User: {result.get('username')}\n"
90
- f"Score: {result.get('score')}% ({result.get('correct_count')}/{result.get('total_attempted')})\n"
 
91
  f"Nachricht: {result.get('message')}"
92
  )
93
  except Exception as e:
@@ -99,27 +108,19 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
99
  # --- Gradio UI ---
100
  with gr.Blocks() as demo:
101
  gr.Markdown("# GAIA Agent Evaluation Runner")
102
- gr.Markdown(
103
- """
104
- 1. Lege in den Space-Secrets deinen `HF_HUB_TOKEN` an.
105
  2. Optional: Lege `SMOL_MODEL_ID` in den Secrets an (Standard: meta-llama/Llama-3.3-70B-Instruct).
106
  3. Aktualisiere `requirements.txt` mit:
107
- ```
108
- smolagents
109
- huggingface-hub
110
- gradio
111
- requests
112
- pandas
113
- ```
114
  4. Commit & Push, warte auf Deployment.
115
- 5. Logge dich ein und klicke auf **Run Evaluation & Submit All Answers**.
116
- """
117
- )
118
- gr.LoginButton()
119
- run_btn = gr.Button("Run Evaluation & Submit All Answers")
120
- status_out = gr.Textbox(label="Status / Ergebnis", lines=5, interactive=False)
121
- result_table = gr.DataFrame(label="Fragen & Antworten", wrap=True)
122
- run_btn.click(fn=run_and_submit_all, inputs=[], outputs=[status_out, result_table])
123
 
124
  if __name__ == "__main__":
125
- demo.launch(debug=True)
 
3
  import requests
4
  import pandas as pd
5
  from smolagents import CodeAgent, InferenceClientModel
 
6
 
7
  # --- Constants ---
8
+ # API-URL deines Spaces (ohne "/api"-Suffix)
9
+ DEFAULT_API_URL = "https://pmeyhoefer-final-assignment-template.hf.space"
10
+ # Modell-ID und HF-Token (bitte hier deinen HF Access Token einfügen)
11
  MODEL_ID = os.getenv("SMOL_MODEL_ID", "meta-llama/Llama-3.3-70B-Instruct")
12
+ HF_TOKEN = "<DEIN_HF_HUB_TOKEN>" # Ersetze durch deinen echten Hugging Face Token
13
 
14
  # --- Agent-Implementierung mit smolagents ---
15
  class BasicAgent:
16
  def __init__(self):
17
+ if not HF_TOKEN or HF_TOKEN.startswith("<"):
18
+ raise ValueError("Kein gültiger HF_HUB_TOKEN im Code gesetzt!")
19
  # InferenceClientModel initialisieren
20
  self.model = InferenceClientModel(
21
  model_id=MODEL_ID,
 
36
 
37
  # --- Evaluation & Submission ---
38
  def run_and_submit_all(profile: gr.OAuthProfile | None):
39
+ # 1. Authentifizierung
40
  if not profile:
41
  return "Bitte logge dich zuerst bei Hugging Face ein.", None
42
  username = profile.username
43
  space_id = os.getenv("SPACE_ID")
44
 
45
+ # 2. Endpunkte
46
  questions_url = f"{DEFAULT_API_URL}/questions"
47
+ submit_url = f"{DEFAULT_API_URL}/submit"
48
 
49
+ # 3. Agent instanziieren
50
  try:
51
  agent = BasicAgent()
52
  except Exception as e:
53
  return f"Fehler beim Initialisieren des Agents: {e}", None
54
 
55
+ # 4. Fragen abrufen
56
  try:
57
  resp = requests.get(questions_url, timeout=15)
58
  resp.raise_for_status()
 
60
  except Exception as e:
61
  return f"Fehler beim Abrufen der Fragen: {e}", None
62
 
63
+ # 5. Antworten generieren
64
  records = []
65
  answers = []
66
  for item in questions:
67
+ task_id = item.get("task_id")
68
+ question_txt = item.get("question") or item.get("instruction", "")
69
+ if not task_id or not question_txt:
70
  continue
71
+ ans = agent(question_txt)
72
+ answers.append({
73
+ "task_id": task_id,
74
+ "submitted_answer": ans
75
+ })
76
+ records.append({
77
+ "Task ID": task_id,
78
+ "Question": question_txt,
79
+ "Antwort": ans
80
+ })
81
 
82
  if not answers:
83
  return "Der Agent hat keine Antworten produziert.", pd.DataFrame(records)
84
 
85
+ # 6. Submission
86
  submission = {
87
+ "username": username.strip(),
88
  "agent_code": f"https://huggingface.co/spaces/{space_id}/tree/main",
89
+ "answers": answers
90
  }
91
  try:
92
  resp = requests.post(submit_url, json=submission, timeout=60)
 
95
  status = (
96
  f"Erfolgreich eingereicht!\n"
97
  f"User: {result.get('username')}\n"
98
+ f"Score: {result.get('score')}% "
99
+ f"({result.get('correct_count')}/{result.get('total_attempted')})\n"
100
  f"Nachricht: {result.get('message')}"
101
  )
102
  except Exception as e:
 
108
  # --- Gradio UI ---
109
  with gr.Blocks() as demo:
110
  gr.Markdown("# GAIA Agent Evaluation Runner")
111
+ gr.Markdown("""
112
+ 1. Füge in den Space-Secrets deinen `HF_HUB_TOKEN` ein (oder setze ihn direkt im Code oben).
 
113
  2. Optional: Lege `SMOL_MODEL_ID` in den Secrets an (Standard: meta-llama/Llama-3.3-70B-Instruct).
114
  3. Aktualisiere `requirements.txt` mit:
 
 
 
 
 
 
 
115
  4. Commit & Push, warte auf Deployment.
116
+ 5. Logge dich mit dem Hugging Face Button ein.
117
+ 6. Klicke auf **Run Evaluation & Submit All Answers**.
118
+ """)
119
+ gr.LoginButton()
120
+ run_btn = gr.Button("Run Evaluation & Submit All Answers")
121
+ status_out = gr.Textbox(label="Status / Ergebnis", lines=5, interactive=False)
122
+ result_table = gr.DataFrame(label="Fragen & Antworten", wrap=True)
123
+ run_btn.click(fn=run_and_submit_all, inputs=[], outputs=[status_out, result_table])
124
 
125
  if __name__ == "__main__":
126
+ demo.launch(debug=True)