victor-johnson committed on
Commit
dc2c4fe
·
verified ·
1 Parent(s): 8b8cbf1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -40
app.py CHANGED
@@ -1,10 +1,8 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
6
  import re
7
- import json
8
  import textwrap
9
  from transformers import AutoTokenizer, AutoModelForCausalLM
10
  import torch
@@ -73,6 +71,7 @@ class BasicAgent:
73
  print(f"πŸ’‘ Agent raw: '{raw_answer[:80]}' β†’ clean: '{clean_ans}'")
74
  return clean_ans
75
 
 
76
  # --- Test Questions & Expected Answers ---
77
  QUESTIONS_AND_ANSWERS = [
78
  {"question": "What is the capital of France?", "expected": "Paris"},
@@ -82,6 +81,7 @@ QUESTIONS_AND_ANSWERS = [
82
  {"question": "Who wrote 'Romeo and Juliet'?", "expected": "Shakespeare"},
83
  ]
84
 
 
85
  # --- Submission Function ---
86
  def submit_answers(answers: list, token: str) -> dict:
87
  """
@@ -109,38 +109,35 @@ def submit_answers(answers: list, token: str) -> dict:
109
  except Exception as e:
110
  return {"success": False, "message": str(e)}
111
 
112
- # --- Main Run Function ---
113
- def run_and_submit_all(profile: gr.OAuthProfile | None = None, logged_in: bool | None = None):
114
- if not profile:
115
- return "❌ You must log in first.", pd.DataFrame()
116
-
117
- # Try to get the token using multiple approaches
118
- token = None
119
- # Try direct attributes
120
- if hasattr(profile, '_oauth_token'):
121
- token = profile._oauth_token
122
- elif hasattr(profile, 'oauth_token'):
123
- token_obj = profile.oauth_token
124
- if hasattr(token_obj, 'access_token'):
125
- token = token_obj.access_token
126
- elif isinstance(token_obj, dict) and 'access_token' in token_obj:
127
- token = token_obj['access_token']
128
- else:
129
- token = token_obj
130
-
131
  if not token:
132
- # Debug: print what attributes the profile actually has
133
- attrs = [attr for attr in dir(profile) if not attr.startswith('__')]
134
- print(f"πŸ” Profile attributes: {attrs}")
135
- return f"❌ No token found. Profile type: {type(profile).__name__}. Available attributes: {', '.join(attrs[:10])}", pd.DataFrame()
136
-
137
- # Instantiate the agent
 
138
  try:
139
  agent = BasicAgent()
140
  except Exception as e:
141
  return f"❌ Error instantiating agent: {e}", pd.DataFrame()
142
-
143
- # Collect answers
144
  results = []
145
  for qa in QUESTIONS_AND_ANSWERS:
146
  q = qa["question"]
@@ -150,14 +147,14 @@ def run_and_submit_all(profile: gr.OAuthProfile | None = None, logged_in: bool |
150
  except Exception as e:
151
  answer = f"[Error: {e}]"
152
  results.append({"question": q, "answer": answer, "expected": expected})
153
-
154
- # Build DataFrame
155
  df = pd.DataFrame(results)
156
-
157
- # Submit
158
  answers_list = [r["answer"] for r in results]
159
  submission_result = submit_answers(answers_list, token)
160
-
161
  if submission_result.get("success"):
162
  msg = submission_result.get("message", "βœ… Submission successful")
163
  return f"βœ… {msg}", df
@@ -165,24 +162,32 @@ def run_and_submit_all(profile: gr.OAuthProfile | None = None, logged_in: bool |
165
  msg = submission_result.get("message", "Unknown error")
166
  return f"❌ Submission failed: {msg}", df
167
 
 
168
  # --- Gradio Interface ---
169
  with gr.Blocks() as demo:
170
- gr.Markdown("# Basic Agent Evaluation Runner")
171
  gr.Markdown(
172
  """
173
  **Instructions:**
174
  1. Log in to your Hugging Face account.
175
- 2. Click 'Run Evaluation & Submit All Answers'.
 
 
176
  ---
177
  The agent now runs *locally* inside the Space instead of using the API.
178
  """
179
  )
180
  login_button = gr.LoginButton()
181
- run_button = gr.Button("Run Evaluation & Submit All Answers")
182
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
183
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
184
-
185
- run_button.click(fn=run_and_submit_all, inputs=[login_button], outputs=[status_output, results_table])
 
 
 
 
 
186
 
187
  # --- Launch ---
188
  if __name__ == "__main__":
@@ -206,4 +211,4 @@ if __name__ == "__main__":
206
 
207
  print("-" * (60 + len(" App Starting ")) + "\n")
208
  print("Launching Gradio Interface for Basic Agent Evaluation...")
209
- demo.launch(debug=True, share=False)
 
1
  import os
2
  import gradio as gr
3
  import requests
 
4
  import pandas as pd
5
  import re
 
6
  import textwrap
7
  from transformers import AutoTokenizer, AutoModelForCausalLM
8
  import torch
 
71
  print(f"πŸ’‘ Agent raw: '{raw_answer[:80]}' β†’ clean: '{clean_ans}'")
72
  return clean_ans
73
 
74
+
75
  # --- Test Questions & Expected Answers ---
76
  QUESTIONS_AND_ANSWERS = [
77
  {"question": "What is the capital of France?", "expected": "Paris"},
 
81
  {"question": "Who wrote 'Romeo and Juliet'?", "expected": "Shakespeare"},
82
  ]
83
 
84
+
85
  # --- Submission Function ---
86
  def submit_answers(answers: list, token: str) -> dict:
87
  """
 
109
  except Exception as e:
110
  return {"success": False, "message": str(e)}
111
 
112
+
113
+ # --- Main Run Function (fixed for HF_TOKEN) ---
114
+ def run_and_submit_all(profile: gr.OAuthProfile | None = None, *_):
115
+ """
116
+ Runs the local agent and submits answers.
117
+ Uses HF_TOKEN from environment variables instead of Gradio OAuth token.
118
+ """
119
+ # --- Step 1: Detect user login ---
120
+ if profile and hasattr(profile, "name"):
121
+ print(f"πŸ‘€ Logged in as: {profile.name}")
122
+ else:
123
+ print("⚠️ No OAuth profile detected (this is fine if HF_TOKEN is set).")
124
+
125
+ # --- Step 2: Get token from environment variable ---
126
+ token = os.getenv("HF_TOKEN")
 
 
 
 
127
  if not token:
128
+ return (
129
+ "❌ No token found. Please set your Hugging Face token as an environment variable named `HF_TOKEN`.\n"
130
+ "In your Space: Settings β†’ Repository secrets β†’ Add new secret β†’ Name: HF_TOKEN, Value: your_token_here",
131
+ pd.DataFrame(),
132
+ )
133
+
134
+ # --- Step 3: Instantiate the agent ---
135
  try:
136
  agent = BasicAgent()
137
  except Exception as e:
138
  return f"❌ Error instantiating agent: {e}", pd.DataFrame()
139
+
140
+ # --- Step 4: Collect answers ---
141
  results = []
142
  for qa in QUESTIONS_AND_ANSWERS:
143
  q = qa["question"]
 
147
  except Exception as e:
148
  answer = f"[Error: {e}]"
149
  results.append({"question": q, "answer": answer, "expected": expected})
150
+
151
+ # --- Step 5: Build DataFrame ---
152
  df = pd.DataFrame(results)
153
+
154
+ # --- Step 6: Submit ---
155
  answers_list = [r["answer"] for r in results]
156
  submission_result = submit_answers(answers_list, token)
157
+
158
  if submission_result.get("success"):
159
  msg = submission_result.get("message", "βœ… Submission successful")
160
  return f"βœ… {msg}", df
 
162
  msg = submission_result.get("message", "Unknown error")
163
  return f"❌ Submission failed: {msg}", df
164
 
165
+
166
  # --- Gradio Interface ---
167
  with gr.Blocks() as demo:
168
+ gr.Markdown("# 🧠 Basic Agent Evaluation Runner")
169
  gr.Markdown(
170
  """
171
  **Instructions:**
172
  1. Log in to your Hugging Face account.
173
+ 2. Add your Hugging Face token as a secret named `HF_TOKEN` in your Space settings.
174
+ 3. Click **Run Evaluation & Submit All Answers**.
175
+
176
  ---
177
  The agent now runs *locally* inside the Space instead of using the API.
178
  """
179
  )
180
  login_button = gr.LoginButton()
181
+ run_button = gr.Button("πŸš€ Run Evaluation & Submit All Answers")
182
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
183
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
184
+
185
+ run_button.click(
186
+ fn=run_and_submit_all,
187
+ inputs=[login_button],
188
+ outputs=[status_output, results_table],
189
+ )
190
+
191
 
192
  # --- Launch ---
193
  if __name__ == "__main__":
 
211
 
212
  print("-" * (60 + len(" App Starting ")) + "\n")
213
  print("Launching Gradio Interface for Basic Agent Evaluation...")
214
+ demo.launch(debug=True, share=False)