app.py CHANGED
@@ -3,27 +3,32 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
 
-
-import requests
-import transformers
+load_dotenv()
 
-# Instantiate your agent
-agent = PostpartumResearchAgent()
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+
 class BasicAgent:
     def __init__(self):
-
+        self.token = os.getenv("token")
+        self.model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
+        self.client = InferenceClient(model=self.model_id, token=self.token)
+
     def __call__(self, question: str) -> str:
-
-
-
-
+        prompt = f"Answer this question concisely and clearly. Only return the final answer.\nQuestion: {question}"
+        try:
+            response = self.client.text_generation(prompt, max_new_tokens=100)
+            return response.strip()
+        except Exception as e:
+            print(f"Error calling inference API: {e}")
+            return f"error: {e} | token used: {self.token}"
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """

@@ -123,80 +128,4 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         try:
             error_json = e.response.json()
             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-        except requests.exceptions.JSONDecodeError:
-            error_detail += f" Response: {e.response.text[:500]}"
-        status_message = f"Submission Failed: {error_detail}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-    except requests.exceptions.Timeout:
-        status_message = "Submission Failed: The request timed out."
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-    except requests.exceptions.RequestException as e:
-        status_message = f"Submission Failed: Network error - {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-    except Exception as e:
-        status_message = f"An unexpected error occurred during submission: {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-
-
-# --- Build Gradio Interface using Blocks ---
-with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner")
-    gr.Markdown(
-        """
-        **Instructions:**
-
-        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-        ---
-        **Disclaimers:**
-        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-        """
-    )
-
-    gr.LoginButton()
-
-    run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-    # Removed max_rows=10 from DataFrame constructor
-    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-    run_button.click(
-        fn=run_and_submit_all,
-        outputs=[status_output, results_table]
-    )
-
-if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
-    # Check for SPACE_HOST and SPACE_ID at startup for information
-    space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-    if space_host_startup:
-        print(f"✅ SPACE_HOST found: {space_host_startup}")
-        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
-    else:
-        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-    if space_id_startup: # Print repo URLs if SPACE_ID is found
-        print(f"✅ SPACE_ID found: {space_id_startup}")
-        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-    else:
-        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-    print("-"*(60 + len(" App Starting ")) + "\n")
-
-    print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+        except requests.exceptio
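
The added BasicAgent routes questions through huggingface_hub.InferenceClient with a token read from the environment. Below is a minimal local smoke-test sketch of that same call path, assuming huggingface_hub and python-dotenv are installed, a local .env file provides a `token` entry, and the chosen model is available for text generation on the Inference API; the sample question is illustrative only and is not part of the commit.

# Sketch: exercise the new BasicAgent call path locally (not part of the Space itself).
# Assumptions: `pip install huggingface_hub python-dotenv`, and a .env file with a line
# like `token=...` holding a valid Hugging Face API token (placeholder, not a real value).
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()  # populate os.environ from .env, mirroring the load_dotenv() call in app.py

client = InferenceClient(
    model="meta-llama/Meta-Llama-3-70B-Instruct",  # same model id used in the diff
    token=os.getenv("token"),                      # same env var name the diff reads
)

question = "What is the capital of France?"  # illustrative question
prompt = (
    "Answer this question concisely and clearly. Only return the final answer.\n"
    f"Question: {question}"
)

try:
    # text_generation returns the generated completion as a string
    answer = client.text_generation(prompt, max_new_tokens=100)
    print(answer.strip())
except Exception as e:
    print(f"Error calling inference API: {e}")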