import inspect
import json
import os
from io import BytesIO
from typing import Optional

import gradio as gr
import pandas as pd
import requests
import whisper
from bs4 import BeautifulSoup, NavigableString, Tag
from PIL import Image
from smolagents import (
    CodeAgent,
    GoogleSearchTool,
    InferenceClientModel,
    load_tool,
    OpenAIServerModel,
    tool,
    Tool,
    ToolCollection,
    VisitWebpageTool,
    WikipediaSearchTool,
)

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


@tool
def extract_table_from_html(html: str, match: str | None = None) -> list:
    """
    A tool that extracts HTML tables from HTML content and returns them as pandas DataFrames.
    Example use cases include extracting tables from Wikipedia pages, HTML emails, or other web content.

    Args:
        html (str): The HTML content containing the tables to extract. This can be a raw
            HTML string or a URL to a webpage.
        match (str | None, optional): A string or regular expression pattern to match
            against table text content. If None, all tables are extracted.
            Defaults to None. DO NOT use HTML strings / tags in this parameter.

    Returns:
        list: A list of pandas DataFrames, where each DataFrame represents a table found
            in the HTML content. Returns an empty list if no tables are found.
    """
    import pandas as pd

    try:
        if match is not None:
            tables = pd.read_html(html, match=match)
        else:
            tables = pd.read_html(html)
        return tables if tables else []
    except ValueError as e:
        # pandas raises ValueError("No tables found") when nothing parses.
        if "No tables found" in str(e):
            return []
        raise ValueError(f"Error extracting tables from HTML content: {e}") from e
    except Exception as e:
        raise Exception(f"Failed to extract tables from HTML content: {e}") from e


@tool
def audio_to_text(file_path: str) -> str:
    """
    A tool that converts audio files to text using OpenAI's Whisper speech recognition model.

    This function transcribes audio content from a local audio file and returns the transcript
    as a JSON string containing timestamped segments. It uses the Whisper "base" model for
    speech-to-text conversion.

    Args:
        file_path (str): The local file path to the audio file to be transcribed.
            Supports common audio formats such as MP3, WAV, M4A, FLAC, etc.

    Returns:
        str: A JSON string containing the transcript data with the following structure:
            {
                "transcript": [
                    {
                        "start": float,  # Start time in seconds
                        "end": float,    # End time in seconds
                        "text": str      # Transcribed text segment
                    },
                    ...
                ]
            }

    Raises:
        FileNotFoundError: If the specified audio file does not exist.
        Exception: If the audio file cannot be processed or transcribed.

    Example:
        >>> result = audio_to_text("path/to/audio.mp3")
        >>> import json
        >>> transcript_data = json.loads(result)
        >>> for segment in transcript_data["transcript"]:
        ...     print(f"{segment['start']:.2f}s - {segment['end']:.2f}s: {segment['text']}")

    Note:
        - Uses the OpenAI Whisper "base" model for transcription
        - Processes audio without verbose output or word-level timestamps
        - Returns an empty segments list if no speech is detected
        - Processing time depends on audio file length and system performance
    """
    import json
    import os

    import whisper

    # Fail early with the FileNotFoundError documented above, rather than
    # letting Whisper surface a less descriptive error.
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Audio file not found: {file_path}")

    model = whisper.load_model("base")
    result = model.transcribe(file_path, verbose=False, word_timestamps=False)

    transcript_data = [
        {
            "start": segment["start"],
            "end": segment["end"],
            "text": segment["text"].strip(),
        }
        for segment in result["segments"]
    ]

    return json.dumps({"transcript": transcript_data})


@tool
def get_wikipedia_page_url_by_year(wikipedia_page_name: str, year: int) -> str:
    """
    Retrieve the Wikipedia page URL for a specific year (latest revision in that year).

    Args:
        wikipedia_page_name (str): Name of the Wikipedia page
        year (int): Year to get the page content from

    Returns:
        str: URL of the Wikipedia page from that year, with the revision ID included
    """
    import requests
    import wikipediaapi

    wiki = wikipediaapi.Wikipedia(
        user_agent="Final Project Agent Course (vthanhvinh@gmail.com)",
        language="en",
    )

    page = wiki.page(wikipedia_page_name)
    if not page.exists():
        raise ValueError(f"Wikipedia page '{wikipedia_page_name}' does not exist")

    api_url = "https://en.wikipedia.org/w/api.php"

    # With rvdir="older" MediaWiki enumerates revisions from newest to oldest,
    # so rvstart is the end of the year and rvend is its start; rvlimit=1 then
    # yields the latest revision made in that year, matching the docstring.
    # (The previous rvdir="newer" ordering returned the year's earliest revision.)
    params = {
        "action": "query",
        "format": "json",
        "prop": "revisions",
        "titles": wikipedia_page_name,
        "rvprop": "ids|timestamp",
        "rvstart": f"{year}-12-31T23:59:59Z",
        "rvend": f"{year}-01-01T00:00:00Z",
        "rvdir": "older",
        "rvlimit": 1,
    }

    response = requests.get(api_url, params=params)
    data = response.json()
    pages = data["query"]["pages"]
    page_id = list(pages.keys())[0]
    revisions = pages[page_id].get("revisions", [])

    if not revisions:
        raise ValueError(
            f"No revisions found for '{wikipedia_page_name}' in year {year}"
        )

    rev_id = revisions[0]["revid"]
    url = f"https://en.wikipedia.org/w/index.php?title={wikipedia_page_name}&oldid={rev_id}"

    return url
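

# Usage sketch (illustrative; requires network access and an existing page):
#
#   url = get_wikipedia_page_url_by_year("Python (programming language)", 2020)
#   # -> "https://en.wikipedia.org/w/index.php?title=...&oldid=<revision id>"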


@tool
def get_wikipedia_section_tables(
    section_name: str, soup_object: BeautifulSoup
) -> list[pd.DataFrame]:
    """
    A tool that extracts tables from a specific section of a Wikipedia page using BeautifulSoup and pandas.

    This function searches for a section in the following order:
    1. First tries to find an element with an ID matching the section name
    2. If not found, tries to find an h2 element with text matching the section name
    3. If not found, tries to find an h3 element with text matching the section name

    Once the section is found, it goes to the parent element, finds the next <table> sibling,
    and uses pandas read_html to extract the table data.

    Args:
        section_name (str): The name of the section to extract tables from
        soup_object: A BeautifulSoup object containing the parsed HTML content

    Returns:
        list: A list of pandas DataFrames representing tables found after the section,
            or an empty list if no tables are found

    Example:
        >>> from bs4 import BeautifulSoup
        >>> html = "<html><body><h2>Statistics</h2><table><tr><td>Data</td></tr></table></body></html>"
        >>> soup = BeautifulSoup(html, 'html.parser')
        >>> tables = get_wikipedia_section_tables("Statistics", soup)
        >>> print(tables[0] if tables else "No tables found")
    """
    import pandas as pd
    from bs4 import BeautifulSoup

    # isinstance() also rejects None, so a single check covers both cases.
    if not isinstance(soup_object, BeautifulSoup):
        return []

    section_element = None

    # 1. Try an element whose ID matches the section name (Wikipedia heading
    #    anchors use underscores in place of spaces).
    section_id = section_name.replace(" ", "_")
    element = soup_object.find(id=section_id)
    if element:
        section_element = element

    # 2. Fall back to an <h2> heading with matching text.
    if not section_element:
        h2_elements = soup_object.find_all("h2")
        for h2 in h2_elements:
            if h2.get_text().strip() == section_name:
                section_element = h2
                break

    # 3. Fall back to an <h3> heading with matching text.
    if not section_element:
        h3_elements = soup_object.find_all("h3")
        for h3 in h3_elements:
            if h3.get_text().strip() == section_name:
                section_element = h3
                break

    if not section_element:
        return []

    parent = section_element.parent
    if not parent:
        return []

    table = parent.find_next_sibling("table")
    if not table:
        return []
    try:
        table_html = str(table)
        tables = pd.read_html(table_html)
        return tables if tables else []
    except ValueError:
        # pandas raises ValueError when the HTML contains no parseable table.
        return []
    except Exception:
        return []
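

# Usage sketch tying the Wikipedia tools together (illustrative; the page name
# and section title are assumptions, not values the agent is guaranteed to use):
#
#   import requests
#   from bs4 import BeautifulSoup
#
#   url = get_wikipedia_page_url_by_year("Mercedes Sosa", 2022)
#   soup = BeautifulSoup(requests.get(url).text, "html.parser")
#   tables = get_wikipedia_section_tables("Discography", soup)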


@tool
def download_file(question_id: str, file_name: str) -> str:
    """
    A tool that downloads the file attached to a question and stores it as a local file.
    Returns a JSON string containing the file path and, if the file has a text MIME type,
    its text content.

    Args:
        question_id: Question ID.
        file_name: File name.
    Returns:
        str: JSON string containing file information. Structure:
            - For text files: {"path": "local_path", "content": "file_content"}
            - For non-text files: {"path": "local_path"}
    """
    import json
    import os

    import requests

    url = f"{DEFAULT_API_URL}/files/{question_id}"
    print(f"Fetching file from URL: {url}")

    response = None
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()

        if not response.content:
            raise ValueError(f"Empty response received from {url}")

        content_type = response.headers.get("content-type", "").lower()
        print(f"Response content-type: {content_type}")
        print(f"Response content length: {len(response.content)} bytes")

        local_path = file_name

        with open(local_path, "wb") as f:
            f.write(response.content)

        print(f"File saved to: {local_path}")

        # MIME types whose payload is decoded and returned alongside the path.
        text_mime_types = [
            "text/",
            "application/json",
            "application/xml",
            "application/javascript",
            "application/csv",
            "application/x-csv",
            "text/csv",
        ]

        is_text_file = any(
            content_type.startswith(mime_type) for mime_type in text_mime_types
        )

        result = {"path": local_path}

        if is_text_file:
            text_content = response.content.decode("utf-8")
            result["content"] = text_content
            print(
                f"Added text content to result (length: {len(text_content)} characters)"
            )

        return json.dumps(result)

    except requests.exceptions.RequestException as e:
        raise ValueError(f"Failed to download file from {url}: {e}")
    except Exception as e:
        # Note: `if response` would be False for non-2xx responses (Response
        # truthiness follows .ok), so compare against None explicitly.
        content_preview = (
            response.content[:200]
            if response is not None and hasattr(response, "content")
            else b"No response"
        )
        print(f"Error downloading file. Content preview: {content_preview}")
        raise ValueError(f"Failed to download file from {url}: {e}")


class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        self.multimodal_agent = CodeAgent(
            tools=[
                VisitWebpageTool(),
                GoogleSearchTool("serper"),
                download_file,
                audio_to_text,
                WikipediaSearchTool(),
                get_wikipedia_page_url_by_year,
                get_wikipedia_section_tables,
            ],
            model=OpenAIServerModel(model_id="gpt-4o"),
            additional_authorized_imports=[
                "requests",
                "bs4",
                "markdownify",
                "wikipedia",
                "pandas",
                "io",
                "PIL",
                "img2text",
                "PIL.Image",
                "cv2",
                "numpy",
                "whisper",
                "openpyxl",
                "json",
                "wikipediaapi",
                "pytube",
                "pytubefix",
                "pytubefix.cli",
                "youtube_transcript_api",
            ],
            name="multimodal_agent",
            description="""
            This is a powerful agent that specializes in:
            - Writing code to solve problems.
            - Solving hard math problems.
            - Browsing the web to find information.
            - Reasoning across audio, vision, and text, i.e. a multimodal agent.""",
            max_steps=5,
        )

        self.manager_agent = CodeAgent(
            model=InferenceClientModel(
                model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            ),
            tools=[
                download_file,
                audio_to_text,
                get_wikipedia_page_url_by_year,
                get_wikipedia_section_tables,
            ],
            managed_agents=[self.multimodal_agent],
            additional_authorized_imports=[
                "requests",
                "bs4",
                "markdownify",
                "wikipedia",
                "io",
                "pandas",
                "PIL",
                "img2text",
                "PIL.Image",
                "cv2",
                "numpy",
                "openpyxl",
                "json",
                "wikipediaapi",
                "pytube",
                "pytubefix",
                "pytubefix.cli",
                "youtube_transcript_api",
            ],
            planning_interval=2,
            max_steps=10,
        )

    def __call__(self, question: str, question_id: str, file_name: str) -> str:
        print(f"Agent received question: {question}")
        metadata = {"question_id": question_id}
        if file_name:
            metadata["file_name"] = file_name

        prompt = f"""
        Answer the following question:
        "{question}".
        Question metadata in JSON format:
        ```
        {json.dumps(metadata)}
        ```
        Follow the rules below when possible:
        - Please take the question literally! Do not add any additional information or assumptions.
        - Please answer as concisely as possible.
        - If the question asks for a number, please return a numerical answer without a unit (unless the unit is specifically asked for). For example: 3 instead of three, 0 instead of None, 3 instead of $3.
        - If the question asks for a number with specific decimal places, please format the number into a string with the same decimal places. For example: 3.00 instead of 3.
        - If the question asks for a list, please make sure that the elements are separated by a comma (`,`) and a space (` `). For example: `1, 2, 3` instead of `1,2,3`.
        - If the question asks for a name without abbreviations, please ALWAYS ask `multimodal_agent` for the FULL name of the final answer to ensure NO abbreviation is included in the Final Answer. For example: `United States` instead of `US`.
        - To parse data from a Wikipedia page, please use the `get_wikipedia_section_tables` tool.
        """
        if "food" in question.lower() or "drink" in question.lower():
            prompt = f"""
            {prompt}
            - Be careful about the difference between food and drink items. For instance: ice cream is a food item!
            """
        result = self.manager_agent.run(prompt)
        print(f"Agent responded with: {result}")
        return result
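

# Usage sketch (illustrative; assumes the credentials the underlying models and
# tools expect, e.g. an OpenAI API key, an HF token, and a Serper API key):
#
#   agent = BasicAgent()
#   answer = agent("What is 2 + 2?", question_id="<task-id>", file_name="")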


def run_and_submit_all(question_id: str, profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    print(f"Fetching questions from: {questions_url}")
    response = None
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if question_id:
            questions_data = [
                item for item in questions_data if item.get("task_id") == question_id
            ]
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response: {response}")
        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        print(f"Question data: {json.dumps(item)}")
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text, task_id, file_name)
            answers_payload.append(
                {"task_id": task_id, "submitted_answer": submitted_answer}
            )
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": submitted_answer,
                }
            )
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": f"AGENT ERROR: {e}",
                }
            )

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        print(f"Submission_data: {submission_data}")
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print(f"Submission successful. Final status: {final_status}")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the slow submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    question_id = gr.Textbox(
        label="Question ID to solve (leave empty to solve all)",
        lines=1,
        interactive=True,
        value="7bd855d8-463d-4ed5-93ca-5fe35145f733",
    )
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False
    )

    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        inputs=[question_id],
        outputs=[status_output, results_table],
    )


if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)

    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(
            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
        )
    else:
        print(
            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
        )

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)