import base64
import contextlib
import io
import os
import re
from typing import Any, Dict, Optional

import gradio as gr
import openai
import pandas as pd
import requests
import wikipedia
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from smolagents import CodeAgent, OpenAIServerModel, tool

# (Keep Constants as is)
# --- Constants ---
load_dotenv()  # Load environment variables from a .env file if it exists

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT -----
HEADERS = {"User-Agent": "Mozilla/5.0"}


def _flatten_multiindex(df: pd.DataFrame) -> pd.DataFrame:
    """Flatten MultiIndex column headers into single-string labels."""
    if isinstance(df.columns, pd.MultiIndex):
        df.columns = [
            " ".join(filter(None, map(str, tup))).strip()
            for tup in df.columns.values
        ]
    return df


@tool
def web_search(query: str) -> str:
    """
    Performs a web search using SerpAPI and extracts readable content from the top results.

    Args:
        query: The search term to look up online.
    """
    try:
        serp_api_key = os.getenv("SERPAPI_KEY")
        serp_res = requests.get(
            "https://serpapi.com/search",
            params={"q": query, "engine": "google", "api_key": serp_api_key, "num": 5},
            timeout=10,
        ).json()
        for result in serp_res.get("organic_results", [])[:3]:
            url = result.get("link")
            if not url:
                continue
            try:
                html = requests.get(url, headers=HEADERS, timeout=10).text
                soup = BeautifulSoup(html, "html.parser")
                for tag in soup(["script", "style", "header", "footer", "nav", "aside"]):
                    tag.decompose()
                text = soup.get_text(separator="\n")
                lines = [line.strip() for line in text.splitlines() if line.strip()]
                return f"Source: {url}\n\n" + "\n".join(lines[:100])
            except Exception:
                continue
        return "No good content found in top search results."
    except Exception as e:
        return f"Search failed: {e}"


@tool
def image_analysis(image_path: str) -> str:
    """
    Analyzes an image using GPT-4o and describes its contents.

    Args:
        image_path: Path to the image file to analyze.
    """
    client = openai.OpenAI()
    with open(image_path, "rb") as img:
        b64 = base64.b64encode(img.read()).decode("utf-8")
    res = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image."},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "auto"},
                    },
                ],
            }
        ],
        temperature=0.3,
    )
    return res.choices[0].message.content.strip()


@tool
def youtube_quote(url: str, pattern: str) -> str:
    """
    Return the first transcript line in a YouTube video that matches *pattern*.

    Args:
        url (str): Full YouTube watch URL
            (e.g. ``https://www.youtube.com/watch?v=dQw4w9WgXcQ``).
        pattern (str): Case-insensitive regular expression to search for.

    Returns:
        str: The matching line, or an explanatory message if none is found.
    """
    # Import outside the try block so the except clause below can reference
    # NoTranscriptFound even if a later step fails.
    from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi

    try:
        video_id = re.search(r"[?&]v=([\w-]{11})", url)
        if not video_id:
            return "Invalid YouTube URL."
        vid = video_id.group(1)
        transcript = YouTubeTranscriptApi.get_transcript(vid, languages=["en"])
        for entry in transcript:
            if re.search(pattern, entry["text"], re.I):
                return entry["text"].strip()
        return "Line not found."
    except NoTranscriptFound:
        return "No transcript available."
    except Exception as e:
        return f"youtube_quote error: {e}"
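# Usage sketch for youtube_quote (illustrative only; the URL and pattern are
# placeholders, and the call succeeds only if the video has an English transcript):
#
#   youtube_quote("https://www.youtube.com/watch?v=dQw4w9WgXcQ", r"never gonna give")
#   # -> the first transcript line matching the pattern, if one exists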
except Exception as e: return f"youtube_quote error: {e}" @tool def commutativity_counterexample(table_csv: str) -> str: """ Given a CSV encoding a binary-operation table on the set ``{a,b,c,d,e}``, return the subset of elements witnessing non-commutativity. Args: table_csv (str): CSV string with row/column labels identical and in the same order. Returns: str: Sorted comma-separated symbols that break commutativity, or ``"Commutative"`` if none are found. """ df = pd.read_csv(io.StringIO(table_csv), index_col=0) witnesses = set() for a in df.index: for b in df.columns: if df.at[a, b] != df.at[b, a]: witnesses.update([a, b]) return ", ".join(sorted(witnesses)) if witnesses else "Commutative" @tool def pdf_find_string(pdf_url: str, pattern: str) -> str: """ Search a PDF for the first occurrence of *pattern*. Args: pdf_url (str): Direct or relative URL of the PDF (HTTP/HTTPS). pattern (str): Case-insensitive regular-expression to locate. Returns: str: The first captured group / match or a “not found” message. """ try: import pdfplumber with pdfplumber.open(requests.get(pdf_url, stream=True, headers=HEADERS).raw) as pdf: text = "\n".join(page.extract_text() or "" for page in pdf.pages) match = re.search(pattern, text, re.I) return match.group(1) if match else "Not found." except Exception as e: return f"pdf_find_string error: {e}" @tool def olympic_min_athletes(year: int = 1928) -> str: """ Return the NOC code of the nation with the fewest athletes at a given Summer Olympics. Args: year (int, optional): Four-digit Summer Games year. Defaults to 1928. Returns: str: Three-letter NOC code, or an error string on failure. """ url = f"https://en.wikipedia.org/wiki/{year}_Summer_Olympics" try: df = next(t for t in pd.read_html(url) if "Athletes" in t.columns) min_val = df["Athletes"].min() subset = df[df["Athletes"] == min_val] code = subset["NOC code" if "NOC code" in df.columns else "NOC"].iloc[0] return code except Exception as e: return f"olympic_min_athletes error: {e}" @tool def npb_adjacent_numbers(player_last_name: str, team: str = "Hokkaido Nippon-Ham Fighters") -> str: """ For a given player on a Nippon Professional Baseball (NPB) roster, return the last names of the players whose jersey numbers are immediately before and after that player’s number. Args: player_last_name (str): Surname (or part of it) to search for. team (str, optional): Team name as used in the Wikipedia roster section. Defaults to ``"Hokkaido Nippon-Ham Fighters"``. Returns: str: ``", "`` or an explanatory message. """ url = "https://en.wikipedia.org/wiki/Hokkaido_Nippon-Ham_Fighters#Current_roster" try: tables = pd.read_html(url) roster = pd.concat(tables) roster.columns = [str(c) for c in roster.columns] row = roster[roster.apply(lambda r: player_last_name.lower() in " ".join(map(str, r)).lower(), axis=1)] if row.empty: return "Player not found." num = int(row.iloc[0]["No."]) before = roster[roster["No."] == num - 1]["Name"].iloc[0].split()[-1] after = roster[roster["No."] == num + 1]["Name"].iloc[0].split()[-1] return f"{before}, {after}" except Exception as e: return f"npb_adjacent_numbers error: {e}" @tool def vegetable_filter(items: str) -> str: """ Filter a comma-separated grocery list down to recognised vegetables. Args: items (str): Items separated by commas (case-insensitive). Returns: str: Alphabetically-sorted vegetables, comma-separated, or an empty string if none are present. 
""" veggies = { "sweet potatoes", "green beans", "corn", "bell pepper", "broccoli", "celery", "zucchini", "lettuce" } found = [i for i in map(str.strip, items.split(",")) if i.lower() in veggies] return ", ".join(sorted(found)) @tool def malko_first_name() -> str: """ Return the *first* name of the earliest post-1977 winner of the Nikolai Malko Conductors Competition who represented a now-defunct country. Returns: str: First name of that conductor, or an error message. """ url = "https://en.wikipedia.org/wiki/Nikolai_Malko_Competition" try: tables = pd.read_html(url) winners = tables[0] # filter after 1977, nationality no longer existing (e.g., Yugoslavia, USSR, Czechoslovakia) old_countries = {"Yugoslavia", "U.S.S.R.", "USSR", "Czechoslovakia", "U.S.S.R", "Soviet Union"} subset = winners[winners["Year"] > 1977] subset = subset[subset["Nationality"].isin(old_countries)] first_name = str(subset.iloc[0]["Winner"]).split()[0] return first_name except Exception as e: return f"malko_first_name error: {e}" @tool def excel_sum_food(xlsx_path: str) -> str: """ Sum the “USD Sales” column for rows whose “Category” contains the word “food”. Args: xlsx_path (str): Path to an Excel workbook file. Returns: str: Total formatted to two decimals. """ df = pd.read_excel(xlsx_path) food_df = df[df["Category"].str.contains("food", case=False, na=False)] total = food_df["USD Sales"].sum() return f"{total:.2f}" @tool def nasa_award_from_article(article_url: str) -> str: """ Extract a NASA award number cited in the first PDF linked from an article. Args: article_url (str): Web page containing (exactly one) PDF link. Returns: str: The award number or an explanatory failure message. """ try: soup = BeautifulSoup(requests.get(article_url, headers=HEADERS).text, "html.parser") pdf_link = soup.find("a", href=re.compile(r"\.pdf$")) if not pdf_link: return "PDF link not found." pdf_url = pdf_link["href"] if pdf_link["href"].startswith("http") else requests.compat.urljoin(article_url, pdf_link["href"]) import pdfplumber with pdfplumber.open(requests.get(pdf_url, stream=True, headers=HEADERS).raw) as pdf: text = "\n".join(page.extract_text() or "" for page in pdf.pages) match = re.search(r"NASA award number\s+([A-Z0-9-]+)", text, re.I) return match.group(1) if match else "Award number not found." except Exception as e: return f"nasa_award_from_article error: {e}" @tool def baseball_stat(player: str, season: int, stat: str) -> str: """ Fetch a single statistic for an MLB player from Baseball-Reference. Args: player (str): Full player name (e.g. ``"Babe Ruth"``). season (int): Four-digit season year. stat (str): Column header exactly as it appears in the table (e.g. ``"HR"``). Returns: str: The requested stat value or an error message. """ base = f"https://www.baseball-reference.com/players/{player[0].lower()}/{player[:5].lower()}{player.split()[-1][:2].lower()}01.shtml" try: df = pd.read_html(base)[0] row = df[df["Year"] == season] if row.empty: return "Season not found." return str(row.iloc[0][stat]) except Exception as e: return f"baseball_stat error: {e}" @tool def safe_python_eval(code: str) -> str: """ Execute user-supplied Python code in a RestrictedPython sandbox. Args: code (str): The code to run. The special variable ``_`` may capture the last expression’s value. Returns: str: Captured stdout plus the value of ``_`` (if any), or an error string. 
""" import restrictedpython as rp try: compiled = rp.compile_restricted(code, filename="", mode="exec") loc: Dict[str, Any] = {} with io.StringIO() as buf, contextlib.redirect_stdout(buf): exec(compiled, {"__builtins__": rp.utility_builtins}, loc) output = buf.getvalue() # fetch last expression result if stored under _ last = loc.get("_", "") return (output + str(last)).strip() except Exception as e: return f"safe_python_eval error: {e}" @tool def actor_role_lookup(actor_full_name: str) -> str: """ Find the given actor’s character first-name in the Polish TV series *Magda M.*. Args: actor_full_name (str): Actor’s full name as listed on Wikipedia. Returns: str: Character first-name or “Not found.”. """ url = "https://pl.wikipedia.org/wiki/Magda_M._(serial_telewizyjny)" try: tables = pd.read_html(url) cast = pd.concat(tables, ignore_index=True) row = cast[cast.apply(lambda r: actor_full_name in " ".join(map(str, r)), axis=1)] if row.empty: return "Not found." char_cell = row.iloc[0][1] first_name = str(char_cell).split()[0] return first_name except Exception as e: return f"actor_role_lookup error: {e}" @tool def libretext_lookup() -> str: """Returns the vet surname from LibreTexts chemistry 1.E Exercises.""" url = "https://chem.libretexts.org/Bookshelves/General_Chemistry/Introductory_Chemistry_(CK-12)/01%3A_Introduction_to_Chemistry/1.E%3A_Exercises" try: soup = BeautifulSoup(requests.get(url, headers=HEADERS).text, "html.parser") txt = soup.get_text("\n") match = re.search(r"([A-Z][a-z]+)\s+is an equine veterinarian", txt) return match.group(1) if match else "Surname not found." except Exception as e: return f"libretext_lookup error: {e}" @tool def featured_article_nominator(article_title: str) -> str: """ Return the nominator(s) of a Wikipedia Featured Article promoted in November 2016. Args: article_title (str): Exact or substring match of the article title. Returns: str: Nominator names with footnote markers removed, or an explanatory message if not found. """ try: log_url = "https://en.wikipedia.org/wiki/Wikipedia:Featured_articles/log/2016" tables = pd.read_html(log_url) # pick the table that has both the “Article” and “Nominator(s)” columns df = next( t for t in tables if {"Article", "Nominator(s)"}.issubset(t.columns) ) except StopIteration: return "Could not find the FA log table." except Exception as e: return f"Error loading FA log: {e}" # Exact- or substring match on the Article column row = df[df["Article"].str.contains(article_title, case=False, na=False)] if row.empty: return "Article not found in the November 2016 FA log." nominators = row.iloc[0]["Nominator(s)"] # remove citation footnotes like [1], [note a], etc. clean = re.sub(r"\[.*?]", "", str(nominators)).strip() return clean or "Nominator not recorded." @tool def chess_from_image(image_path: str) -> str: """ Analyse a chess diagram (Black to move) and return the engine’s best move. Args: image_path (str): Local file path to a chessboard image. Returns: str: Move in algebraic notation, or a “TODO” stub. """ return "TODO: chess image analysis not implemented." @tool def whisper_transcribe(audio_path: str, scope: str = "full") -> str: """ Transcribe audio using OpenAI Whisper. Args: audio_path (str): Path to an audio file supported by Whisper. scope (str, optional): Portion of the output to return: ``"full"``, ``"filling"``, or ``"pages"``. Defaults to ``"full"``. Returns: str: The transcription (or the requested subset) or an error message. 
""" try: import openai client = openai.OpenAI() with open(audio_path, "rb") as f: transcript = client.audio.transcriptions.create(model="whisper-1", file=f) text = transcript.text.strip() if scope == "filling": # Return only list after the word "filling" if present seg = re.split(r"filling|filling:|for the filling", text, flags=re.I) return seg[-1].strip() if len(seg) > 1 else text return text except Exception as e: return f"whisper_transcribe error: {e}" @tool def youtube_video_birdcount(url: str, frame_skip: int = 15) -> str: """ (Stub) Estimate the maximum number of bird species visible simultaneously in a video. Args: url (str): Full YouTube watch URL. frame_skip (int, optional): Analyse every *n*-th frame. Defaults to 15. Returns: str: Placeholder text until the vision model is implemented. """ return "TODO: bird species detection not implemented." @tool def discography_search( artist: str, start: int | None = None, end: int | None = None ) -> str: """ Return a list (or count) of studio albums by *artist* optionally filtered by release year. Args: artist (str): Band or solo-artist name (e.g. ``"Radiohead"``). start (int | None, optional): Earliest year to include, inclusive. If provided together with *end*, the function returns **only the count** of albums in the range. Defaults to ``None``. end (int | None, optional): Latest year to include, inclusive. Must be supplied with *start* to take effect. Defaults to ``None``. Returns: str: • One bullet-per-album (``"• Title (Year)"``) • *or* an integer count as a string when both *start* and *end* are given. • Error message if the discography page cannot be parsed. """ wikipedia.set_lang("en") target = f"{artist} discography" def _get_page(title: str) -> Optional[wikipedia.WikipediaPage]: try: return wikipedia.page(title, auto_suggest=False) except Exception: return None page = _get_page(target) or next((p for p in ( _get_page(t) for t in wikipedia.search(target)[:5] ) if p), None) if page is None: return "No Wikipedia discography page found." 
@tool
def discography_search(artist: str, start: int | None = None, end: int | None = None) -> str:
    """
    Return a list (or count) of studio albums by *artist*, optionally filtered
    by release year.

    Args:
        artist (str): Band or solo-artist name (e.g. ``"Radiohead"``).
        start (int | None, optional): Earliest year to include, inclusive.
            If provided together with *end*, the function returns **only the
            count** of albums in the range. Defaults to ``None``.
        end (int | None, optional): Latest year to include, inclusive.
            Must be supplied with *start* to take effect. Defaults to ``None``.

    Returns:
        str:
            * one bullet per album (``"• Title (Year)"``),
            * *or* an integer count as a string when both *start* and *end* are given,
            * *or* an error message if the discography page cannot be parsed.
    """
    wikipedia.set_lang("en")
    target = f"{artist} discography"

    def _get_page(title: str) -> Optional[wikipedia.WikipediaPage]:
        try:
            return wikipedia.page(title, auto_suggest=False)
        except Exception:
            return None

    page = _get_page(target) or next(
        (p for p in (_get_page(t) for t in wikipedia.search(target)[:5]) if p), None
    )
    if page is None:
        return "No Wikipedia discography page found."

    albums: list[tuple[str, int | None]] = []
    try:
        tables = [_flatten_multiindex(t) for t in pd.read_html(page.url, flavor="bs4")]
        studio = next(
            t for t in tables if any(re.search(r"studio", c, re.I) for c in t.columns)
        )
        title_col = next(c for c in studio.columns if re.search(r"(title|album)", c, re.I))
        year_col = next((c for c in studio.columns if re.search(r"year", c, re.I)), None)
        for _, row in studio.iterrows():
            title = re.sub(r"\[.*?]", "", str(row[title_col])).strip()
            year_match = re.search(r"(\d{4})", str(row[year_col] if year_col else ""))
            year = int(year_match.group(1)) if year_match else None
            albums.append((title, year))
    except Exception as e:
        return f"Error parsing discography tables: {e}"

    if start is not None and end is not None:
        return str(sum(1 for _, y in albums if y and start <= y <= end))
    return "\n".join(f"• {t} ({y})" for t, y in albums)


toolkit = [
    web_search,
    image_analysis,
    discography_search,
    youtube_quote,
    youtube_video_birdcount,
    whisper_transcribe,
    chess_from_image,
    featured_article_nominator,
    commutativity_counterexample,
    libretext_lookup,
    actor_role_lookup,
    safe_python_eval,
    baseball_stat,
    nasa_award_from_article,
    pdf_find_string,
    olympic_min_athletes,
    npb_adjacent_numbers,
    excel_sum_food,
    malko_first_name,
    vegetable_filter,
]

# gpt-4.1 is an OpenAI-hosted model, so use OpenAIServerModel here;
# InferenceClientModel targets Hugging Face inference providers instead.
model = OpenAIServerModel(
    model_id="gpt-4.1",  # or "gpt-3.5-turbo"
    api_key=os.environ.get("OPENAI_API_KEY"),
)

# --- Agent Class ---
agent = CodeAgent(
    name="BasicAgent",
    description="An agent capable of answering questions using various tools for its tasks.",
    tools=toolkit,
    model=model,
    planning_interval=5,
)
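# Optional local smoke test for the agent (commented out; assumes
# OPENAI_API_KEY is set in the environment):
#
#   print(agent.run("Which of these are vegetables: milk, corn, celery, bread?"))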
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    global agent
    # When the app runs as a Hugging Face Space, this link points to your codebase
    # (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            # CodeAgent instances are driven via .run()
            submitted_answer = agent.run(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
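# For reference, the submission payload posted above has this shape
# (illustrative values):
#
#   {
#       "username": "your-hf-username",
#       "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",
#       "answers": [{"task_id": "abc123", "submitted_answer": "42"}],
#   }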
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this Space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimer:** Once you click the "submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution.
        For instance, to avoid the long-running submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from the DataFrame constructor.
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table],
    )

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    # Check for SPACE_HOST and SPACE_ID at startup, for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)