Jroussel72 commited on
Commit
0e36cc6
·
1 Parent(s): 81917a3

Refactor app.py to enhance agent functionality and add new tools for web search, image analysis, YouTube transcript retrieval, and more.

Browse files
Files changed (1) hide show
  1. app.py +547 -13
app.py CHANGED
@@ -2,22 +2,560 @@ import os
2
  import gradio as gr
3
  import requests
4
  import inspect
 
5
  import pandas as pd
 
 
 
 
 
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
 
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
- class BasicAgent:
14
- def __init__(self):
15
- print("BasicAgent initialized.")
16
- def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
@@ -39,11 +577,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
39
  submit_url = f"{api_url}/submit"
40
 
41
  # 1. Instantiate Agent ( modify this part to create your agent)
42
- try:
43
- agent = BasicAgent()
44
- except Exception as e:
45
- print(f"Error instantiating agent: {e}")
46
- return f"Error initializing agent: {e}", None
47
  # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
48
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
  print(agent_code)
 
2
  import gradio as gr
3
  import requests
4
  import inspect
5
+ import re
6
  import pandas as pd
7
+ import openai
8
+ from dotenv import load_dotenv
9
+ from typing import Optional
10
+ import base64
11
+ from bs4 import BeautifulSoup
12
+ import wikipedia
13
+ from smolagents import CodeAgent, InferenceClientModel, tool
14
+
15
 
16
  # (Keep Constants as is)
17
  # --- Constants ---
18
+ load_dotenv() # Load environment variables from .env file if it exists
19
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
20
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
21
 
22
  # --- Basic Agent Definition ---
23
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
24
+ HEADERS = {"User-Agent": "Mozilla/5.0"}
25
+
26
+ def _flatten_multiindex(df: pd.DataFrame) -> pd.DataFrame:
27
+ """Flattens MultiIndex column headers into single-string labels."""
28
+ if isinstance(df.columns, pd.MultiIndex):
29
+ df.columns = [
30
+ " ".join(filter(None, map(str, tup))).strip()
31
+ for tup in df.columns.values
32
+ ]
33
+ return df
34
+
35
+ @tool
36
+ def web_search(query: str) -> str:
37
+ """
38
+ Performs a web search using SerpAPI and extracts readable content from the top result.
39
+
40
+ Args:
41
+ query: The search term to look up online.
42
+ """
43
+ try:
44
+ serp_api_key = os.getenv("SERPAPI_KEY")
45
+ serp_res = requests.get("https://serpapi.com/search", params={
46
+ "q": query,
47
+ "engine": "google",
48
+ "api_key": serp_api_key,
49
+ "num": 5
50
+ }, timeout=10).json()
51
+
52
+ for result in serp_res.get("organic_results", [])[:3]:
53
+ url = result.get("link")
54
+ if not url:
55
+ continue
56
+ try:
57
+ html = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10).text
58
+ soup = BeautifulSoup(html, "html.parser")
59
+ for tag in soup(["script", "style", "header", "footer", "nav", "aside"]):
60
+ tag.decompose()
61
+ text = soup.get_text(separator="\n")
62
+ lines = [line.strip() for line in text.splitlines() if line.strip()]
63
+ return f"Source: {url}\n\n" + "\n".join(lines[:100])
64
+ except Exception:
65
+ continue
66
+ return "No good content found in top search results."
67
+ except Exception as e:
68
+ return f"Search failed: {e}"
69
+
70
+ @tool
71
+ def image_analysis(image_path: str) -> str:
72
+ """
73
+ Analyzes an image using GPT-4o and describes its contents.
74
+
75
+ Args:
76
+ image_path: Path to the image file to analyze.
77
+ """
78
+ client = openai.OpenAI()
79
+ with open(image_path, "rb") as img:
80
+ b64 = base64.b64encode(img.read()).decode("utf-8")
81
+
82
+ res = client.chat.completions.create(
83
+ model="gpt-4o",
84
+ messages=[
85
+ {
86
+ "role": "user",
87
+ "content": [
88
+ {"type": "text", "text": "Describe this image."},
89
+ {"type": "image_url", "image_url": {
90
+ "url": f"data:image/jpeg;base64,{b64}",
91
+ "detail": "auto"
92
+ }}
93
+ ]
94
+ }
95
+ ],
96
+ temperature=0.3
97
+ )
98
+ return res.choices[0].message.content.strip()
99
+
100
+ @tool
101
+ def youtube_quote(url: str, pattern: str) -> str:
102
+ """
103
+ Return the first transcript line in a YouTube video that matches *pattern*.
104
+
105
+ Args:
106
+ url (str): Full YouTube watch URL
107
+ (e.g. ``https://www.youtube.com/watch?v=dQw4w9WgXcQ``).
108
+ pattern (str): Case-insensitive regular expression to search for.
109
+
110
+ Returns:
111
+ str: The matching line, or an explanatory message if none is found.
112
+ """
113
+ try:
114
+ from youtube_transcript_api import YouTubeTranscriptApi, NoTranscriptFound
115
+ video_id = re.search(r"[?&]v=([\w-]{11})", url)
116
+ if not video_id:
117
+ return "Invalid YouTube URL."
118
+ vid = video_id.group(1)
119
+ transcript = YouTubeTranscriptApi.get_transcript(vid, languages=["en"])
120
+ for entry in transcript:
121
+ if re.search(pattern, entry["text"], re.I):
122
+ return entry["text"].strip()
123
+ return "Line not found."
124
+ except NoTranscriptFound:
125
+ return "No transcript available."
126
+ except Exception as e:
127
+ return f"youtube_quote error: {e}"
128
+
129
+ @tool
130
+ def commutativity_counterexample(table_csv: str) -> str:
131
+ """
132
+ Given a CSV encoding a binary-operation table on the set ``{a,b,c,d,e}``,
133
+ return the subset of elements witnessing non-commutativity.
134
+
135
+ Args:
136
+ table_csv (str): CSV string with row/column labels identical and in the same order.
137
+
138
+ Returns:
139
+ str: Sorted comma-separated symbols that break commutativity,
140
+ or ``"Commutative"`` if none are found.
141
+ """
142
+ df = pd.read_csv(io.StringIO(table_csv), index_col=0)
143
+ witnesses = set()
144
+ for a in df.index:
145
+ for b in df.columns:
146
+ if df.at[a, b] != df.at[b, a]:
147
+ witnesses.update([a, b])
148
+ return ", ".join(sorted(witnesses)) if witnesses else "Commutative"
149
+
150
+ @tool
151
+ def pdf_find_string(pdf_url: str, pattern: str) -> str:
152
+ """
153
+ Search a PDF for the first occurrence of *pattern*.
154
+
155
+ Args:
156
+ pdf_url (str): Direct or relative URL of the PDF (HTTP/HTTPS).
157
+ pattern (str): Case-insensitive regular-expression to locate.
158
+
159
+ Returns:
160
+ str: The first captured group / match or a “not found” message.
161
+ """
162
+
163
+ try:
164
+ import pdfplumber
165
+ with pdfplumber.open(requests.get(pdf_url, stream=True, headers=HEADERS).raw) as pdf:
166
+ text = "\n".join(page.extract_text() or "" for page in pdf.pages)
167
+ match = re.search(pattern, text, re.I)
168
+ return match.group(1) if match else "Not found."
169
+ except Exception as e:
170
+ return f"pdf_find_string error: {e}"
171
+
172
+ @tool
173
+ def olympic_min_athletes(year: int = 1928) -> str:
174
+ """
175
+ Return the NOC code of the nation with the fewest athletes at a given Summer Olympics.
176
+
177
+ Args:
178
+ year (int, optional): Four-digit Summer Games year. Defaults to 1928.
179
+
180
+ Returns:
181
+ str: Three-letter NOC code, or an error string on failure.
182
+ """
183
+ url = f"https://en.wikipedia.org/wiki/{year}_Summer_Olympics"
184
+ try:
185
+ df = next(t for t in pd.read_html(url) if "Athletes" in t.columns)
186
+ min_val = df["Athletes"].min()
187
+ subset = df[df["Athletes"] == min_val]
188
+ code = subset["NOC code" if "NOC code" in df.columns else "NOC"].iloc[0]
189
+ return code
190
+ except Exception as e:
191
+ return f"olympic_min_athletes error: {e}"
192
+
193
+ @tool
194
+ def npb_adjacent_numbers(player_last_name: str, team: str = "Hokkaido Nippon-Ham Fighters") -> str:
195
+ """
196
+ For a given player on a Nippon Professional Baseball (NPB) roster,
197
+ return the last names of the players whose jersey numbers are
198
+ immediately before and after that player’s number.
199
+
200
+ Args:
201
+ player_last_name (str): Surname (or part of it) to search for.
202
+ team (str, optional): Team name as used in the Wikipedia roster section.
203
+ Defaults to ``"Hokkaido Nippon-Ham Fighters"``.
204
+
205
+ Returns:
206
+ str: ``"<previous>, <next>"`` or an explanatory message.
207
+ """
208
+ url = "https://en.wikipedia.org/wiki/Hokkaido_Nippon-Ham_Fighters#Current_roster"
209
+ try:
210
+ tables = pd.read_html(url)
211
+ roster = pd.concat(tables)
212
+ roster.columns = [str(c) for c in roster.columns]
213
+ row = roster[roster.apply(lambda r: player_last_name.lower() in " ".join(map(str, r)).lower(), axis=1)]
214
+ if row.empty:
215
+ return "Player not found."
216
+ num = int(row.iloc[0]["No."])
217
+ before = roster[roster["No."] == num - 1]["Name"].iloc[0].split()[-1]
218
+ after = roster[roster["No."] == num + 1]["Name"].iloc[0].split()[-1]
219
+ return f"{before}, {after}"
220
+ except Exception as e:
221
+ return f"npb_adjacent_numbers error: {e}"
222
+
223
+ @tool
224
+ def vegetable_filter(items: str) -> str:
225
+ """
226
+ Filter a comma-separated grocery list down to recognised vegetables.
227
+
228
+ Args:
229
+ items (str): Items separated by commas (case-insensitive).
230
+
231
+ Returns:
232
+ str: Alphabetically-sorted vegetables, comma-separated,
233
+ or an empty string if none are present.
234
+ """
235
+ veggies = {
236
+ "sweet potatoes", "green beans", "corn", "bell pepper", "broccoli", "celery", "zucchini", "lettuce"
237
+ }
238
+ found = [i for i in map(str.strip, items.split(",")) if i.lower() in veggies]
239
+ return ", ".join(sorted(found))
240
+
241
+ @tool
242
+ def malko_first_name() -> str:
243
+ """
244
+ Return the *first* name of the earliest post-1977 winner of the
245
+ Nikolai Malko Conductors Competition who represented a now-defunct country.
246
+
247
+ Returns:
248
+ str: First name of that conductor, or an error message.
249
+ """
250
+ url = "https://en.wikipedia.org/wiki/Nikolai_Malko_Competition"
251
+ try:
252
+ tables = pd.read_html(url)
253
+ winners = tables[0]
254
+ # filter after 1977, nationality no longer existing (e.g., Yugoslavia, USSR, Czechoslovakia)
255
+ old_countries = {"Yugoslavia", "U.S.S.R.", "USSR", "Czechoslovakia", "U.S.S.R", "Soviet Union"}
256
+ subset = winners[winners["Year"] > 1977]
257
+ subset = subset[subset["Nationality"].isin(old_countries)]
258
+ first_name = str(subset.iloc[0]["Winner"]).split()[0]
259
+ return first_name
260
+ except Exception as e:
261
+ return f"malko_first_name error: {e}"
262
+
263
+ @tool
264
+ def excel_sum_food(xlsx_path: str) -> str:
265
+ """
266
+ Sum the “USD Sales” column for rows whose “Category” contains the word “food”.
267
+
268
+ Args:
269
+ xlsx_path (str): Path to an Excel workbook file.
270
+
271
+ Returns:
272
+ str: Total formatted to two decimals.
273
+ """
274
+ df = pd.read_excel(xlsx_path)
275
+ food_df = df[df["Category"].str.contains("food", case=False, na=False)]
276
+ total = food_df["USD Sales"].sum()
277
+ return f"{total:.2f}"
278
+
279
+ @tool
280
+ def nasa_award_from_article(article_url: str) -> str:
281
+ """
282
+ Extract a NASA award number cited in the first PDF linked from an article.
283
+
284
+ Args:
285
+ article_url (str): Web page containing (exactly one) PDF link.
286
+
287
+ Returns:
288
+ str: The award number or an explanatory failure message.
289
+ """
290
+ try:
291
+ soup = BeautifulSoup(requests.get(article_url, headers=HEADERS).text, "html.parser")
292
+ pdf_link = soup.find("a", href=re.compile(r"\.pdf$"))
293
+ if not pdf_link:
294
+ return "PDF link not found."
295
+ pdf_url = pdf_link["href"] if pdf_link["href"].startswith("http") else requests.compat.urljoin(article_url, pdf_link["href"])
296
+ import pdfplumber
297
+ with pdfplumber.open(requests.get(pdf_url, stream=True, headers=HEADERS).raw) as pdf:
298
+ text = "\n".join(page.extract_text() or "" for page in pdf.pages)
299
+ match = re.search(r"NASA award number\s+([A-Z0-9-]+)", text, re.I)
300
+ return match.group(1) if match else "Award number not found."
301
+ except Exception as e:
302
+ return f"nasa_award_from_article error: {e}"
303
+
304
+ @tool
305
+ def baseball_stat(player: str, season: int, stat: str) -> str:
306
+ """
307
+ Fetch a single statistic for an MLB player from Baseball-Reference.
308
+
309
+ Args:
310
+ player (str): Full player name (e.g. ``"Babe Ruth"``).
311
+ season (int): Four-digit season year.
312
+ stat (str): Column header exactly as it appears in the table (e.g. ``"HR"``).
313
+
314
+ Returns:
315
+ str: The requested stat value or an error message.
316
+ """
317
+ base = f"https://www.baseball-reference.com/players/{player[0].lower()}/{player[:5].lower()}{player.split()[-1][:2].lower()}01.shtml"
318
+ try:
319
+ df = pd.read_html(base)[0]
320
+ row = df[df["Year"] == season]
321
+ if row.empty:
322
+ return "Season not found."
323
+ return str(row.iloc[0][stat])
324
+ except Exception as e:
325
+ return f"baseball_stat error: {e}"
326
+
327
+ @tool
328
+ def safe_python_eval(code: str) -> str:
329
+ """
330
+ Execute user-supplied Python code in a RestrictedPython sandbox.
331
+
332
+ Args:
333
+ code (str): The code to run. The special variable ``_`` may capture
334
+ the last expression’s value.
335
+
336
+ Returns:
337
+ str: Captured stdout plus the value of ``_`` (if any), or an error string.
338
+ """
339
+ import restrictedpython as rp
340
+
341
+ try:
342
+ compiled = rp.compile_restricted(code, filename="<usercode>", mode="exec")
343
+ loc: Dict[str, Any] = {}
344
+ with io.StringIO() as buf, contextlib.redirect_stdout(buf):
345
+ exec(compiled, {"__builtins__": rp.utility_builtins}, loc)
346
+ output = buf.getvalue()
347
+ # fetch last expression result if stored under _
348
+ last = loc.get("_", "")
349
+ return (output + str(last)).strip()
350
+ except Exception as e:
351
+ return f"safe_python_eval error: {e}"
352
+
353
+ @tool
354
+ def actor_role_lookup(actor_full_name: str) -> str:
355
+ """
356
+ Find the given actor’s character first-name in the Polish TV series *Magda M.*.
357
+
358
+ Args:
359
+ actor_full_name (str): Actor’s full name as listed on Wikipedia.
360
+
361
+ Returns:
362
+ str: Character first-name or “Not found.”.
363
+ """
364
+ url = "https://pl.wikipedia.org/wiki/Magda_M._(serial_telewizyjny)"
365
+ try:
366
+ tables = pd.read_html(url)
367
+ cast = pd.concat(tables, ignore_index=True)
368
+ row = cast[cast.apply(lambda r: actor_full_name in " ".join(map(str, r)), axis=1)]
369
+ if row.empty:
370
+ return "Not found."
371
+ char_cell = row.iloc[0][1]
372
+ first_name = str(char_cell).split()[0]
373
+ return first_name
374
+ except Exception as e:
375
+ return f"actor_role_lookup error: {e}"
376
+
377
+ @tool
378
+ def libretext_lookup() -> str:
379
+ """Returns the vet surname from LibreTexts chemistry 1.E Exercises."""
380
+ url = "https://chem.libretexts.org/Bookshelves/General_Chemistry/Introductory_Chemistry_(CK-12)/01%3A_Introduction_to_Chemistry/1.E%3A_Exercises"
381
+ try:
382
+ soup = BeautifulSoup(requests.get(url, headers=HEADERS).text, "html.parser")
383
+ txt = soup.get_text("\n")
384
+ match = re.search(r"([A-Z][a-z]+)\s+is an equine veterinarian", txt)
385
+ return match.group(1) if match else "Surname not found."
386
+ except Exception as e:
387
+ return f"libretext_lookup error: {e}"
388
+
389
+ @tool
390
+ def featured_article_nominator(article_title: str) -> str:
391
+ """
392
+ Return the nominator(s) of a Wikipedia Featured Article promoted in November 2016.
393
+
394
+ Args:
395
+ article_title (str): Exact or substring match of the article title.
396
+
397
+ Returns:
398
+ str: Nominator names with footnote markers removed,
399
+ or an explanatory message if not found.
400
+ """
401
+ try:
402
+ log_url = "https://en.wikipedia.org/wiki/Wikipedia:Featured_articles/log/2016"
403
+ tables = pd.read_html(log_url)
404
+ # pick the table that has both the “Article” and “Nominator(s)” columns
405
+ df = next(
406
+ t for t in tables
407
+ if {"Article", "Nominator(s)"}.issubset(t.columns)
408
+ )
409
+ except StopIteration:
410
+ return "Could not find the FA log table."
411
+ except Exception as e:
412
+ return f"Error loading FA log: {e}"
413
+
414
+ # Exact- or substring match on the Article column
415
+ row = df[df["Article"].str.contains(article_title, case=False, na=False)]
416
+ if row.empty:
417
+ return "Article not found in the November 2016 FA log."
418
+
419
+ nominators = row.iloc[0]["Nominator(s)"]
420
+ # remove citation footnotes like [1], [note a], etc.
421
+ clean = re.sub(r"\[.*?]", "", str(nominators)).strip()
422
+ return clean or "Nominator not recorded."
423
+
424
+
425
+ @tool
426
+ def chess_from_image(image_path: str) -> str:
427
+ """
428
+ Analyse a chess diagram (Black to move) and return the engine’s best move.
429
+
430
+ Args:
431
+ image_path (str): Local file path to a chessboard image.
432
+
433
+ Returns:
434
+ str: Move in algebraic notation, or a “TODO” stub.
435
+ """
436
+ return "TODO: chess image analysis not implemented."
437
+
438
+ @tool
439
+ def whisper_transcribe(audio_path: str, scope: str = "full") -> str:
440
+ """
441
+ Transcribe audio using OpenAI Whisper.
442
+
443
+ Args:
444
+ audio_path (str): Path to an audio file supported by Whisper.
445
+ scope (str, optional): Portion of the output to return:
446
+ ``"full"``, ``"filling"``, or ``"pages"``. Defaults to ``"full"``.
447
+
448
+ Returns:
449
+ str: The transcription (or the requested subset) or an error message.
450
+ """
451
+ try:
452
+ import openai
453
+ client = openai.OpenAI()
454
+ with open(audio_path, "rb") as f:
455
+ transcript = client.audio.transcriptions.create(model="whisper-1", file=f)
456
+ text = transcript.text.strip()
457
+ if scope == "filling":
458
+ # Return only list after the word "filling" if present
459
+ seg = re.split(r"filling|filling:|for the filling", text, flags=re.I)
460
+ return seg[-1].strip() if len(seg) > 1 else text
461
+ return text
462
+ except Exception as e:
463
+ return f"whisper_transcribe error: {e}"
464
+
465
+ @tool
466
+ def youtube_video_birdcount(url: str, frame_skip: int = 15) -> str:
467
+ """
468
+ (Stub) Estimate the maximum number of bird species visible simultaneously in a video.
469
+
470
+ Args:
471
+ url (str): Full YouTube watch URL.
472
+ frame_skip (int, optional): Analyse every *n*-th frame. Defaults to 15.
473
+
474
+ Returns:
475
+ str: Placeholder text until the vision model is implemented.
476
+ """
477
+ return "TODO: bird species detection not implemented."
478
+
479
+ @tool
480
+ def discography_search(
481
+ artist: str,
482
+ start: int | None = None,
483
+ end: int | None = None
484
+ ) -> str:
485
+ """
486
+ Return a list (or count) of studio albums by *artist* optionally filtered
487
+ by release year.
488
+
489
+ Args:
490
+ artist (str): Band or solo-artist name (e.g. ``"Radiohead"``).
491
+ start (int | None, optional): Earliest year to include, inclusive.
492
+ If provided together with *end*, the function returns **only the
493
+ count** of albums in the range. Defaults to ``None``.
494
+ end (int | None, optional): Latest year to include, inclusive.
495
+ Must be supplied with *start* to take effect. Defaults to ``None``.
496
+
497
+ Returns:
498
+ str: • One bullet-per-album (``"• Title (Year)"``)
499
+ • *or* an integer count as a string when both *start* and *end*
500
+ are given.
501
+ • Error message if the discography page cannot be parsed.
502
+ """
503
+ wikipedia.set_lang("en")
504
+ target = f"{artist} discography"
505
+
506
+ def _get_page(title: str) -> Optional[wikipedia.WikipediaPage]:
507
+ try:
508
+ return wikipedia.page(title, auto_suggest=False)
509
+ except Exception:
510
+ return None
511
+
512
+ page = _get_page(target) or next((p for p in ( _get_page(t) for t in wikipedia.search(target)[:5] ) if p), None)
513
+ if page is None:
514
+ return "No Wikipedia discography page found."
515
+
516
+ albums: list[str] = []
517
+ try:
518
+ tables = [_flatten_multiindex(t) for t in pd.read_html(page.url, flavor="bs4")]
519
+ studio = next(
520
+ t for t in tables if any(re.search(r"studio", c, re.I) for c in t.columns)
521
+ )
522
+ title_col = next(c for c in studio.columns if re.search(r"(title|album)", c, re.I))
523
+ year_col = next((c for c in studio.columns if re.search(r"year", c, re.I)), None)
524
+ for _, row in studio.iterrows():
525
+ title = re.sub(r"\[.*?]", "", str(row[title_col])).strip()
526
+ year_match = re.search(r"(\d{4})", str(row[year_col] if year_col else ""))
527
+ year = int(year_match.group(1)) if year_match else None
528
+ albums.append((title, year))
529
+ except Exception as e:
530
+ return f"Error parsing discography tables: {e}"
531
+
532
+ if start is not None and end is not None:
533
+ return str(sum(1 for _, y in albums if y and start <= y <= end))
534
+ return "\n".join(f"• {t} ({y})" for t, y in albums)
535
+
536
+ toolkit = [
537
+ discography_search, youtube_quote, youtube_video_birdcount,
538
+ whisper_transcribe, chess_from_image, featured_article_nominator,
539
+ commutativity_counterexample, libretext_lookup, actor_role_lookup,
540
+ safe_python_eval, baseball_stat, nasa_award_from_article, pdf_find_string,
541
+ olympic_min_athletes, npb_adjacent_numbers, excel_sum_food,
542
+ malko_first_name, vegetable_filter,
543
+ ]
544
+
545
+ model = InferenceClientModel(
546
+ model_id="gpt-4.1", # or "gpt-3.5-turbo"
547
+ provider="openai",
548
+ api_key=os.environ.get("OPENAI_API_KEY")
549
+ )
550
+
551
+ # --- Agent Class ---
552
+ agent = CodeAgent(
553
+ name="BasicAgent",
554
+ description="An agent capable of answering questions using various tools for its tasks.",
555
+ tools=toolkit,
556
+ model=model,
557
+ planning_interval=5
558
+ )
559
 
560
  def run_and_submit_all( profile: gr.OAuthProfile | None):
561
  """
 
577
  submit_url = f"{api_url}/submit"
578
 
579
  # 1. Instantiate Agent ( modify this part to create your agent)
580
+ global agent
 
 
 
 
581
  # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
582
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
583
  print(agent_code)