import json
import os

import gradio as gr
import pandas as pd
import requests
from bs4 import BeautifulSoup
from smolagents import (
    CodeAgent,
    GoogleSearchTool,
    InferenceClientModel,
    OpenAIServerModel,
    tool,
    VisitWebpageTool,
    WikipediaSearchTool,
)

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


@tool
def extract_table_from_html(html: str, match: str | None = None) -> list:
    """
    A tool that extracts HTML tables from HTML content and returns them as pandas DataFrames.
    Example use cases include extracting tables from Wikipedia pages, HTML emails, or other web content.
    Args:
        html (str): The HTML content containing HTML tables to extract. This can be raw HTML
                   string content or a URL to a webpage.
        match (str | None, optional): A string or regular expression pattern to match
                                    against table text content. If None, all tables
                                    are extracted. Defaults to None.
                                    DO NOT use HTML strings / tags in this parameter.

    Returns:
        list: A list of pandas DataFrames, where each DataFrame represents a table found
              in the HTML content. Returns an empty list if no tables are found.
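
    Example:
        >>> # Minimal illustration; a URL string would also work, since pd.read_html accepts URLs.
        >>> html = "<table><tr><th>City</th></tr><tr><td>Paris</td></tr></table>"
        >>> tables = extract_table_from_html(html)
        >>> print(tables[0] if tables else "No tables found")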
    """
    import pandas as pd

    try:
        # Extract tables using pandas
        if match is not None:
            tables = pd.read_html(html, match=match)
        else:
            tables = pd.read_html(html)

        # Return the list of DataFrames directly
        return tables if tables else []

    except ValueError as e:
        if "No tables found" in str(e):
            # Return an empty list instead of raising an error
            return []
        raise ValueError(f"Error extracting tables from HTML content: {e}") from e
    except Exception as e:
        raise RuntimeError(f"Failed to extract tables from HTML content: {e}") from e


@tool
def audio_to_text(file_path: str) -> str:
    """
    A tool that converts audio files to text using OpenAI's Whisper speech recognition model.

    This function transcribes audio content from a local audio file and returns the transcript
    as a JSON string containing timestamped segments. It uses the Whisper "base" model for
    speech-to-text conversion.

    Args:
        file_path (str): The local file path to the audio file to be transcribed.
                        Supports common audio formats like MP3, WAV, M4A, FLAC, etc.

    Returns:
        str: A JSON string containing the transcript data with the following structure:
             {
                 "transcript": [
                     {
                         "start": float,  # Start time in seconds
                         "end": float,    # End time in seconds
                         "text": str      # Transcribed text segment
                     },
                     ...
                 ]
             }

    Raises:
        FileNotFoundError: If the specified audio file does not exist.
        Exception: If the audio file cannot be processed or transcribed.

    Example:
        >>> result = audio_to_text("path/to/audio.mp3")
        >>> import json
        >>> transcript_data = json.loads(result)
        >>> for segment in transcript_data["transcript"]:
        ...     print(f"{segment['start']:.2f}s - {segment['end']:.2f}s: {segment['text']}")

    Note:
        - Uses OpenAI Whisper "base" model for transcription
        - Processes audio without verbose output or word-level timestamps
        - Returns empty segments list if no speech is detected
        - Processing time depends on audio file length and system performance
    """
    import json

    import whisper

    model = whisper.load_model("base")
    result = model.transcribe(file_path, verbose=False, word_timestamps=False)

    transcript_data = [
        {
            "start": segment["start"],
            "end": segment["end"],
            "text": segment["text"].strip(),
        }
        for segment in result["segments"]
    ]

    return json.dumps({"transcript": transcript_data})


@tool
def get_wikipedia_page_url_by_year(wikipedia_page_name: str, year: int) -> str:
    """
    Retrieve Wikipedia page URL for a specific year (latest revision in that year).

    Args:
        wikipedia_page_name (str): Name of the Wikipedia page
        year (int): Year to get the page content from

    Returns:
        str: URL of the Wikipedia page from that year with revision included
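
    Example:
        >>> # Illustrative only; the exact URL depends on the live revision history.
        >>> url = get_wikipedia_page_url_by_year("Python (programming language)", 2020)
        >>> print(url)  # e.g. https://en.wikipedia.org/w/index.php?title=...&oldid=...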
    """
    import requests
    import wikipediaapi

    # Create Wikipedia API instance
    wiki = wikipediaapi.Wikipedia(
        user_agent="Final Project Agent Course (vthanhvinh@gmail.com)",
        language="en",
    )

    # Get the page
    page = wiki.page(wikipedia_page_name)
    if not page.exists():
        raise ValueError(f"Wikipedia page '{wikipedia_page_name}' does not exist")

    # Use Wikipedia API to get revisions from the specified year
    api_url = "https://en.wikipedia.org/w/api.php"

    # Get the latest revision from the specified year: enumerate newest-first
    # (rvdir="older") from Dec 31 back to Jan 1 and take the first hit
    params = {
        "action": "query",
        "format": "json",
        "prop": "revisions",
        "titles": wikipedia_page_name,
        "rvprop": "ids|timestamp",
        "rvstart": f"{year}-12-31T23:59:59Z",
        "rvend": f"{year}-01-01T00:00:00Z",
        "rvdir": "older",
        "rvlimit": 1,
    }

    response = requests.get(api_url, params=params, timeout=30)
    response.raise_for_status()
    data = response.json()
    pages = data["query"]["pages"]
    page_id = list(pages.keys())[0]
    revisions = pages[page_id].get("revisions", [])

    if not revisions:
        raise ValueError(
            f"No revisions found for '{wikipedia_page_name}' in year {year}"
        )

    # Get revision ID and construct the URL (underscores keep the title URL-safe)
    rev_id = revisions[0]["revid"]
    title = wikipedia_page_name.replace(" ", "_")
    url = f"https://en.wikipedia.org/w/index.php?title={title}&oldid={rev_id}"

    return url


@tool
def get_wikipedia_section_tables(
    section_name: str, soup_object: BeautifulSoup
) -> list[pd.DataFrame]:
    """
    A tool that extracts tables from a specific section of a Wikipedia page using BeautifulSoup and pandas.

    This function searches for a section in the following order:
    1. First tries to find an element with ID matching the section name
    2. If not found, tries to find an h2 element with text matching the section name
    3. If not found, tries to find an h3 element with text matching the section name

    Once the section is found, it goes to the parent element, finds the next <table> sibling,
    and uses pandas read_html to extract the table data.

    Args:
        section_name (str): The name of the section to extract table from
        soup_object: A BeautifulSoup object containing the parsed HTML content

    Returns:
        list: A list of pandas DataFrames representing tables found after the section,
              or empty list if no tables found

    Example:
        >>> from bs4 import BeautifulSoup
        >>> html = "<html><body><h2>Statistics</h2><table><tr><td>Data</td></tr></table></body></html>"
        >>> soup = BeautifulSoup(html, 'html.parser')
        >>> tables = get_wikipedia_section_tables("Statistics", soup)
        >>> print(tables[0] if tables else "No tables found")
    """
    import pandas as pd
    from bs4 import BeautifulSoup

    if not soup_object:
        return []

    # Ensure we have a BeautifulSoup object
    if not isinstance(soup_object, BeautifulSoup):
        return []

    section_element = None

    # Strategy 1: Try to find element with ID same as section name
    # Convert section name to potential ID format (replace spaces with underscores, etc.)
    section_id = section_name.replace(" ", "_")
    element = soup_object.find(id=section_id)
    if element:
        section_element = element

    # Strategy 2: Try to find h2 element with text same as section name
    if not section_element:
        h2_elements = soup_object.find_all("h2")
        for h2 in h2_elements:
            if h2.get_text().strip() == section_name:
                section_element = h2
                break

    # Strategy 3: Try to find h3 element with text same as section name
    if not section_element:
        h3_elements = soup_object.find_all("h3")
        for h3 in h3_elements:
            if h3.get_text().strip() == section_name:
                section_element = h3
                break

    # If no section found, return empty list
    if not section_element:
        return []

    # Go to parent element and find next table sibling
    parent = section_element.parent
    if not parent:
        return []

    # Find the next table sibling from the parent
    table = parent.find_next_sibling("table")
    if not table:
        return []
    try:
        # Use pandas read_html to extract table data
        table_html = str(table)
        tables = pd.read_html(table_html)
        return tables if tables else []
    except ValueError:
        # No tables found or parsing error
        return []
    except Exception:
        # Any other error
        return []


@tool
def download_file(question_id: str, file_name: str) -> str:
    """
    A tool that downloads a file that was mentioned in a question and stores it as a local file.
    Returns a JSON string containing the file path and optionally the text content if the file has a text MIME type.

    Args:
        question_id: Question ID.
        file_name: File name.
    Returns:
        str: JSON string containing file information. Structure:
             - For text files: {"path": "local_path", "content": "file_content"}
             - For non-text files: {"path": "local_path"}
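
    Example:
        >>> # Placeholder IDs for illustration; the file is fetched from
        >>> # f"{DEFAULT_API_URL}/files/{question_id}".
        >>> info = json.loads(download_file("some-task-id", "data.xlsx"))
        >>> print(info["path"])  # -> data.xlsx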
    """
    import json
    import os

    import requests

    url = f"{DEFAULT_API_URL}/files/{question_id}"
    print(f"Fetching file from URL: {url}")

    # Keep a reference to the response so the error handler below can inspect it
    response = None
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # Raises an HTTPError for bad responses

        # Check if response is empty
        if not response.content:
            raise ValueError(f"Empty response received from {url}")

        # Check content type
        content_type = response.headers.get("content-type", "").lower()
        print(f"Response content-type: {content_type}")
        print(f"Response content length: {len(response.content)} bytes")

        # Use original filename directly
        local_path = file_name

        # Save the file locally
        with open(local_path, "wb") as f:
            f.write(response.content)

        print(f"File saved to: {local_path}")

        # Check if the file has a text MIME type
        text_mime_types = [
            "text/",
            "application/json",
            "application/xml",
            "application/javascript",
            "application/csv",
            "application/x-csv",
            "text/csv",
        ]

        is_text_file = any(
            content_type.startswith(mime_type) for mime_type in text_mime_types
        )

        result = {"path": local_path}

        if is_text_file:
            # Decode response content as text; replace undecodable bytes instead of failing
            text_content = response.content.decode("utf-8", errors="replace")
            result["content"] = text_content
            print(
                f"Added text content to result (length: {len(text_content)} characters)"
            )

        return json.dumps(result)

    except requests.exceptions.RequestException as e:
        raise ValueError(f"Failed to download file from {url}: {e}")
    except Exception as e:
        # Print first 200 characters of response content for debugging
        content_preview = (
            response.content[:200]
            if response and hasattr(response, "content")
            else b"No response"
        )
        print(f"Error downloading file. Content preview: {content_preview}")
        raise ValueError(f"Failed to download file from {url}: {e}")


# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        self.multimodal_agent = CodeAgent(
            tools=[
                VisitWebpageTool(),
                GoogleSearchTool("serper"),
                download_file,
                audio_to_text,
                WikipediaSearchTool(),
                get_wikipedia_page_url_by_year,
                get_wikipedia_section_tables,
            ],
            model=OpenAIServerModel(model_id="gpt-4o"),
            additional_authorized_imports=[
                "requests",
                "bs4",
                "markdownify",
                "wikipedia",
                "pandas",
                "io",
                "PIL",
                "img2text",
                "PIL.Image",
                "cv2",
                "numpy",
                "whisper",
                "openpyxl",
                "json",
                "wikipediaapi",
                "pytube",
                "pytubefix",
                "pytubefix.cli",
                "youtube_transcript_api",
            ],
            name="multimodal_agent",
            description="""
                 This is a powerful agent, it specializes in: 
                    - Writing code to solve problem. 
                    - Solving hard Maths problems. 
                    - Browse the web to find information.
                    - Reason across audio, vision, and text, a.k.a multimodal agent. """,
            max_steps=5,
        )

        self.manager_agent = CodeAgent(
            model=InferenceClientModel(
                model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            ),
            tools=[
                download_file,
                audio_to_text,
                get_wikipedia_page_url_by_year,
                get_wikipedia_section_tables,
            ],
            managed_agents=[self.multimodal_agent],
            additional_authorized_imports=[
                "requests",
                "bs4",
                "markdownify",
                "wikipedia",
                "io",
                "pandas",
                "PIL",
                "img2text",
                "PIL.Image",
                "cv2",
                "numpy",
                "openpyxl",
                "json",
                "wikipediaapi",
                "pytube",
                "pytubefix",
                "pytubefix.cli",
                "youtube_transcript_api",
            ],
            planning_interval=2,
            max_steps=10,
        )

    def __call__(self, question: str, question_id: str, file_name: str) -> str:
        print(f"Agent received question: {question}")
        file = f"Provided data file: {file_name}" if file_name else ""
        metadata = {}
        metadata["question_id"] = question_id
        if file_name:
            metadata["file_name"] = file_name

        prompt = f"""
            Answer the following question: 
               "{question}". 
            Question metadata in JSON format: 
            ```
            {json.dumps(metadata)}
            ```
            Follow the rules below where applicable:
                - Please take the question literally! Do not add any additional information or assumptions.
                - Please answer as concisely as possible.
                - If the question asks for a number, please return a numerical answer without a unit (unless the unit is specifically asked for). For example: 3 instead of three, 0 instead of None, 3 instead of $3.
                - If the question asks for a number with specific decimal places, please format the number as a string with the same number of decimal places. For example: 3.00 instead of 3.
                - If the question asks for a list, please make sure the elements are separated by a comma (`,`) and a space (` `). For example: `1, 2, 3` instead of `1,2,3`.
                - If the question asks for a name without abbreviations, please ALWAYS ask `multimodal_agent` for the FULL name of the final answer to ensure NO abbreviation appears in the Final Answer. For example: `United States` instead of `US`.
                - To parse data from a Wikipedia page, please use the `get_wikipedia_section_tables` tool.
        """
        if "food" in question.lower() or "drink" in question.lower():
            prompt = f"""
            {prompt}
               - Be careful about the difference between food and drink items. For instance: Ice Cream is a food item!
            """
        result = self.manager_agent.run(prompt)
        print(f"Agent responded with: {result}")
        return result
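
# Minimal standalone usage sketch (illustrative; assumes OpenAI and Hugging Face
# credentials are configured in the environment, and uses a placeholder task id):
#
#   agent = BasicAgent()
#   answer = agent("What is 2 + 2?", question_id="demo-task-id", file_name="")
#   print(answer)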


def run_and_submit_all(question_id: str, profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    response = None
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if question_id:
            questions_data = [
                item for item in questions_data if item.get("task_id") == question_id
            ]
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response: {response}")
        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        print(f"Question data: {json.dumps(item)}")
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text, task_id, file_name)
            answers_payload.append(
                {"task_id": task_id, "submitted_answer": submitted_answer}
            )
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": submitted_answer,
                }
            )
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": f"AGENT ERROR: {e}",
                }
            )

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        print(f"Submission_data: {submission_data}")
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print(f"Submission successful. Final status: {final_status}")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to work around the long-running submit button, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
        """
    )
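
    # Hedged sketch of the async/caching idea from the disclaimer above (not wired
    # into this app): run the per-question agent calls in a thread pool so answers
    # can be cached and submitted in a separate step. Names here are illustrative.
    #
    #   from concurrent.futures import ThreadPoolExecutor
    #
    #   def answer_all(agent, items):
    #       with ThreadPoolExecutor(max_workers=4) as pool:
    #           futures = {
    #               item["task_id"]: pool.submit(
    #                   agent, item["question"], item["task_id"], item.get("file_name")
    #               )
    #               for item in items
    #           }
    #           return {task_id: f.result() for task_id, f in futures.items()}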

    gr.LoginButton()

    question_id = gr.Textbox(
        label="Question id to solve (leave empty to solve all)",
        lines=1,
        interactive=True,
        value="",  # default to solving all questions; set a single task_id here to debug
    )
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False
    )
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        inputs=[question_id],
        outputs=[status_output, results_table],
    )

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(
            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
        )
    else:
        print(
            "ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
        )

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)