import os
from base64 import b64encode
from contextlib import suppress
from pprint import pprint
from time import sleep
from typing import List, Dict, Any, Tuple

import openai
import gradio as gr
import requests
import pandas as pd

from langgraph.graph import MessagesState, StateGraph, START
from langgraph.prebuilt import ToolNode, tools_condition

from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tools import tool
from langchain_tavily import TavilySearch


# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

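# Shared chat model for the agent and its tools; expects OPENAI_API_KEY in the environment.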
model = ChatOpenAI(model="gpt-4o", temperature=0)


class State(MessagesState):
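    # MessagesState already provides the `messages` list; we additionally keep the raw question.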
    question: str


class BasicAgent:
    def __init__(self):
        self.tools = [
            BasicAgent.search_tool,
            BasicAgent.find_local_files_tool,
            BasicAgent.read_text_file_tool,
            BasicAgent.vision_tool,
            BasicAgent.audio_qa_tool,
            BasicAgent.excel_tool
        ]

        # Chat model with tool support
        self.model_with_tools = model.bind_tools(self.tools, parallel_tool_calls=False)
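        # parallel_tool_calls=False keeps the ReAct loop sequential: one tool call per assistant turn.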

        # LangGraph
        self.graph = StateGraph(State)
        self.graph.add_node("assistant", self.assistant)
        self.graph.add_node("tools", ToolNode(self.tools))

        self.graph.add_edge(START, "assistant")
        self.graph.add_conditional_edges("assistant", tools_condition)  # decide if tools should be called
        self.graph.add_edge("tools", "assistant")  # loop back

        self.compiled_graph = self.graph.compile()
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> Tuple[str, List[Dict[str, Any]]]:
        print(f"\nAgent received question: {question}")
        sys_msg = SystemMessage(
            content="""
        You are a ReAct (Reasoning and Acting) agent with self-reflection. For each question:

        1. **Thought:** Briefly outline your reasoning step.  
        2. **Reflect:** Check “Did I use all observations? Did my tool call succeed?”  
        3. **Action:** Either call a tool (with arguments) or prepare your final answer.  
        4. **Final Answer:** Provide only the bare result (no labels, no extra text, no actions, no thoughts, no reflection, no "Final Answer" string in the result). For questions that contain phrases like `what is the number` or 
        `what is the highest number`, return just the number, e.g., 2.
        
        **Answer Format Rules**  
        - If the answer is a number, output digits only (no commas, no units, no strings like “one”, “twenty three”).  
        - If it’s a word or phrase, use no articles and no abbreviations (e.g., for cities: Saint Louis, not St. Louis).  
        - If it’s a comma separated list, output a comma-separated list following the above rules for each element.  
        - **Always** output exactly one line as an answer and nothing else.
        
        **Example 1**  
        Q: What is 7 × 6?  
        Thought: Multiply 7 by 6.  
        Reflect: Simple arithmetic, no tool needed.  
        Final Answer: 42
        
        **Example 2**  
        Q: How many prime numbers are there under 20?  
        Thought: Primes under 20 are 2, 3, 5, 7, 11, 13, 17, 19 (8 total).  
        Reflect: Count is correct.  
        Final Answer: 8
        
        **Example 3**  
        Q: Sort “banana”, “apple”, “cherry” alphabetically descending.  
        Thought: Alphabetical descending: cherry, banana, apple.  
        Reflect: Order and formatting confirmed.  
        Final Answer: cherry, banana, apple

        **Example 4**
        Q: The attached csv file contains the amount of impressions for an ad campaign. What was the total amount of click revenue that occurred after 2024-01-01? Express your answer in EUR with two decimal places.
        Thought: Calculate the total amount of revenue for clicks across all dates after 2024-01-01.
        Reflect: I have all the necessary data from the csv file.
        Action: Multiply the clicks amount by the revenue per click for each row after 2024-01-01, then sum these values.
        Final Answer: 283934.00

        **Example 5**
        Q: What is the number of the most performant desktop processor model from Ryzen 1000 series?
        Thought: The number of the most performant desktop processor model from Ryzen 1000 series is 1800X.
        Reflect: I know the answer, displaying only the model number without anything else.
        Final Answer: 1800X
        ---
        
        Now answer the next question following this chain-of-thought + reflection pattern, and output **only** the `Final Answer` in the required format.

        """
        )
            
        state = State(
            question=question, 
            messages=[sys_msg, HumanMessage(content=question)]
        )
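        # recursion_limit caps assistant<->tools round-trips so a runaway loop fails fast.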
        config = RunnableConfig(recursion_limit=15)
        result = self.compiled_graph.invoke(state, config)
        final_answer = result["messages"][-1].content
        print(f"\nFinal Answer: {final_answer}")
        return final_answer, result["messages"]

    def assistant(self, state: State):
        print("\nAssistant invoked. State:\n")
        pprint(state)
        response = self.model_with_tools.invoke(state["messages"])
        print("\nAssistant response:", response)
        return {
            "messages": [response]
        }

    @staticmethod
    @tool(
      description="Search the web using TavilySearch and return the final snippet.",
    )
    def search_tool(question: str, max_length: int = 100000) -> str:
        print(f"\nCalling search tool with: {question}, max_lentgh: {max_length}")
        search_ = TavilySearch(
            max_results=4,
            topic="general",
        )
        info = search_.invoke({"query": question})
        result = "\n".join(m["content"] for m in info["results"])
        print("f\nSearch result: {result}")
        return result[:max_length]

    @staticmethod
    @tool(
      description="List task files.",
    )
    def find_local_files_tool() -> list[str]:
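        # run_and_submit_all (below) saves downloads as task_file_<task_id>, hence this prefix filter.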
        print(f"\nCalling find local files tool")
        files = [f for f in os.listdir() if os.path.isfile(f) and f.startswith('task_file_')]
        print(f"\nReturning", files)
        return files

    @staticmethod
    @tool(
      description="Read the text file and return it's content.",
    )
    def read_text_file_tool(file_name: str) -> str:
        print(f"\nCalling read text file tool for", file_name)
        print("File metadata:", os.stat(file_name))
        with open(file_name, 'r') as f:
            return f.read()

    @staticmethod
    @tool(
        description="Analyze an image file and answer a follow-up question about its content."
    )
    def vision_tool(path: str, question: str) -> str:
        """
        Args:
          path: Path to a local image file.
          question: What you want to know (e.g. 'How many people are in this photo?').
        Returns:
          The LLM’s answer based on the image content.
        """
        if not os.path.exists(path):
            return f"Error: file not found at {path}"

        print("File metadata:", os.stat(path))

        with open(path, "rb") as f:
            b64 = b64encode(f.read()).decode()

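        # Best-effort MIME type from the file extension; anything that isn't JPEG is sent as PNG.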
        ext = os.path.splitext(path)[1].lower().lstrip(".")
        mime = f"image/{'jpeg' if ext in ('jpg','jpeg') else 'png'}"
        
        # Build the multimodal message: the question text plus the base64-encoded image
        msg = HumanMessage(content=[
            {"type": "text", "text": question},
            {
              "type": "image_url",
              "image_url": {"url": f"data:{mime};base64,{b64}"}
            }
        ])
        
        response = model.invoke([SystemMessage(content="Analyze the image and answer the question."), msg])
        result = response.content
        print("Result:", result)
        return result

    @staticmethod
    @tool(
        description="Transcribe an audio file with Whisper and answer a question about its content."
    )
    def audio_qa_tool(path: str, question: str, max_chars: int = 10000) -> str:
        """
        Args:
          path: Local filesystem path to an audio file (mp3, wav, etc.).
          question: What to ask about the audio content.
          max_chars: Maximum length of the returned answer.
        Returns:
          The LLM’s answer, based on the transcript (truncated if necessary).
        """
        if not os.path.exists(path):
            return f"Error: file not found at {path}"

        print("File metadata:", os.stat(path))
        with open(path, "rb") as audio_file:
            client = openai.OpenAI()
            transcription = client.audio.transcriptions.create(
                file=audio_file,
                model="whisper-1"
            )
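        # whisper-1 returns a plain-text transcript; we then ask the chat model about it.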
        transcript = transcription.text
        prompt = f"""
            Here is a transcript of an audio file:
            '''{transcript}'''

            Question: '''{question}'''
                    
            Please answer briefly based on this transcript, and give only the answer.
        """
        response = model.invoke([{"role": "user", "content": prompt}])
    
        answer = response.content.strip()
        return answer[:max_chars]


    @staticmethod
    @tool(
        description="Load an Excel file and returns it's text representation."
    )
    def excel_tool(path: str) -> str:
        """
        Args:
          path: Path to the .xlsx file.
        Returns:
          The string form of the content.
        """
        df = pd.read_excel(path, engine='openpyxl')
        return df.to_csv(index=False)
    

def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
             print("Fetched questions list is empty.")
             return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
         print(f"Error decoding JSON response from questions endpoint: {e}")
         print(f"Response text: {response.text[:500]}")
         return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue

        try:
            file_url = f"{api_url}/files/{task_id}"
            file_name = f"task_file_{task_id}"
            response = requests.get(file_url, timeout=15)
            response.raise_for_status()  # avoid saving an error page as a task file
            with open(file_name, "wb") as file:
                file.write(response.content)
        except Exception as e:
            print(f"Exception occurred while trying to download {file_name} from {file_url}:", e)
            print("Couldn't download a file; one probably isn't expected for this task.")
        
        try:
            submitted_answer, logs = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
            print(f"\n\n\n==============Finishing task id: {task_id}, question_text: {question_text}==============\n\n\n")
            sleep(2)
        except Exception as e:
             print(f"Error running agent on task {task_id}: {e}")
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
        finally:
            with suppress(Exception):
                os.remove(file_name)

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission 
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


def check_agent(question: str):
    agent = BasicAgent()
    final_answer, msgs = agent(question)
    return final_answer, "\n\n".join([str(msg) for msg in msgs])


# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the submit button's delay, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )

    with gr.Row():
        question_input = gr.Textbox(label="Enter your question", placeholder="e.g., What is the capital of France?", lines=10)
        check_button = gr.Button("Check Answer")

    final_output = gr.Textbox(label="✅ Final Answer", lines=10, interactive=False)
    logs_output = gr.Textbox(label="📝 Agent Logs", lines=20, interactive=False)

    check_button.click(
        fn=check_agent,
        inputs=question_input,
        outputs=[final_output, logs_output]
    )

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)