Solobrad committed
Commit be59d27 · verified · Parent: 00abb37

Update app.py
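This change appears to split the LlamaIndex agent definition (model fallback list, math and search tools, AgentWorkflow setup) out of app.py into a separate agent.py module, and rewrites app.py as a Gradio evaluation runner that fetches the scoring service's questions, answers each one through run_with_fallback, and submits the results.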

Files changed (1):
  1. app.py +135 -146
app.py CHANGED
@@ -1,155 +1,144 @@
  import os
  import requests
- from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
- from langchain_community.document_loaders import WikipediaLoader
- from llama_index.core.tools.types import ToolMetadata
- from llama_index.core.schema import Document
- from llama_index.core.tools import FunctionTool
- from langchain_community.tools.tavily_search import TavilySearchResults
- from llama_index.core.agent.workflow import AgentWorkflow
-
- hf_token = os.getenv("HF_TOKEN")
-
- # List of models to try in order
- model_list = [
-     "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-     "microsoft/phi-3-mini-128k-instruct",
-     "google/gemma-2b-it",
-     "gpt2"
- ]
-
- current_model_index = 0
- llm = HuggingFaceInferenceAPI(
-     model_name=model_list[current_model_index],
-     token=hf_token,
- )
-
- # Numerical operation functions
- def multiply(a: int, b: int) -> int:
-     """Multiply two numbers."""
-     return a * b
-
- def add(a: int, b: int) -> int:
-     """Add two numbers."""
-     return a + b
-
- def subtract(a: int, b: int) -> int:
-     """Subtract two numbers."""
-     return a - b
-
- def divide(a: int, b: int) -> float:
-     """Divide two numbers, raises error on zero divisor."""
-     if b == 0:
-         raise ValueError("Cannot divide by zero.")
-     return a / b
-
- def modulus(a: int, b: int) -> int:
-     """Get the modulus of two numbers."""
-     return a % b
-
- # Web search tool function
- def web_search(query: str) -> list:
-     """Search Tavily for a query and return up to 3 results."""
-     results = TavilySearchResults(max_results=3).invoke(query=query)
-     docs = []
-     for r in results:
-         meta = {"source": r.metadata.get("source", ""), "page": r.metadata.get("page", "")}
-         docs.append(Document(text=r.page_content, metadata=meta))
-     return docs
-
- # Wikipedia search tool function
- def wiki_search(query: str) -> list:
-     """Search Wikipedia for a query and return up to 2 results."""
-     results = WikipediaLoader(query=query, load_max_docs=2).load()
-     docs = []
-     for r in results:
-         meta = {"source": r.metadata.get("source", ""), "page": r.metadata.get("page", "")}
-         docs.append(Document(text=r.page_content, metadata=meta))
-     return docs
-
- # Wrap functions into FunctionTool instances
- web_search_tool = FunctionTool(
-     web_search,
-     metadata=ToolMetadata(name="web_search", description="Tavily 3-hit search")
- )
- wiki_search_tool = FunctionTool(
-     wiki_search,
-     metadata=ToolMetadata(name="wiki_search", description="Wikipedia 2-hit search")
- )
-
- multiply_tool = FunctionTool(multiply, metadata=ToolMetadata(name="multiply", description="Multiply two numbers."))
- add_tool = FunctionTool(add, metadata=ToolMetadata(name="add", description="Add two numbers."))
- subtract_tool = FunctionTool(subtract, metadata=ToolMetadata(name="subtract", description="Subtract two numbers."))
- divide_tool = FunctionTool(divide, metadata=ToolMetadata(name="divide", description="Divide two numbers."))
- modulus_tool = FunctionTool(modulus, metadata=ToolMetadata(name="modulus", description="Modulus operation on two numbers."))
-
- # Aggregate all tools
- tools = [
-     web_search_tool,
-     wiki_search_tool,
-     multiply_tool,
-     add_tool,
-     subtract_tool,
-     divide_tool,
-     modulus_tool,
- ]
-
- # Initialize agent
- agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)
-
- # Function to try the next model in the list
- def try_next_model():
-     """Switch to the next model in the list and reinitialize the agent.
-     Returns True if successful, False if we've tried all models."""
-     global current_model_index, llm, agent
-
-     current_model_index += 1
-     if current_model_index >= len(model_list):
-         return False
-
-     # Reinitialize LLM with new model
-     llm = HuggingFaceInferenceAPI(
-         model_name=model_list[current_model_index],
-         token=hf_token,
-     )
 
-     # Reinitialize agent with new LLM
-     agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)
-     return True
-
- # Run with fallback logic
- def run_with_fallback(query: str):
-     global current_model_index, llm, agent
-
-     # Reset to first model if we're not already on it
-     if current_model_index != 0:
-         current_model_index = 0
-         llm = HuggingFaceInferenceAPI(
-             model_name=model_list[current_model_index],
-             token=hf_token,
-         )
-         agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)
 
-     # Try each model in sequence
-     for i in range(len(model_list)):
          try:
-             result = agent.run(query)
-             print(f"Successfully ran query with model: {model_list[current_model_index]}")
-             return result
          except Exception as e:
-             print(f"Error with model {model_list[current_model_index]}: {e}")
-             if i < len(model_list) - 1:  # If not the last model
-                 try_next_model()
-             else:
-                 break
 
-     return "Sorry, encountered issues with all models."
 
- # Make agent.run() work with asyncio by adding async support
- async def run(query: str):
-     """Async wrapper for the agent.run method to be compatible with app.py"""
-     return run_with_fallback(query)
 
- # Add the async run method to the agent object
- agent.run = run_with_fallback  # Replace with synchronous version for direct calls
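Note on the removed block: it has not been deleted outright. The new app.py below imports agent and run_with_fallback from an agent module, so this code has presumably moved into a separate agent.py. One caveat for anyone reusing it: wiki_search is sound (WikipediaLoader returns LangChain documents with page_content and metadata), but web_search as written would fail, since TavilySearchResults.invoke() expects the query as its positional input and returns plain dicts rather than document objects. A minimal corrected sketch, assuming the langchain_community Tavily tool's usual "url"/"content" result keys:

    from langchain_community.tools.tavily_search import TavilySearchResults
    from llama_index.core.schema import Document

    def web_search(query: str) -> list:
        """Search Tavily for a query and return up to 3 results as Documents."""
        # Tavily results are dicts like {"url": ..., "content": ..., ...}
        results = TavilySearchResults(max_results=3).invoke(query)
        return [
            Document(text=r["content"], metadata={"source": r.get("url", "")})
            for r in results
        ]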
 
 
+
+ # app.py
  import os
+ import gradio as gr
  import requests
+ import pandas as pd
+ from agent import agent, run_with_fallback  # Import run_with_fallback directly
+ import asyncio
+ import nest_asyncio
+ nest_asyncio.apply()
+
+ # Constants
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ # Async helper to run the agent - modified to use run_with_fallback
+ async def run_agent(agent, question_text):
+     """Run the agent in a way that's compatible with asyncio"""
+     # Use the current event loop (nest_asyncio makes re-entry safe)
+     loop = asyncio.get_event_loop()
+     # Run the synchronous function in the executor
+     return await loop.run_in_executor(None, run_with_fallback, question_text)
+
+ # Gradio Agent Interface
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the LlamaIndexAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
+
+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please Login to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate LlamaIndexAgent
+     print("Using imported agent instance.")
+
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+
+     # 3. Run your LlamaIndex Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
 
+     # Create a new event loop for this function
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
 
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
          try:
+             # Run the async function in the loop
+             submitted_answer = loop.run_until_complete(run_agent(agent, question_text))
+
+             # Ensure serializable response
+             if not isinstance(submitted_answer, (str, dict, list, int, float, bool, type(None))):
+                 submitted_answer = str(submitted_answer)
+
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+
          except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
+     # Close the loop when done
+     loop.close()
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.RequestException as e:
+         print(f"Submission failed: {e}")
+         return f"Submission failed: {e}", pd.DataFrame(results_log)
+
+
+ # Gradio Interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# LlamaIndex Agent Evaluation Runner")
+
+     gr.LoginButton()
 
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
 
+ if __name__ == "__main__":
+     print("Launching Gradio Interface for LlamaIndex Agent Evaluation...")
+     demo.launch(debug=True, share=False)
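A closing note on the asyncio plumbing: run_agent only defers the synchronous run_with_fallback to a thread-pool executor, and run_and_submit_all then blocks on it with loop.run_until_complete, so the manually managed event loop and nest_asyncio add compatibility but no concurrency. A hypothetical equivalent of the per-question call, under that assumption:

    # run_with_fallback is synchronous, so it can be called directly,
    # dropping run_agent, the manual event loop, and nest_asyncio
    submitted_answer = run_with_fallback(question_text)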