Solobrad committed on
Commit
00abb37
·
verified ·
1 Parent(s): 9a8b05a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +148 -119
app.py CHANGED
@@ -1,126 +1,155 @@
1
- # app.py
2
  import os
3
- import gradio as gr
4
  import requests
5
- import pandas as pd
6
- from agent import agent
7
- import asyncio
8
- import nest_asyncio # NEW
9
- nest_asyncio.apply() # NEW
10
- # Constants
11
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
-
13
# Async helper to run the agent
async def run_agent(agent, question_text):
    """Await the agent's run() coroutine for one question and return its answer."""
    answer = await agent.run(question_text)
    return answer
16
-
17
# Gradio Agent Interface
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the LlamaIndexAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code

    # Require an OAuth login: the scoring endpoint needs a username.
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate LlamaIndexAgent
    print("Using imported agent instance.")

    # Link to this Space's source tree, sent along with the submission.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None

    # 3. Run your LlamaIndex Agent
    results_log = []       # one row per question, shown in the results table
    answers_payload = []   # answers actually submitted to the scoring API
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            # nest_asyncio.apply() at import time is presumably what lets
            # run_until_complete work under Gradio's own loop — TODO confirm.
            loop = asyncio.get_event_loop()  # Get the current event loop
            submitted_answer = loop.run_until_complete(run_agent(agent, question_text))
            # Coerce exotic return types to str so the payload stays JSON-serializable.
            if not isinstance(submitted_answer, (str, dict, list, int, float, bool, type(None))):
                submitted_answer = str(submitted_answer)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # Log the failure in the table but keep processing other questions.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.RequestException as e:
        print(f"Submission failed: {e}")
        return f"Submission failed: {e}", pd.DataFrame(results_log)
106
-
107
-
108
# Gradio Interface: login button, run trigger, and two output widgets.
with gr.Blocks() as demo:
    gr.Markdown("# LlamaIndex Agent Evaluation Runner")

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No inputs wired: Gradio injects the gr.OAuthProfile argument automatically
    # because run_and_submit_all is annotated with that type.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
123
 
124
# Script entry point: launch the Gradio app locally (debug on, no public share link).
if __name__ == "__main__":
    print("Launching Gradio Interface for LlamaIndex Agent Evaluation...")
    demo.launch(debug=True, share=False)
 
 
1
  import os
 
2
  import requests
3
+ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
4
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
5
+ from langchain_community.document_loaders import WikipediaLoader
6
+ from llama_index.core.tools.types import ToolMetadata
7
+ from llama_index.core.schema import Document
8
+ from llama_index.core.tools import FunctionTool
9
+ from langchain_community.tools.tavily_search import TavilySearchResults
10
+ from llama_index.core.agent.workflow import AgentWorkflow
11
+
12
# Hugging Face Inference API token; None when the env var is unset.
hf_token = os.getenv("HF_TOKEN")

# List of models to try in order (first is the default; the rest are fallbacks
# used by try_next_model / run_with_fallback).
model_list = [
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "microsoft/phi-3-mini-128k-instruct",
    "google/gemma-2b-it",
    "gpt2"
]

# Index into model_list of the model currently backing `llm`.
current_model_index = 0
llm = HuggingFaceInferenceAPI(
    model_name=model_list[current_model_index],
    token=hf_token,
)
27
+
28
# Numerical operation functions
def multiply(a: int, b: int) -> int:
    """Return the product of a and b."""
    product = a * b
    return product
32
+
33
def add(a: int, b: int) -> int:
    """Return the sum of a and b."""
    total = a + b
    return total
36
+
37
def subtract(a: int, b: int) -> int:
    """Return a minus b."""
    difference = a - b
    return difference
40
+
41
def divide(a: int, b: int) -> float:
    """Return a / b as a float; raises ValueError for a zero divisor."""
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    quotient = a / b
    return quotient
46
+
47
def modulus(a: int, b: int) -> int:
    """Return a modulo b."""
    remainder = a % b
    return remainder
50
+
51
# Web search tool function
def web_search(query: str) -> list:
    """Search Tavily for a query and return up to 3 results.

    Returns a list of llama_index Document objects (empty when the search
    yields nothing).
    """
    # Fix: a LangChain tool's invoke() takes the input as its first positional
    # argument; the original `invoke(query=query)` raised TypeError because no
    # input was supplied.
    results = TavilySearchResults(max_results=3).invoke(query)
    docs = []
    for r in results:
        if isinstance(r, dict):
            # TavilySearchResults returns plain dicts ({"url": ..., "content": ...}),
            # not Document objects — the original `r.metadata` access raised
            # AttributeError here.
            docs.append(
                Document(
                    text=r.get("content", ""),
                    metadata={"source": r.get("url", ""), "page": ""},
                )
            )
        else:
            # Fallback: keep the original Document-shaped handling in case the
            # installed tool version returns Documents — confirm against the
            # langchain_community version in use.
            meta = {"source": r.metadata.get("source", ""), "page": r.metadata.get("page", "")}
            docs.append(Document(text=r.page_content, metadata=meta))
    return docs
60
+
61
# Wikipedia search tool function
def wiki_search(query: str) -> list:
    """Search Wikipedia for a query and return up to 2 results as Documents."""
    loader = WikipediaLoader(query=query, load_max_docs=2)
    pages = loader.load()
    return [
        Document(
            text=page.page_content,
            metadata={
                "source": page.metadata.get("source", ""),
                "page": page.metadata.get("page", ""),
            },
        )
        for page in pages
    ]
70
+
71
# Wrap functions into FunctionTool instances
# NOTE(review): FunctionTool is built by passing the raw callable positionally;
# FunctionTool.from_defaults(fn=...) is the documented constructor — confirm
# this positional form against the installed llama_index version.
web_search_tool = FunctionTool(
    web_search,
    metadata=ToolMetadata(name="web_search", description="Tavily 3-hit search")
)
wiki_search_tool = FunctionTool(
    wiki_search,
    metadata=ToolMetadata(name="wiki_search", description="Wikipedia 2-hit search")
)

# Arithmetic helpers exposed to the agent as tools.
multiply_tool = FunctionTool(multiply, metadata=ToolMetadata(name="multiply", description="Multiply two numbers."))
add_tool = FunctionTool(add, metadata=ToolMetadata(name="add", description="Add two numbers."))
subtract_tool = FunctionTool(subtract, metadata=ToolMetadata(name="subtract", description="Subtract two numbers."))
divide_tool = FunctionTool(divide, metadata=ToolMetadata(name="divide", description="Divide two numbers."))
modulus_tool = FunctionTool(modulus, metadata=ToolMetadata(name="modulus", description="Modulus operation on two numbers."))

# Aggregate all tools
tools = [
    web_search_tool,
    wiki_search_tool,
    multiply_tool,
    add_tool,
    subtract_tool,
    divide_tool,
    modulus_tool,
]

# Initialize agent with the full toolset and the currently selected LLM.
agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)
100
+
101
# Function to try the next model in the list
def try_next_model():
    """Advance to the next entry of model_list and rebuild llm/agent.

    Returns True when a next model was selected, False once every model has
    been exhausted (the index is still advanced past the end in that case).
    """
    global current_model_index, llm, agent

    current_model_index += 1
    if current_model_index >= len(model_list):
        return False

    # Rebuild the LLM and the agent workflow around the newly selected model.
    llm = HuggingFaceInferenceAPI(model_name=model_list[current_model_index], token=hf_token)
    agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)
    return True
120
+
121
# Run with fallback logic
def run_with_fallback(query: str):
    """Run `query` through the agent, falling back through model_list on error.

    Returns the agent's result on success (NOTE(review): AgentWorkflow.run
    returns an awaitable workflow handle, not a final string — confirm callers
    await it), or an apology string when every model fails.
    """
    global current_model_index, llm, agent

    # Fix: always rebuild the workflow from the first model. The original only
    # rebuilt when current_model_index != 0, but module setup rebinds
    # `agent.run = run_with_fallback`, so reusing the existing `agent` made the
    # `agent.run(query)` call below recurse into this very function — model 0
    # was never actually exercised.
    current_model_index = 0
    llm = HuggingFaceInferenceAPI(
        model_name=model_list[current_model_index],
        token=hf_token,
    )
    agent = AgentWorkflow.from_tools_or_functions(tools, llm=llm)

    # Try each model in sequence
    for attempt in range(len(model_list)):
        try:
            result = agent.run(query)
            print(f"Successfully ran query with model: {model_list[current_model_index]}")
            return result
        except Exception as e:
            print(f"Error with model {model_list[current_model_index]}: {e}")
            if attempt < len(model_list) - 1:  # more fallback models remain
                try_next_model()
            else:
                break

    return "Sorry, encountered issues with all models."
148
 
149
# Async wrapper that delegates to the synchronous fallback runner.
async def run(query: str):
    """Async wrapper for the agent.run method to be compatible with app.py"""
    return run_with_fallback(query)


# NOTE(review): the async `run` above is defined but never attached — the next
# line rebinds agent.run to the *synchronous* fallback runner instead. Callers
# that `await agent.run(...)` then depend on run_with_fallback's return value
# being awaitable; confirm against app.py.
agent.run = run_with_fallback  # Replace with synchronous version for direct calls