Trial and error.

Files changed:
- app.py (+53 −178)
- requirements.txt (+3 −1)
app.py
CHANGED

@@ -4,10 +4,8 @@ import requests
 import inspect
 import pandas as pd
 from huggingface_hub import InferenceClient  # Import Hugging Face InferenceClient
-
-
-# --- Constants ---
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
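Note that this hunk deletes `DEFAULT_API_URL` together with the constants block. If `run_and_submit_all` (old line 327) still reads that constant, the app will raise a `NameError` when a run is submitted. A defensive sketch, assuming the scoring endpoint is still needed (the env-var name is illustrative, not from the commit):

```python
import os

# Hypothetical guard: restore the scoring endpoint if later code still expects it.
DEFAULT_API_URL = os.getenv(
    "SCORING_API_URL",  # illustrative variable name, not from the commit
    "https://agents-course-unit4-scoring.hf.space",
)
```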
@@ -16,190 +14,67 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
-        # Use Qwen2.5-7B-Instruct model
-        self.model_name = "Qwen/Qwen2.5-7B-Instruct"
-        self.hf_token = os.getenv("HF_TOKEN")  # Get token from environment if available
 
+        print("Loading Qwen2.5-7B-Instruct model...")
+        self.model_name = "Qwen/Qwen2.5-7B-Instruct"
+
+        # Load model and tokenizer
         try:
-            self.hf_client = InferenceClient(
-                model=self.model_name,
-                token=self.hf_token
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+            self.model = AutoModelForCausalLM.from_pretrained(
+                self.model_name,
+                torch_dtype="auto",
+                device_map="auto"
             )
-            print(f"Initialized InferenceClient for {self.model_name}")
+            print(f"Successfully loaded {self.model_name}")
         except Exception as e:
-            print(f"Error initializing InferenceClient: {e}")
-            self.hf_client = None
-            print("Agent will continue without a working LLM client.")
+            print(f"Error loading model: {e}")
+            # Fallback to HuggingFace Inference API if local loading fails
+            print("Falling back to InferenceClient")
+            self.client = InferenceClient(model=self.model_name)
+            self.tokenizer = None
+            self.model = None
-
-    def break_down_question(self, question: str) -> list:
-        """
-        Use an LLM to break down a complex question into key search terms or sub-questions.
-
-        Args:
-            question (str): The original question
-
-        Returns:
-            list: A list of key search terms or sub-questions
-        """
-        try:
-            print(f"Breaking down question with LLM: {question[:50]}...")
-
-            # Create a prompt that asks the LLM to break down the question
-            prompt = f"""
-            Please break down this question into 2-3 key search queries that would help find information to answer it.
-            Return ONLY the search queries, one per line, with no additional text or explanations.
-
-            Question: {question}
-            """
-
-            # Call the Hugging Face model to get the breakdown
-            response = self.hf_client.text_generation(
-                prompt=prompt,
-                max_new_tokens=150,
-                temperature=0.3,
-                repetition_penalty=1.1,
-                do_sample=True
-            )
-
-            # Extract the search terms from the response
-            search_terms = response.strip().split('\n')
-            search_terms = [term.strip() for term in search_terms if term.strip()]
-
-            # Limit to 3 search terms maximum
-            search_terms = search_terms[:3]
-
-            print(f"Question broken down into {len(search_terms)} search terms: {search_terms}")
-            return search_terms
-
-        except Exception as e:
-            print(f"Error breaking down question: {e}")
-            # If there's an error, return the original question as a fallback
-            return [question]
-
-    def search_internet(self, query: str) -> str:
-        """
-        Search the internet for information using Wikipedia's API.
-        This is a simple implementation that returns search results as text.
-
-        Args:
-            query (str): The search query
-
-        Returns:
-            str: Search results as text
-        """
-        print(f"Searching internet for: {query}")
-        try:
-            # Use Wikipedia API to search for information
-            headers = {
-                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
-            }
-
-            # Step 1: Search for relevant articles
-            search_url = f"https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={query}&format=json"
-            search_response = requests.get(search_url, headers=headers, timeout=10)
-            search_response.raise_for_status()
-            search_data = search_response.json()
-
-            # Check if we found any search results
-            if 'query' not in search_data or 'search' not in search_data['query'] or not search_data['query']['search']:
-                return "No relevant information found."
-
-            # Get the title of the first (most relevant) result
-            first_result = search_data['query']['search'][0]
-            page_title = first_result['title']
-
-            # Step 2: Fetch the content of the most relevant article
-            content_url = f"https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exintro=1&explaintext=1&titles={page_title}&format=json"
-            content_response = requests.get(content_url, headers=headers, timeout=10)
-            content_response.raise_for_status()
-            content_data = content_response.json()
-
-            # Extract the page content
-            pages = content_data['query']['pages']
-            page_id = list(pages.keys())[0]
-
-            if 'extract' in pages[page_id]:
-                extract = pages[page_id]['extract']
-                # Limit extract length to avoid very long responses
-                if len(extract) > 1000:
-                    extract = extract[:1000] + "..."
-
-                result = f"Wikipedia article: {page_title}\n\n{extract}"
-
-                # Also get a few more related article titles
-                related_titles = []
-                for item in search_data['query']['search'][1:4]:  # Get next 3 results
-                    related_titles.append(item['title'])
-
-                if related_titles:
-                    result += "\n\nRelated topics:\n"
-                    for title in related_titles:
-                        result += f"- {title}\n"
-
-                return result
-            else:
-                return "Found a relevant page, but couldn't extract its content."
-
-        except Exception as e:
-            print(f"Error searching internet: {e}")
-            return f"Error performing internet search: {str(e)}"
 
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-
-        # Break the question down into key search terms
-        search_terms = self.break_down_question(question)
-
-        # Search the internet for each term and combine the results
-        all_results = []
-        for term in search_terms:
-            result = self.search_internet(term)
-            if result and result != "No relevant information found.":
-                all_results.append(result)
-
-        combined_results = "\n\n".join(all_results)
-
-        if combined_results:
-            # Use Hugging Face model to synthesize a coherent answer from the search results
-            try:
-                synthesis_prompt = f"""
-                Based on the following search results, please provide a comprehensive answer to this question:
-
-                Question: {question}
-
-                Search Results:
-                {combined_results}
-
-                Answer:
-                """
-
-                # Call the Hugging Face model to synthesize an answer
-                response = self.hf_client.text_generation(
-                    prompt=synthesis_prompt,
-                    max_new_tokens=500,
-                    temperature=0.5,
-                    repetition_penalty=1.05,
-                    do_sample=True
-                )
-                answer = response.strip()
-                print(f"Agent returning synthesized answer (first 50 chars): {answer[:50]}...")
-            except Exception as e:
-                print(f"Error synthesizing answer: {e}")
-                # Fallback to returning the combined search results directly
-                answer = combined_results
-
-        else:
-            # Fallback to default answer if all searches fail
-            answer = "I couldn't find specific information about that question."
-            print("Agent returning default answer as searches found no useful information.")
+
+        try:
+            # Create messages for the model
+            messages = [
+                {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
+                {"role": "user", "content": question}
+            ]
+
+            # Generate response
+            if self.model and self.tokenizer:
+                # Local model generation
+                text = self.tokenizer.apply_chat_template(
+                    messages,
+                    tokenize=False,
+                    add_generation_prompt=True
+                )
+                model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
+
+                generated_ids = self.model.generate(
+                    **model_inputs,
+                    max_new_tokens=512
+                )
+                generated_ids = [
+                    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+                ]
+
+                answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+            else:
+                # Fallback to Inference API
+                answer = self.client.chat(messages=messages)
+
+            print(f"Agent generated response (first 50 chars): {answer[:50]}...")
             return answer
+        except Exception as e:
+            print(f"Error generating response: {e}")
+            fallback_answer = "I apologize, but I encountered an error when trying to answer your question."
+            print(f"Agent returning fallback answer: {fallback_answer}")
+            return fallback_answer
 
 
 def run_and_submit_all(profile: gr.OAuthProfile | None):
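A note on the new fallback branch: `InferenceClient` exposes chat-style inference as `chat_completion(...)`, which returns a completion object rather than a plain string, so `answer = self.client.chat(messages=messages)` will likely need adjusting before `answer[:50]` works. A sketch of how that call is typically written (model name reused from the commit):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# chat_completion returns a ChatCompletionOutput; the text lives on the message.
completion = client.chat_completion(messages=messages, max_tokens=512)
answer = completion.choices[0].message.content
print(answer[:50])
```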
@@ -327,7 +202,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner")
+    gr.Markdown("# Basic Agent Evaluation Runner #")
     gr.Markdown(
         """
         **Instructions:**
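On the local path: Qwen2.5-7B-Instruct weighs roughly 15 GB in bf16, so on basic Space hardware `from_pretrained` will usually fail and the agent will live on the fallback branch in practice. The generate-and-decode flow itself can be smoke-tested with a smaller sibling model; a minimal sketch (the 0.5B model choice is an assumption, any small chat model works):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # illustrative small stand-in model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)

messages = [{"role": "user", "content": "What is 2 + 2?"}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = tokenizer([text], return_tensors="pt").to(model.device)

output_ids = model.generate(**inputs, max_new_tokens=64)
# Trim the prompt tokens so only the newly generated answer is decoded.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
print(tokenizer.batch_decode(trimmed, skip_special_tokens=True)[0])
```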
requirements.txt
CHANGED

@@ -1,3 +1,5 @@
 gradio
 requests
-huggingface_hub
+huggingface_hub
+transformers
+torch
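One packaging note: `device_map="auto"` in `from_pretrained` needs the `accelerate` package, which this list omits, so the local-load branch is likely to raise and the agent will always fall back to the Inference API. A sketch of the list with that dependency added (the addition is a suggestion, not part of the commit):

```
gradio
requests
huggingface_hub
transformers
torch
accelerate
```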