Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import sys
|
|
| 3 |
import subprocess
|
| 4 |
|
| 5 |
# 检查并安装缺失的依赖
|
| 6 |
-
required_packages = ["litellm", "duckduckgo-search", "gradio", "requests", "pandas"]
|
| 7 |
for package in required_packages:
|
| 8 |
try:
|
| 9 |
__import__(package)
|
|
@@ -33,7 +33,7 @@ class DuckDuckGoSearchTool:
|
|
| 33 |
self.name = "duckduckgo_search"
|
| 34 |
self.description = "Search the web using DuckDuckGo"
|
| 35 |
|
| 36 |
-
def search(self, query: str, max_results: int =
|
| 37 |
"""
|
| 38 |
Search the web using DuckDuckGo and return results.
|
| 39 |
|
|
@@ -52,7 +52,7 @@ class DuckDuckGoSearchTool:
|
|
| 52 |
print(f"DuckDuckGo search error: {e}")
|
| 53 |
return [{"title": f"Search error: {e}", "body": "", "href": ""}]
|
| 54 |
|
| 55 |
-
def __call__(self, query: str, max_results: int =
|
| 56 |
"""
|
| 57 |
Execute the search and return results in a structured format.
|
| 58 |
|
|
@@ -122,7 +122,7 @@ class CodeAgent:
|
|
| 122 |
"""Format search results into a readable string"""
|
| 123 |
formatted = "Search Results:\n"
|
| 124 |
if not results:
|
| 125 |
-
return "No search results found.\n\n"
|
| 126 |
for i, result in enumerate(results, 1):
|
| 127 |
formatted += f"{i}. {result.get('title', 'No title')}\n"
|
| 128 |
formatted += f" {result.get('body', 'No description')[:200]}...\n"
|
|
@@ -142,45 +142,28 @@ class CodeAgent:
|
|
| 142 |
)
|
| 143 |
|
| 144 |
def create_prompt(self, question: str, search_results: Optional[List[Dict[str, str]]] = None) -> str:
|
| 145 |
-
"""Create a prompt for the model with optional search results"""
|
| 146 |
prompt_parts = []
|
| 147 |
|
| 148 |
if search_results:
|
| 149 |
prompt_parts.append(self.format_search_results(search_results))
|
| 150 |
prompt_parts.append(
|
| 151 |
-
"
|
| 152 |
-
"
|
| 153 |
-
"
|
| 154 |
-
"- If the search results are relevant but do not directly answer the question, you may synthesize information if explicitly asked to, otherwise state what is found.\n"
|
| 155 |
-
"- If the search results are irrelevant or insufficient, state that the answer cannot be found in the provided search results and then use your general knowledge if applicable.\n\n"
|
| 156 |
)
|
| 157 |
else:
|
| 158 |
-
prompt_parts.append("No
|
| 159 |
|
| 160 |
prompt_parts.append(f"Question: {question}\n")
|
| 161 |
prompt_parts.append(
|
| 162 |
-
"\
|
| 163 |
-
"
|
| 164 |
-
"
|
| 165 |
-
"
|
| 166 |
-
"
|
| 167 |
-
"
|
| 168 |
-
"
|
| 169 |
-
" - If the question asks for a list of items, provide them as a comma-separated list without numbering or bullet points (e.g., 'red, green, blue').\n"
|
| 170 |
-
" - If a yes/no answer is appropriate, provide 'Yes' or 'No'.\n"
|
| 171 |
-
" - For other types of questions, provide the most direct and brief factual answer.\n"
|
| 172 |
-
"5. Unknown Answers: If, and only if, you cannot confidently determine a factual answer from the search results (if provided) or your general knowledge, or if the question is unanswerable (e.g., requires processing an image you cannot see, or is nonsensical), you MUST respond with the single word 'Unknown'. Do not guess or provide speculative information.\n"
|
| 173 |
-
"\nExample Scenarios:\n"
|
| 174 |
-
"- Question: What is the capital of France?\n"
|
| 175 |
-
" Correct Answer: Paris\n"
|
| 176 |
-
"- Question: How many moons does Earth have?\n"
|
| 177 |
-
" Correct Answer: 1\n"
|
| 178 |
-
"- Question: List the primary colors.\n"
|
| 179 |
-
" Correct Answer: red, yellow, blue\n"
|
| 180 |
-
"- Question: Does a dog meow?\n"
|
| 181 |
-
" Correct Answer: No\n"
|
| 182 |
-
"- Question: (A question where the answer is truly unknowable or unfindable for you)\n"
|
| 183 |
-
" Correct Answer: Unknown\n"
|
| 184 |
)
|
| 185 |
prompt_parts.append("\nAnswer: ")
|
| 186 |
|
|
@@ -200,7 +183,6 @@ class CodeAgent:
|
|
| 200 |
|
| 201 |
question_lower = question.lower()
|
| 202 |
|
| 203 |
-
# 更新 should_search 逻辑
|
| 204 |
search_trigger_keywords = [
|
| 205 |
"what", "who", "when", "where", "how many", "which", "list", "name", "find", "does",
|
| 206 |
"is there", "are there", "can you tell me", "describe", "published by", "released by",
|
|
@@ -220,13 +202,13 @@ class CodeAgent:
|
|
| 220 |
|
| 221 |
if '.remna eht sa "tfel" drow eht fo etisoppo eht etirw' in question_lower:
|
| 222 |
should_search = False
|
| 223 |
-
if "chess position provided in the image" in question_lower or "image." in question_lower:
|
| 224 |
should_search = False
|
| 225 |
|
| 226 |
search_results = None
|
| 227 |
if should_search and self.search_tool:
|
| 228 |
print(f"Searching for information about: {question}")
|
| 229 |
-
search_response = self.search_tool(question, max_results=
|
| 230 |
search_results = search_response.get("results", [])
|
| 231 |
print(f"Found {len(search_results)} search results")
|
| 232 |
|
|
@@ -234,8 +216,6 @@ class CodeAgent:
|
|
| 234 |
system_prompt = self.create_system_prompt()
|
| 235 |
|
| 236 |
print("Generating response with LLM...")
|
| 237 |
-
# print(f"System Prompt: {system_prompt}") # For debugging prompts
|
| 238 |
-
# print(f"User Prompt: {prompt}") # For debugging prompts
|
| 239 |
response = self.model.generate(prompt, system_prompt)
|
| 240 |
|
| 241 |
answer = response.strip()
|
|
@@ -246,7 +226,7 @@ class CodeAgent:
|
|
| 246 |
]
|
| 247 |
|
| 248 |
for prefix in prefixes_to_remove:
|
| 249 |
-
if answer.lower().startswith(prefix.lower()):
|
| 250 |
answer = answer[len(prefix):].strip()
|
| 251 |
|
| 252 |
if (answer.startswith('"') and answer.endswith('"')) or \
|
|
@@ -272,7 +252,7 @@ def run_and_submit_all():
|
|
| 272 |
if not api_key:
|
| 273 |
return "Error: GEMINI_API_KEY environment variable not found. Please set it in your Space settings.", None
|
| 274 |
|
| 275 |
-
model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=
|
| 276 |
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
|
| 277 |
except Exception as e:
|
| 278 |
print(f"Error instantiating agent: {e}")
|
|
@@ -280,12 +260,12 @@ def run_and_submit_all():
|
|
| 280 |
|
| 281 |
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
|
| 282 |
if not space_id:
|
| 283 |
-
agent_code = "https://huggingface.co/spaces/lethaq/Final_Assignment_Template/tree/main"
|
| 284 |
print(f"Agent code URL: {agent_code}")
|
| 285 |
|
| 286 |
print(f"Fetching questions from: {questions_url}")
|
| 287 |
try:
|
| 288 |
-
response = requests.get(questions_url, timeout=20)
|
| 289 |
response.raise_for_status()
|
| 290 |
questions_data = response.json()
|
| 291 |
if not questions_data:
|
|
@@ -394,7 +374,7 @@ with gr.Blocks() as demo:
|
|
| 394 |
|
| 395 |
gr.Markdown("## Test Single Question")
|
| 396 |
with gr.Row():
|
| 397 |
-
question_in = gr.Textbox(label="Question", lines=3, placeholder="Enter your question here...")
|
| 398 |
answer_out = gr.Textbox(label="Answer", lines=3, interactive=False)
|
| 399 |
|
| 400 |
test_btn = gr.Button("Test Question", variant="secondary")
|
|
@@ -407,7 +387,7 @@ with gr.Blocks() as demo:
|
|
| 407 |
if not api_key:
|
| 408 |
return "Error: GEMINI_API_KEY environment variable not found"
|
| 409 |
|
| 410 |
-
model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=
|
| 411 |
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
|
| 412 |
answer = agent(question)
|
| 413 |
return answer
|
|
|
|
| 3 |
import subprocess
|
| 4 |
|
| 5 |
# 检查并安装缺失的依赖
|
| 6 |
+
required_packages = ["litellm", "duckduckgo-search", "gradio", "requests", "pandas"]
|
| 7 |
for package in required_packages:
|
| 8 |
try:
|
| 9 |
__import__(package)
|
|
|
|
| 33 |
self.name = "duckduckgo_search"
|
| 34 |
self.description = "Search the web using DuckDuckGo"
|
| 35 |
|
| 36 |
+
def search(self, query: str, max_results: int = 3) -> List[Dict[str, str]]: # Default max_results set to 3
|
| 37 |
"""
|
| 38 |
Search the web using DuckDuckGo and return results.
|
| 39 |
|
|
|
|
| 52 |
print(f"DuckDuckGo search error: {e}")
|
| 53 |
return [{"title": f"Search error: {e}", "body": "", "href": ""}]
|
| 54 |
|
| 55 |
+
def __call__(self, query: str, max_results: int = 3) -> Dict[str, Any]: # Default max_results set to 3
|
| 56 |
"""
|
| 57 |
Execute the search and return results in a structured format.
|
| 58 |
|
|
|
|
| 122 |
"""Format search results into a readable string"""
|
| 123 |
formatted = "Search Results:\n"
|
| 124 |
if not results:
|
| 125 |
+
return "No search results found.\n\n" # Added a case for no results
|
| 126 |
for i, result in enumerate(results, 1):
|
| 127 |
formatted += f"{i}. {result.get('title', 'No title')}\n"
|
| 128 |
formatted += f" {result.get('body', 'No description')[:200]}...\n"
|
|
|
|
| 142 |
)
|
| 143 |
|
| 144 |
def create_prompt(self, question: str, search_results: Optional[List[Dict[str, str]]] = None) -> str:
|
| 145 |
+
"""Create a prompt for the model with optional search results - Simplified Version"""
|
| 146 |
prompt_parts = []
|
| 147 |
|
| 148 |
if search_results:
|
| 149 |
prompt_parts.append(self.format_search_results(search_results))
|
| 150 |
prompt_parts.append(
|
| 151 |
+
"Instruction: Use the Search Results provided above to answer the Question. "
|
| 152 |
+
"Focus on extracting the specific fact, number, or list requested. "
|
| 153 |
+
"If the search results are clearly irrelevant or do not contain the answer, then try to use your general knowledge.\n\n"
|
|
|
|
|
|
|
| 154 |
)
|
| 155 |
else:
|
| 156 |
+
prompt_parts.append("Instruction: No search results were provided. Answer the Question using your general knowledge.\n\n")
|
| 157 |
|
| 158 |
prompt_parts.append(f"Question: {question}\n")
|
| 159 |
prompt_parts.append(
|
| 160 |
+
"\nAnswer Guidelines (Strictly Follow):\n"
|
| 161 |
+
"1. Your answer MUST be ONLY the specific fact, number(s), or comma-separated list requested by the question. NO extra words, explanations, or conversational filler like 'The answer is...'.\n"
|
| 162 |
+
"2. Example - Question: 'What is the capital of France?' Expected Answer: Paris\n"
|
| 163 |
+
"3. Example - Question: 'How many days are there in a standard week?' Expected Answer: 7\n"
|
| 164 |
+
"4. Example - Question: 'List the first three positive even numbers.' Expected Answer: 2, 4, 6\n"
|
| 165 |
+
"5. If, after carefully checking the search results (if any were provided) and your own knowledge, you absolutely cannot determine a factual answer, or if the question is impossible for you to answer (e.g. requires seeing an image), then respond with the single word: Unknown\n"
|
| 166 |
+
" Do not guess. 'Unknown' is only for when you are certain you don't know.\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
)
|
| 168 |
prompt_parts.append("\nAnswer: ")
|
| 169 |
|
|
|
|
| 183 |
|
| 184 |
question_lower = question.lower()
|
| 185 |
|
|
|
|
| 186 |
search_trigger_keywords = [
|
| 187 |
"what", "who", "when", "where", "how many", "which", "list", "name", "find", "does",
|
| 188 |
"is there", "are there", "can you tell me", "describe", "published by", "released by",
|
|
|
|
| 202 |
|
| 203 |
if '.remna eht sa "tfel" drow eht fo etisoppo eht etirw' in question_lower:
|
| 204 |
should_search = False
|
| 205 |
+
if "chess position provided in the image" in question_lower or "image." in question_lower:
|
| 206 |
should_search = False
|
| 207 |
|
| 208 |
search_results = None
|
| 209 |
if should_search and self.search_tool:
|
| 210 |
print(f"Searching for information about: {question}")
|
| 211 |
+
search_response = self.search_tool(question, max_results=3) # max_results changed to 3
|
| 212 |
search_results = search_response.get("results", [])
|
| 213 |
print(f"Found {len(search_results)} search results")
|
| 214 |
|
|
|
|
| 216 |
system_prompt = self.create_system_prompt()
|
| 217 |
|
| 218 |
print("Generating response with LLM...")
|
|
|
|
|
|
|
| 219 |
response = self.model.generate(prompt, system_prompt)
|
| 220 |
|
| 221 |
answer = response.strip()
|
|
|
|
| 226 |
]
|
| 227 |
|
| 228 |
for prefix in prefixes_to_remove:
|
| 229 |
+
if answer.lower().startswith(prefix.lower()):
|
| 230 |
answer = answer[len(prefix):].strip()
|
| 231 |
|
| 232 |
if (answer.startswith('"') and answer.endswith('"')) or \
|
|
|
|
| 252 |
if not api_key:
|
| 253 |
return "Error: GEMINI_API_KEY environment variable not found. Please set it in your Space settings.", None
|
| 254 |
|
| 255 |
+
model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=api_key)
|
| 256 |
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
|
| 257 |
except Exception as e:
|
| 258 |
print(f"Error instantiating agent: {e}")
|
|
|
|
| 260 |
|
| 261 |
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
|
| 262 |
if not space_id:
|
| 263 |
+
agent_code = "https://huggingface.co/spaces/lethaq/Final_Assignment_Template/tree/main"
|
| 264 |
print(f"Agent code URL: {agent_code}")
|
| 265 |
|
| 266 |
print(f"Fetching questions from: {questions_url}")
|
| 267 |
try:
|
| 268 |
+
response = requests.get(questions_url, timeout=20)
|
| 269 |
response.raise_for_status()
|
| 270 |
questions_data = response.json()
|
| 271 |
if not questions_data:
|
|
|
|
| 374 |
|
| 375 |
gr.Markdown("## Test Single Question")
|
| 376 |
with gr.Row():
|
| 377 |
+
question_in = gr.Textbox(label="Question", lines=3, placeholder="Enter your question here...")
|
| 378 |
answer_out = gr.Textbox(label="Answer", lines=3, interactive=False)
|
| 379 |
|
| 380 |
test_btn = gr.Button("Test Question", variant="secondary")
|
|
|
|
| 387 |
if not api_key:
|
| 388 |
return "Error: GEMINI_API_KEY environment variable not found"
|
| 389 |
|
| 390 |
+
model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=api_key)  # SECURITY: a real Gemini API key was hardcoded here and is now leaked in commit history — revoke/rotate it; use the GEMINI_API_KEY env var (already validated above), matching the run_and_submit_all() instantiation
|
| 391 |
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
|
| 392 |
answer = agent(question)
|
| 393 |
return answer
|