from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool, Tool
import datetime
import requests
import pytz
import yaml
import os
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
import fitz # PyMuPDF
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
from PIL import Image
import io
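# App entry point for this Space: defines four custom tools (weather lookup,
# timezone clock, PDF question answering, image generation), wires them into
# a smolagents CodeAgent, and serves the agent through a Gradio UI.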
# OpenWeatherMap API key, read from the environment
API_KEY = os.getenv("Weather_Token")
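# NOTE: "Weather_Token" is assumed to be set as a Space secret / environment
# variable; os.getenv returns None when it is missing, in which case the
# weather tool below will return an API error message instead of crashing.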
# -------------------- TOOL 1: Get Weather --------------------
@tool
def get_current_weather(place: str) -> str:
    """
    A tool that fetches the current weather for a given place.

    Args:
        place (str): A string representing a valid place name (e.g., 'London' or 'Paris').

    Returns:
        str: Weather description including condition, temperature, humidity, and wind speed.
    """
    url = "https://api.openweathermap.org/data/2.5/weather"
    params = {
        "q": place,
        "appid": API_KEY,
        "units": "metric",
    }
    try:
        # A timeout keeps the agent from hanging on a slow API call.
        response = requests.get(url, params=params, timeout=10)
        data = response.json()
        if response.status_code == 200:
            weather_desc = data["weather"][0]["description"]
            temperature = data["main"]["temp"]
            humidity = data["main"]["humidity"]
            wind_speed = data["wind"]["speed"]
            return (
                f"Weather in {place}:\n"
                f"- Condition: {weather_desc}\n"
                f"- Temperature: {temperature}°C\n"
                f"- Humidity: {humidity}%\n"
                f"- Wind Speed: {wind_speed} m/s"
            )
        else:
            return f"Error: {data['message']}"
    except Exception as e:
        return f"Error fetching weather data for '{place}': {str(e)}"
# -------------------- TOOL 2: Get Time --------------------
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """
    A tool that fetches the current local time in a specified timezone.

    Args:
        timezone (str): A string representing a valid timezone (e.g., 'America/New_York').

    Returns:
        str: The current local time formatted as a string.
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# -------------------- TOOL 3: Document QnA --------------------
embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")

@tool
def document_qna_tool(pdf_path: str, question: str) -> str:
    """
    A tool that answers natural language questions about a given PDF document.

    Args:
        pdf_path (str): Path to the local PDF file.
        question (str): Question about the content of the PDF.

    Returns:
        str: Answer to the question based on the document's content.
    """
    # Imports and model loads are repeated inside the tool so its source code
    # stays self-contained (tool code may be serialized and run in isolation).
    import os, fitz, traceback
    from sentence_transformers import SentenceTransformer, util
    from transformers import pipeline

    try:
        print(f"[DEBUG] PDF Path: {pdf_path}")
        print(f"[DEBUG] Question: {question}")

        if not os.path.exists(pdf_path):
            return f"[ERROR] File not found: {pdf_path}"

        print("[DEBUG] Opening PDF...")
        try:
            doc = fitz.open(pdf_path)
        except RuntimeError as e:
            return f"[ERROR] Could not open PDF. It may be corrupted or encrypted. Details: {str(e)}"

        # Extract one text chunk per page, skipping pages with no text.
        text_chunks = []
        for page in doc:
            text = page.get_text()
            if text.strip():
                text_chunks.append(text)
        doc.close()

        if not text_chunks:
            return "[ERROR] No readable text in the PDF."
        print(f"[DEBUG] Extracted {len(text_chunks)} text chunks.")

        # Embed every chunk and the question for semantic retrieval.
        embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
        embeddings = embedding_model.encode(text_chunks, convert_to_tensor=True)
        question_embedding = embedding_model.encode(question, convert_to_tensor=True)

        print("[DEBUG] Performing semantic search...")
        scores = util.pytorch_cos_sim(question_embedding, embeddings)[0]
        best_match_idx = scores.argmax().item()
        best_context = text_chunks[best_match_idx]

        # Generate an answer conditioned on the best-matching chunk.
        qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
        prompt = f"Context: {best_context}\nQuestion: {question}"
        print("[DEBUG] Calling QA model...")
        answer = qa_pipeline(prompt, max_new_tokens=500)[0]["generated_text"]
        return f"Answer: {answer.strip()}"

    except Exception as e:
        return f"[EXCEPTION] {type(e).__name__}: {str(e)}\n{traceback.format_exc()}"
# -------------------- TOOL 4: Image Generation --------------------
@tool
def generate_image(prompt: str) -> Image.Image:
    """
    A tool that generates an image from a text prompt using a Hugging Face Space.

    Args:
        prompt (str): The text description of the image to generate.

    Returns:
        Image.Image: The generated image as a PIL Image object.
    """
    try:
        # Wrap the hosted FLUX.1-schnell Space as a callable smolagents Tool.
        image_generator = Tool.from_space(
            "black-forest-labs/FLUX.1-schnell",
            name="image_generator",
            description="Generate an image from a prompt",
        )

        # Call the Space with the prompt.
        result = image_generator(prompt=prompt)

        # The Space may return raw bytes, a PIL Image, or a file path;
        # normalize all three cases to a PIL Image.
        if isinstance(result, bytes):
            return Image.open(io.BytesIO(result))
        if isinstance(result, Image.Image):
            return result
        if isinstance(result, str):
            return Image.open(result)

        raise ValueError("Unexpected output format from image generator.")
    except Exception as e:
        raise RuntimeError(f"Error generating image: {str(e)}")
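# Example (sketch; requires the FLUX.1-schnell Space to be reachable):
#   img = generate_image("a watercolor fox in a snowy forest")
#   img.save("fox.png")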
# -------------------- Other Components --------------------
final_answer = FinalAnswerTool()
search_tool = DuckDuckGoSearchTool()

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        get_current_time_in_timezone,
        get_current_weather,
        generate_image,
        search_tool,
        document_qna_tool,
        final_answer,
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
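# Illustrative queries and the tools the agent is expected to route them to:
#   "What's the weather in Tokyo?"        -> get_current_weather
#   "What time is it in Europe/Paris?"    -> get_current_time_in_timezone
#   "What does ./paper.pdf conclude?"     -> document_qna_tool
#   "Draw a castle at sunset."            -> generate_image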
GradioUI(agent).launch() |