SamarthPujari committed on
Commit
7799d69
·
verified ·
1 Parent(s): d1e43be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -93
app.py CHANGED
@@ -1,4 +1,4 @@
1
- from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
2
  import datetime
3
  import requests
4
  import pytz
@@ -9,6 +9,8 @@ from Gradio_UI import GradioUI
9
  import fitz # PyMuPDF
10
  from sentence_transformers import SentenceTransformer, util
11
  from transformers import pipeline
 
 
12
 
13
  # API Key for weather
14
  API_KEY = os.getenv("Weather_Token")
@@ -16,61 +18,37 @@ API_KEY = os.getenv("Weather_Token")
16
  # -------------------- TOOL 1: Get Weather --------------------
17
  @tool
18
  def get_current_weather(place: str) -> str:
19
- """
20
- A tool that fetches the current weather of a particular place.
21
- Args:
22
- place (str): A string representing a valid place (e.g., 'London/Paris').
23
- Returns:
24
- str: Weather description including condition, temperature, humidity, and wind speed.
25
- """
26
- api_key = API_KEY
27
  url = "https://api.openweathermap.org/data/2.5/weather"
28
  params = {
29
  "q": place,
30
- "appid": api_key,
31
  "units": "metric"
32
  }
33
-
34
  try:
35
  response = requests.get(url, params=params)
36
  data = response.json()
37
-
38
  if response.status_code == 200:
39
- weather_desc = data["weather"][0]["description"]
40
- temperature = data["main"]["temp"]
41
- humidity = data["main"]["humidity"]
42
- wind_speed = data["wind"]["speed"]
43
-
44
  return (
45
  f"Weather in {place}:\n"
46
- f"- Condition: {weather_desc}\n"
47
- f"- Temperature: {temperature}°C\n"
48
- f"- Humidity: {humidity}%\n"
49
- f"- Wind Speed: {wind_speed} m/s"
50
  )
51
  else:
52
- return f"Error: {data['message']}"
53
  except Exception as e:
54
- return f"Error fetching weather data for '{place}': {str(e)}"
55
-
56
 
57
  # -------------------- TOOL 2: Get Time --------------------
58
  @tool
59
  def get_current_time_in_timezone(timezone: str) -> str:
60
- """
61
- A tool that fetches the current local time in a specified timezone.
62
- Args:
63
- timezone (str): A string representing a valid timezone (e.g., 'America/New_York').
64
- Returns:
65
- str: The current local time formatted as a string.
66
- """
67
  try:
68
  tz = pytz.timezone(timezone)
69
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
70
  return f"The current local time in {timezone} is: {local_time}"
71
  except Exception as e:
72
- return f"Error fetching time for timezone '{timezone}': {str(e)}"
73
-
74
 
75
  # -------------------- TOOL 3: Document QnA --------------------
76
  embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
@@ -78,80 +56,63 @@ qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
78
 
79
  @tool
80
  def document_qna_tool(pdf_path: str, question: str) -> str:
81
- """
82
- A tool that answers natural language questions about a given PDF document.
83
- Args:
84
- pdf_path (str): Path to the local PDF file.
85
- question (str): Question about the content of the PDF.
86
- Returns:
87
- str: Answer to the question based on the content.
88
- """
89
- import os, fitz, traceback
90
- from sentence_transformers import SentenceTransformer, util
91
- from transformers import pipeline
92
-
93
  try:
94
- print(f"[DEBUG] PDF Path: {pdf_path}")
95
- print(f"[DEBUG] Question: {question}")
96
-
97
  if not os.path.exists(pdf_path):
98
  return f"[ERROR] File not found: {pdf_path}"
99
-
100
- print("[DEBUG] Opening PDF...")
101
- try:
102
- doc = fitz.open(pdf_path)
103
- except RuntimeError as e:
104
- return f"[ERROR] Could not open PDF. It may be corrupted or encrypted. Details: {str(e)}"
105
-
106
- text_chunks = []
107
- for page in doc:
108
- text = page.get_text()
109
- if text.strip():
110
- text_chunks.append(text)
111
  doc.close()
112
-
113
  if not text_chunks:
114
  return "[ERROR] No readable text in the PDF."
115
 
116
- print(f"[DEBUG] Extracted {len(text_chunks)} text chunks.")
117
-
118
- embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
119
  embeddings = embedding_model.encode(text_chunks, convert_to_tensor=True)
120
  question_embedding = embedding_model.encode(question, convert_to_tensor=True)
121
-
122
- print("[DEBUG] Performing semantic search...")
123
  scores = util.pytorch_cos_sim(question_embedding, embeddings)[0]
124
- best_match_idx = scores.argmax().item()
125
- best_context = text_chunks[best_match_idx]
126
 
127
- qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
128
  prompt = f"Context: {best_context}\nQuestion: {question}"
129
- print("[DEBUG] Calling QA model...")
130
  answer = qa_pipeline(prompt, max_new_tokens=500)[0]['generated_text']
131
-
132
  return f"Answer: {answer.strip()}"
 
 
133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  except Exception as e:
135
- return f"[EXCEPTION] {type(e).__name__}: {str(e)}\n{traceback.format_exc()}"
136
 
137
- # -------------------- Other Components --------------------
138
- final_answer = FinalAnswerTool()
139
- search_tool = DuckDuckGoSearchTool()
140
 
141
- model = HfApiModel(
142
- max_tokens=2096,
143
- temperature=0.5,
144
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
145
- custom_role_conversions=None,
146
- )
 
 
147
 
148
- from smolagents import Tool
 
 
149
 
150
- image_generation_tool = Tool.from_space(
151
- "black-forest-labs/FLUX.1-schnell",
152
- name="image_generator", # You can name it whatever makes sense for your agent
153
- description="Generate an image from a prompt"
154
- )
155
 
156
  with open("prompts.yaml", 'r') as stream:
157
  prompt_templates = yaml.safe_load(stream)
@@ -161,17 +122,13 @@ agent = CodeAgent(
161
  tools=[
162
  get_current_time_in_timezone,
163
  get_current_weather,
164
- image_generation_tool,
165
  search_tool,
166
- document_qna_tool, # ← New Tool Added
167
  final_answer
168
  ],
169
  max_steps=6,
170
  verbosity_level=1,
171
- grammar=None,
172
- planning_interval=None,
173
- name=None,
174
- description=None,
175
  prompt_templates=prompt_templates
176
  )
177
 
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool
2
  import datetime
3
  import requests
4
  import pytz
 
9
  import fitz # PyMuPDF
10
  from sentence_transformers import SentenceTransformer, util
11
  from transformers import pipeline
12
+ from diffusers import StableDiffusionPipeline
13
+ import torch
14
 
15
  # API Key for weather
16
  API_KEY = os.getenv("Weather_Token")
 
18
# -------------------- TOOL 1: Get Weather --------------------
@tool
def get_current_weather(place: str) -> str:
    """
    A tool that fetches the current weather of a particular place.

    Args:
        place: A string representing a valid place (e.g., 'London' or 'Paris').

    Returns:
        str: Weather description including condition, temperature, humidity,
        and wind speed, or a human-readable error message on failure.
    """
    url = "https://api.openweathermap.org/data/2.5/weather"
    params = {
        "q": place,
        "appid": API_KEY,  # read from the Weather_Token env var at module load
        "units": "metric",
    }
    try:
        # A timeout prevents the agent from hanging indefinitely on a dead network.
        response = requests.get(url, params=params, timeout=10)
        data = response.json()
        if response.status_code == 200:
            return (
                f"Weather in {place}:\n"
                f"- Condition: {data['weather'][0]['description']}\n"
                f"- Temperature: {data['main']['temp']}°C\n"
                f"- Humidity: {data['main']['humidity']}%\n"
                f"- Wind Speed: {data['wind']['speed']} m/s"
            )
        else:
            return f"Error: {data.get('message', 'Unknown error')}"
    except Exception as e:
        return f"Error fetching weather data: {str(e)}"
 
42
 
43
# -------------------- TOOL 2: Get Time --------------------
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """
    A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').

    Returns:
        str: The current local time formatted as a string, or an error message
        if the timezone is unknown.
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        # pytz raises UnknownTimeZoneError for bad names; keep the broad catch
        # so the tool always returns a string to the agent instead of raising.
        return f"Error fetching time: {str(e)}"
 
52
 
53
  # -------------------- TOOL 3: Document QnA --------------------
54
  embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
 
56
 
57
@tool
def document_qna_tool(pdf_path: str, question: str) -> str:
    """
    A tool that answers natural language questions about a given PDF document.

    Args:
        pdf_path: Path to the local PDF file.
        question: Question about the content of the PDF.

    Returns:
        str: Answer based on the most relevant page of the document, or a
        message prefixed with [ERROR]/[EXCEPTION] on failure.
    """
    try:
        if not os.path.exists(pdf_path):
            return f"[ERROR] File not found: {pdf_path}"
        doc = fitz.open(pdf_path)
        try:
            # Extract each page's text once (the previous version called
            # get_text() twice per page) and keep only non-empty pages.
            text_chunks = [text for page in doc if (text := page.get_text()).strip()]
        finally:
            # Release the document handle even if extraction raises.
            doc.close()
        if not text_chunks:
            return "[ERROR] No readable text in the PDF."
        # Rank pages by cosine similarity to the question, then answer from
        # the single best-matching page's text.
        embeddings = embedding_model.encode(text_chunks, convert_to_tensor=True)
        question_embedding = embedding_model.encode(question, convert_to_tensor=True)
        scores = util.pytorch_cos_sim(question_embedding, embeddings)[0]
        best_context = text_chunks[scores.argmax().item()]
        prompt = f"Context: {best_context}\nQuestion: {question}"
        answer = qa_pipeline(prompt, max_new_tokens=500)[0]['generated_text']
        return f"Answer: {answer.strip()}"
    except Exception as e:
        return f"[EXCEPTION] {type(e).__name__}: {str(e)}"
78
 
79
# -------------------- TOOL 4: Local Image Generation --------------------
# Lazily-populated cache so the multi-gigabyte diffusion pipeline is loaded
# from disk only once instead of on every tool invocation.
_SD_PIPELINE_CACHE = {}

@tool
def image_generator(prompt: str) -> str:
    """
    A tool that generates an image locally from a text prompt using
    Stable Diffusion and saves it to disk.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        str: Message with the saved image path, or an error message on failure.
    """
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if "pipe" not in _SD_PIPELINE_CACHE:
            _SD_PIPELINE_CACHE["pipe"] = StableDiffusionPipeline.from_pretrained(
                "runwayml/stable-diffusion-v1-5",
                # fp16 only makes sense on GPU; CPU inference needs fp32.
                torch_dtype=torch.float16 if device == "cuda" else torch.float32
            ).to(device)
        image = _SD_PIPELINE_CACHE["pipe"](prompt).images[0]
        output_path = "generated_image.png"
        image.save(output_path)
        return f"Image saved at {output_path}"
    except Exception as e:
        return f"Image generation failed: {str(e)}"
94
 
95
# -------------------- Local LLM (Replaces HfApiModel) --------------------
# NOTE(review): verify that `LocalModel` actually exists in the installed
# smolagents version — if it does not, this import fails at startup.
from smolagents import LocalModel

class TransformersModel(LocalModel):
    """Local text-generation model wrapping a transformers pipeline."""

    def __init__(self):
        # NOTE(review): LocalModel.__init__ is never called here — confirm the
        # base class needs no initialization.
        # device_map="auto" lets transformers place the model on GPU when
        # available; fp16 is used only when CUDA is present.
        self.pipeline = pipeline(
            "text-generation",
            model="tiiuae/falcon-7b-instruct",
            device_map="auto",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )

    def generate(self, prompt, **kwargs):
        # Extra kwargs are accepted but ignored; sampling is always enabled
        # with a fixed 500-token budget.
        result = self.pipeline(prompt, max_new_tokens=500, do_sample=True)
        return result[0]['generated_text']

# Instantiated once at import time; loading falcon-7b here is expensive.
model = TransformersModel()
112
+
113
+ # -------------------- Agent Setup --------------------
114
+ final_answer = FinalAnswerTool()
115
+ search_tool = DuckDuckGoSearchTool()
116
 
117
# Load the agent's prompt templates; explicit UTF-8 avoids depending on the
# platform's default locale encoding.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
 
122
  tools=[
123
  get_current_time_in_timezone,
124
  get_current_weather,
125
+ image_generator,
126
  search_tool,
127
+ document_qna_tool,
128
  final_answer
129
  ],
130
  max_steps=6,
131
  verbosity_level=1,
 
 
 
 
132
  prompt_templates=prompt_templates
133
  )
134