innafomina commited on
Commit
8b9e37a
·
1 Parent(s): 3c13687

added more tools

Browse files
Files changed (6) hide show
  1. README.md +3 -6
  2. agent.py +56 -0
  3. app.py +81 -49
  4. requirements.txt +15 -0
  5. tools.py +356 -0
  6. utils.py +239 -0
README.md CHANGED
@@ -1,14 +1,11 @@
1
  ---
2
  title: Power Agent
3
- emoji: 💬
4
- colorFrom: yellow
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 5.0.1
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
- short_description: Multimodal agent with a wide range of tools
12
- ---
13
-
14
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
1
  ---
2
  title: Power Agent
3
+ emoji: 🚀
4
+ colorFrom: pink
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 5.0.1
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
+ short_description: Multimodal agent with a wide range of tools.
 
 
 
agent.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from dataclasses import asdict
7
+ from smolagents import CodeAgent, OpenAIServerModel, DuckDuckGoSearchTool, WikipediaSearchTool, GoogleSearchTool, LiteLLMModel
8
+ from dotenv import find_dotenv, load_dotenv
9
+ from tools import SafeDuckDuckGoSearchTool, ImageUnderstanding, WikipediaSearch, ExcelReader, CsvReader,ChessSolver, download_files, get_images, FileReader, AudioTransciber, YouTubeTranscipt, YouTubeVideoUnderstanding, WeatherTool, OCR
10
+ from pathlib import Path
11
+ from PIL import Image
12
+ import time
13
+ from smolagents import stream_to_gradio
14
+
15
class PowerAgent(CodeAgent):
    """A smolagents CodeAgent pre-wired with this project's tool suite and a
    GAIA-style answer-formatting system prompt (appended to the stock one)."""

    def __init__(self, model):
        #web_search = DuckDuckGoSearchTool()
        # Tool instances come from tools.py (plus smolagents' WikipediaSearchTool);
        # each is a smolagents Tool subclass the agent can invoke from generated code.
        tools = [
            SafeDuckDuckGoSearchTool(),
            ImageUnderstanding(),
            WikipediaSearchTool(),
            FileReader(),
            ExcelReader(),
            CsvReader(),
            AudioTransciber(),
            YouTubeTranscipt(),
            YouTubeVideoUnderstanding(),
            WeatherTool(),
            OCR()
        ]
        super().__init__(
            tools=tools,
            model=model,
            verbosity_level=3,  # most verbose step-by-step logging
            # Extra modules the agent's generated code may import.
            additional_authorized_imports=['pandas', 'numpy', 'io']
        )
        # Add your custom system prompt
        SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question. Report your thoughts, and
        finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
        YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated
        list of numbers and/or strings.
        If you are asked for a number, don't use comma to write your number neither use units such as $ or
        percent sign unless specified otherwise.
        If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the
        digits in plain text unless specified otherwise. Never include currency symbols in the response.
        If you are asked for a comma separated list, apply the above rules depending of whether the element
        to be put in the list is a number or a string. For question that contain phrases like `what is the number` or
        `what is the highest number` return just the number, e.g., 2. For questions around currency,
        include just the number, not the currency sign.
        """
        # Append rather than replace so the base CodeAgent instructions are preserved.
        self.prompt_templates["system_prompt"] = self.prompt_templates["system_prompt"] + SYSTEM_PROMPT

    # def __call__(self, message: str) -> str:
    #     final_answer = self.agent.run(message)
    #     print(f"Agent returning answer: {final_answer}")
    #     return final_answer
app.py CHANGED
@@ -1,64 +1,96 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
 
 
 
 
 
 
 
 
 
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
 
43
  """
44
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
  """
46
  demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
  ),
 
 
 
 
 
 
 
 
 
 
 
59
  ],
60
- )
 
61
 
62
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
1
+ import os
2
  import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from dataclasses import asdict
7
+ from smolagents import CodeAgent, OpenAIServerModel, DuckDuckGoSearchTool, WikipediaSearchTool, HfApiModel, GoogleSearchTool, LiteLLMModel
8
+ from dotenv import find_dotenv, load_dotenv
9
+ from tools import ImageUnderstanding, WikipediaSearch, ExcelReader, CsvReader,ChessSolver, download_files, get_images, FileReader, AudioTransciber, YouTubeTranscipt, YouTubeVideoUnderstanding, VegetableFruitClassification
10
+ from pathlib import Path
11
+ from PIL import Image
12
+ import time
13
+ from smolagents import stream_to_gradio
14
+ from agent import PowerAgent
15
  """
16
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
17
  """
18
+
19
def interact_with_agent(message, history):
    """Gradio ChatInterface handler: run PowerAgent on a multimodal message
    and stream the agent's intermediate steps back into the chat.

    Args:
        message: dict from gr.MultimodalTextbox with 'text' and 'files' keys.
        history: list of {'role': ..., 'content': ...} dicts kept by Gradio.

    Yields:
        The growing list of chat-message dicts for the Chatbot component.
    """
    load_dotenv(find_dotenv())
    gemini_model = LiteLLMModel(model_id="gemini/gemini-2.0-flash",
                                api_key=os.getenv("GEMINI_API_KEY"))
    # os.environ["SERPER_API_KEY"] = os.getenv('SERPER_API_KEY')
    agent = PowerAgent(model=gemini_model)
    messages = []
    # Guard against 'files' being absent or None: the original called
    # len(message.get('files')) which crashes when the key is missing.
    files = message.get('files') or []
    if files:
        file_prompt = "The following files are uploaded: " + str(files)
    else:
        file_prompt = ""
    # Re-serialize earlier turns so the (stateless) agent sees the conversation.
    formatted_history = ""
    for entry in history:
        role = entry.get('role')
        user_msg = entry.get('content') if role == 'user' else ''
        agent_msg = entry.get('content') if role == 'assistant' else ''
        formatted_history += f"User: {user_msg}\nAgent: {agent_msg}\n"
    # Current user turn goes last, with any uploaded-file hint appended.
    prompt = formatted_history + f"User: {message['text']}{file_prompt}\nAgent:"
    for msg in stream_to_gradio(agent, prompt):
        messages.append(asdict(msg))
        yield messages
        # NOTE(review): appending to Gradio's history once per streamed chunk
        # looks unintended (one entry per chunk, odd shape) — confirm before
        # relying on it; kept to preserve existing behavior.
        history.append([{'user': prompt, 'agent': msg.content}])
    yield messages
64
 
65
 
66
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Multimodal chat UI wired to the streaming agent handler above.
demo = gr.ChatInterface(
    interact_with_agent,
    theme=gr.themes.Monochrome(),
    chatbot=gr.Chatbot(
        label="Power Agent",
        type="messages",  # openai-style {'role', 'content'} dicts
        avatar_images=(
            # Same robot avatar for both user and assistant bubbles.
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
        ),
    ),
    fill_height=True,
    #textbox=gr.Textbox(placeholder="Ask me a question", container=False, scale=7),
    multimodal=True,  # accept files/images/audio alongside text
    textbox=gr.MultimodalTextbox(placeholder="Ask me a question",
                                 file_count="multiple",
                                 file_types=['image', '.json', '.mp4', '.pdf', '.txt', '.csv', '.xlsx', '.xls', 'audio'],
                                 sources=["upload", "microphone"],
                                 container=False, scale=7),
    examples=[
        ["Solve a math problem"],
    ],
    type="messages",
)


if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link.
    demo.launch(debug=False, share=True)
requirements.txt CHANGED
@@ -1 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  huggingface_hub==0.25.2
 
1
+ gradio
2
+ requests==2.32.3
3
+ smolagents==1.14.0
4
+ smolagents[openai]
5
+ smolagents[litellm]
6
+ openpyxl==3.1.5
7
+ wikipedia==1.4.0
8
+ llama-index-readers-youtube-transcript==0.3.0
9
+ google-genai==1.13.0
10
+ tabulate==0.9.0
11
+ beautifulsoup4==4.13.3
12
+ pandas==2.2.3
13
+ openai==1.75.0
14
+ google==3.0.0
15
+ pillow==11.1.0
16
  huggingface_hub==0.25.2
tools.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import io
3
+ from smolagents import Tool
4
+ import wikipedia
5
+ from bs4 import BeautifulSoup
6
+ import pandas as pd
7
+ import requests
8
+ from tabulate import tabulate
9
+ import os
10
+ import tempfile
11
+ from pathlib import Path
12
+ from PIL import Image
13
+ from PIL import ImageDraw
14
+ from io import BytesIO
15
+ from dotenv import find_dotenv, load_dotenv
16
+ from openai import OpenAI
17
+ from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
18
+ from google import genai
19
+ from google.genai import types
20
+ import chess
21
+ from PIL import ImageDraw
22
+ from smolagents import DuckDuckGoSearchTool
23
+ from duckduckgo_search.exceptions import DuckDuckGoSearchException
24
+ import torch
25
+ import easyocr
26
+ import fitz
27
+ import numpy as np
28
+
29
class SafeDuckDuckGoSearchTool(DuckDuckGoSearchTool):
    """DuckDuckGo search that survives backend failures: on a search exception
    (typically a rate limit) it pauses and retries once, and turns any
    remaining failure into an error string instead of propagating."""

    def forward(self, query: str) -> str:
        try:
            return super().forward(query)
        except DuckDuckGoSearchException as err:
            notice = f"❌ DuckDuckGoSearchException: {str(err)}. Retrying in 3 seconds due to possible rate limit..."
            print(notice)
            time.sleep(3)
        except Exception as err:
            return f"❌ An unexpected error occurred during web search: {str(err)}"
        # Second (and final) attempt after the cool-down pause.
        try:
            return super().forward(query)
        except DuckDuckGoSearchException as retry_err:
            return f"❌ DuckDuckGoSearchException after retry: {str(retry_err)}. The search engine may be rate-limited. Please try again later."
        except Exception as retry_err:
            return f"❌ An unexpected error occurred during web search after retry: {str(retry_err)}"
45
+
46
class WikipediaSearch(Tool):
    """Fetch a Wikipedia page and return its plain-text content."""

    name = "wikipedia_search"
    description = "Fetches wikipedia pages."
    inputs = {
        "query": {
            "type": "string",
            "description": "Query to be searched on wikipedia"
        }
    }
    output_type = "string"

    def forward(self, query: str) -> str:
        """Resolve *query* to a page and strip its HTML down to text."""
        page = wikipedia.page(query)
        # Parse the rendered HTML and keep only the text nodes.
        soup = BeautifulSoup(page.html(), 'html.parser')
        return soup.get_text()
62
+
63
class ExcelReader(Tool):
    """Render an Excel workbook's first sheet as a plain-text markdown table."""

    name = 'excel_processor'
    # Fixed typo: "processed" -> "processes".
    description = "excel reading tool, processes files of .xlsx and .xls format."
    inputs = {
        "file_path": {
            "type": "string",
            "description": "path to the excel file"
        }
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        # pandas selects the engine (openpyxl, etc.) from the file extension.
        df = pd.read_excel(file_path)
        # GitHub-flavoured table so the agent can read it without a spreadsheet lib.
        return tabulate(df, headers="keys", tablefmt="github", showindex=False)
79
+
80
class CsvReader(Tool):
    """Render a CSV file as a plain-text markdown table."""

    name = 'csv_processor'
    # Fixed typo: "processed" -> "processes".
    description = "csv reading tool, processes files of .csv format."
    inputs = {
        "file_path": {
            "type": "string",
            # Fixed copy-paste error: this tool reads CSV files, not Excel.
            "description": "path to the csv file"
        }
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        df = pd.read_csv(file_path)
        # GitHub-flavoured table so the agent can read it without pandas.
        return tabulate(df, headers="keys", tablefmt="github", showindex=False)
96
+
97
class FileReader(Tool):
    """Read a text file from disk and return its full content."""

    name = 'file_reader'
    description = "reads saved files"
    inputs = {
        "file_path": {
            "type": "string",
            "description": "path to the file"
        }
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        # Explicit UTF-8 so results do not depend on the platform's locale
        # encoding (the original used the implicit default).
        with open(file_path, "r", encoding="utf-8") as file:
            content = file.read()
        return content
113
+
114
def download_files(task_id, file_name):
    """Download a task attachment from the scoring server into a temp directory.

    Args:
        task_id: identifier used to build the download URL.
        file_name: local name under which the file is saved.

    Returns:
        Path of the saved file.

    Raises:
        requests.HTTPError: on an error HTTP status — the original silently
            saved the server's error page as if it were the attachment.
    """
    url = f'https://agents-course-unit4-scoring.hf.space/files/{task_id}'
    response = requests.get(url, timeout=15)
    response.raise_for_status()
    tmp_dir = Path(tempfile.gettempdir()) / "project_files"
    tmp_dir.mkdir(exist_ok=True)
    filepath = os.path.join(tmp_dir, file_name)
    with open(filepath, "wb") as f:
        f.write(response.content)

    return filepath
124
+
125
def get_images(file_format, file_path):
    """Return a one-element list holding the RGB image at *file_path* when
    *file_format* is a supported raster type, otherwise an empty list."""
    if file_format not in ('png', 'jpeg', 'jpg'):
        return []
    return [Image.open(file_path).convert("RGB")]
132
+
133
class AudioTransciber(Tool):
    """Transcribe an audio file to text using OpenAI Whisper."""

    name = 'audio_transcriber'
    description = "transcribes audio files"
    inputs = {
        "file_path": {
            "type": "string",
            "description": "path to the file"
        }
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        client = OpenAI(api_key=os.getenv("OPEN_AI_KEY"))
        # Close the file handle deterministically (the original leaked it).
        with open(file_path, 'rb') as audio:
            transcript = client.audio.transcriptions.create(model='whisper-1',
                                                            file=audio)
        # The API returns a Transcription object; output_type declares
        # "string", so return its text payload rather than the object.
        return transcript.text
151
+
152
class YouTubeTranscipt(Tool):
    """Fetch the caption track of a YouTube video as a single text blob."""

    name = 'youtube_transcript'
    description = "a tool that returns a transcript for a youtube video. Youtube videos come from urls containing www.youtube.com"
    inputs = {
        "url": {
            "type": "string",
            "description": "url to the youtube video, has 'www.youtube.com' in it."
        }
    }
    output_type = "string"

    def forward(self, url: str) -> str:
        docs = YoutubeTranscriptReader().load_data(ytlinks=[url])
        # load_data returns one document per link; exactly one link was passed.
        return docs[0].text
169
+
170
class YouTubeVideoUnderstanding(Tool):
    """Answer questions about a YouTube video by sending its URL to Gemini."""

    name = 'youtube_video_understanding'
    # Fixed garbled wording ("processes summarizes what is happenening").
    description = "a tool that summarizes what is happening in a youtube video. Youtube videos come from urls containing www.youtube.com"
    inputs = {
        "url": {
            "type": "string",
            "description": "url to the youtube video, has 'www.youtube.com' in it."
        },
        "prompt": {
            "type": "string",
            "description": "user prompt about the video content"
        }
    }
    output_type = "string"

    def forward(self, url: str, prompt: str) -> str:
        load_dotenv(find_dotenv())
        client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
        # Gemini ingests a YouTube URL directly as file_data; no download needed.
        response = client.models.generate_content(
            model='models/gemini-2.5-flash',
            contents=types.Content(
                parts=[
                    types.Part(
                        file_data=types.FileData(file_uri=url)
                    ),
                    types.Part(text=prompt)
                ]
            )
        )
        return response.text
201
+
202
class ImageUnderstanding(Tool):
    """Visual question answering over a local image file via Gemini."""

    name = 'image_understanding'
    # Fixed typo: "fomatats" -> "formats".
    description = "a tool that answers questions about images. Images are files in the .jpeg, .png and .jpg formats."
    inputs = {
        "file_path": {
            "type": "string",
            "description": "path to the image file. These are files in the .jpeg, .png and .jpg formats."
        },
        "prompt": {
            "type": "string",
            "description": "user prompt about the image content"
        }
    }
    output_type = "string"

    def forward(self, file_path: str, prompt: str) -> str:
        load_dotenv(find_dotenv())
        with open(file_path, 'rb') as f:
            image_bytes = f.read()
        extension = file_path.split('.')[-1].lower()
        # 'jpg' is not a registered MIME subtype; the canonical value is 'jpeg'.
        mime_subtype = 'jpeg' if extension == 'jpg' else extension
        client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
        response = client.models.generate_content(
            model='models/gemini-2.0-flash',
            contents=types.Content(
                parts=[
                    types.Part.from_bytes(data=image_bytes,
                                          mime_type=f'image/{mime_subtype}'
                                          ),
                    types.Part(text=prompt)
                ]
            )
        )
        return response.text
236
+
237
class ChessSolver(Tool):
    """Analyze a chess-board image and return the best next move."""

    name = "chess_analysis_tool"
    description = "analyzes the chess board to determine the best next move."
    inputs = {
        "image_path": {
            "type": "string",
            "description": "path to the image showing a chess board."
        },
        "current_player":{
            "type": "string",
            "description": "player whose turn it is. Acceptable inputs are 'black' or 'white'"
        },
    }
    output_type = "string"

    def forward(self, image_path:str, current_player:str)->str:
        # NOTE(review): the stock python-chess package exposes neither
        # `fen_notation` nor `chess_analysis`; this presumably relies on a
        # custom/patched `chess` module — confirm, otherwise this tool raises
        # AttributeError at call time.
        # Step 1: convert the board image to FEN for the given side to move.
        fen = chess.fen_notation(image_path, current_player)
        # Step 2: ask the analysis helper for the best move from that position.
        best_move = chess.chess_analysis(fen)
        return best_move
256
+
257
class WeatherTool(Tool):
    """Current-weather lookup via the OpenWeatherMap geocoding + weather APIs."""

    name = 'weather_tool'
    description = "Gets current weather for the specified city. Units: 'metric' (Celsius) or 'imperial' (Fahrenheit, default)."
    inputs = {
        "location": {
            "type": "string",
            "description": "City name"
        },
        "units": {
            "type": "string",
            "description": "Units: 'metric' or 'imperial' (default: 'imperial')",
            "default": "imperial",
            "nullable": True
        }
    }
    output_type = "string"
    GEO_URL = "http://api.openweathermap.org/geo/1.0/direct"
    WEATHER_URL = "https://api.openweathermap.org/data/2.5/weather"
    DEFAULT_UNITS = "imperial"
    VALID_UNITS = {"metric", "imperial"}
    # Consistent with download_files(); prevents hanging forever on a dead API.
    REQUEST_TIMEOUT = 15

    def get_lat_lon(self, city, api_key):
        """Resolve *city* to (lat, lon) via the geocoding endpoint."""
        params = {"q": city, "appid": api_key, "limit": 1}
        response = requests.get(self.GEO_URL, params=params, timeout=self.REQUEST_TIMEOUT)
        response.raise_for_status()
        data = response.json()
        if not data:
            raise ValueError(f"City '{city}' not found.")
        return data[0]['lat'], data[0]['lon']

    def get_weather(self, lat, lon, api_key, units):
        """Fetch the raw current-weather JSON for the given coordinates."""
        params = {"lat": lat, "lon": lon, "appid": api_key, "units": units}
        response = requests.get(self.WEATHER_URL, params=params, timeout=self.REQUEST_TIMEOUT)
        response.raise_for_status()
        return response.json()

    def forward(self, location: str, units: str = "imperial") -> str:
        """
        Get current weather for a city.
        """
        try:
            api_key = os.getenv("WEATHER_API_KEY")
            # Fall back to the default instead of sending a bad value upstream.
            units = units if units in self.VALID_UNITS else self.DEFAULT_UNITS
            lat, lon = self.get_lat_lon(location, api_key)
            weather = self.get_weather(lat, lon, api_key, units)
            result = {
                "description": weather['weather'][0]['description'],
                "temperature": weather['main']['temp'],
                "humidity": weather['main']['humidity'],
                "pressure": weather['main']['pressure'],
                "units": units,
                "city": location
            }
            return str(result)
        except Exception as e:
            # The agent consumes strings; surface failures as an error message.
            return f"❌ An error occurred while retrieving weather data: {str(e)}"
313
+
314
class OCR(Tool):
    """OCR a PDF by rendering each page to an image and running EasyOCR on it."""

    name = "ocr_tool"
    description = "recognizes text from images."
    inputs = {
        "pdf_path": {
            "type": "string",
            "description": "path to the pdf with text."
        },
        "language": {
            "type": "string",
            # De-duplicated: the original repeated "'ms' for Malay" hundreds of
            # times, bloating every prompt that carries the tool schema.
            "description": "language of the text in the image. Acceptable inputs are EasyOCR language codes such as 'en' for English, 'es' for Spanish, 'fr' for French, 'de' for German, 'it' for Italian, 'pt' for Portuguese, 'ru' for Russian, 'zh' for Chinese, 'ja' for Japanese, 'ko' for Korean, 'ar' for Arabic, 'hi' for Hindi, 'ms' for Malay, etc."
        }
    }
    output_type = "string"

    def draw_boxes(self, image, bounds, color='yellow', width=2):
        """Outline each detected text region on *image* (debug visualization)."""
        draw = ImageDraw.Draw(image)
        for bound in bounds:
            p0, p1, p2, p3 = bound[0]
            draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
        return image

    def forward(self, pdf_path: str, language: str) -> str:
        reader = easyocr.Reader([language])
        all_text = ''
        pdf_document = fitz.open(pdf_path)
        try:
            for page_num in range(len(pdf_document)):
                page = pdf_document[page_num]
                # Render at 300 dpi (PDF user space is 72 dpi).
                pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72))
                img = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
                # NOTE(review): the 10x downscale discards most of the 300 dpi
                # render — presumably for OCR speed; confirm accuracy is acceptable.
                im_small = img.resize((img.width // 10, img.height // 10), Image.LANCZOS)
                image_path = f'saved_image_{page_num}.jpeg'
                im_small.save(image_path, 'JPEG')
                bounds = reader.readtext(image_path)
                if not bounds:
                    # Empty page: the DataFrame filter below would KeyError.
                    continue
                im = Image.open(image_path)
                self.draw_boxes(im, bounds)
                im.save('result.jpg')
                # readtext rows are (box, text, confidence); keep confident hits.
                summary_df = pd.DataFrame(bounds).iloc[:, 1:]
                text_result = ' '.join(summary_df[summary_df[2] >= 0.7][1].astype(str).tolist())
                all_text += text_result
        finally:
            # The original leaked the document handle.
            pdf_document.close()
        return all_text
utils.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def format_history(history):
    """Flatten a Gradio messages-style history (list of {'role', 'content'}
    dicts) into a plain "User: ...\nAgent: ...\n" transcript, one pair of
    lines per entry (the counterpart line is left empty)."""
    lines = []
    for entry in history:
        print('Entry', entry)
        role = entry.get('role')
        print(entry.get('content'))
        user_text = str(entry.get('content')) if role == 'user' else ''
        agent_text = str(entry.get('content')) if role == 'assistant' else ''
        lines.append(f"User: {user_text}\nAgent: {agent_text}\n")
    return "".join(lines)
15
+
16
+ css = """
17
+ :root {
18
+ --background-fill-primary: var(--neutral-950);
19
+ --background-fill-secondary: var(--neutral-900);
20
+ --block-background-fill: var(--neutral-800);
21
+ --block-border-color: var(--border-color-primary);
22
+ --block-border-width: 1px;
23
+ --block-info-text-color: var(--body-text-color-subdued);
24
+ --block-label-background-fill: var(--background-fill-secondary);
25
+ --block-label-border-color: var(--border-color-primary);
26
+ --block-label-border-width: 1px;
27
+ --block-label-text-color: var(--neutral-200);
28
+ --block-shadow: none;
29
+ --block-title-background-fill: none;
30
+ --block-title-border-color: none;
31
+ --block-title-border-width: 0px;
32
+ --block-title-text-color: var(--neutral-200);
33
+ --body-background-fill: var(--background-fill-primary);
34
+ --body-text-color: var(--neutral-100);
35
+ --body-text-color-subdued: var(--neutral-400);
36
+ --border-color-accent: var(--neutral-600);
37
+ --border-color-primary: var(--neutral-700);
38
+ --button-border-width: var(--input-border-width);
39
+ --button-cancel-background-fill: var(--button-secondary-background-fill);
40
+ --button-cancel-background-fill-hover: var(--button-cancel-background-fill);
41
+ --button-cancel-border-color: var(--button-secondary-border-color);
42
+ --button-cancel-border-color-hover: var(--button-cancel-border-color);
43
+ --button-cancel-text-color: var(--button-secondary-text-color);
44
+ --button-cancel-text-color-hover: var(--button-cancel-text-color);
45
+ --button-primary-background-fill: var(--primary-700);
46
+ --button-primary-background-fill-hover: var(--button-primary-background-fill);
47
+ --button-primary-border-color: var(--primary-600);
48
+ --button-primary-border-color-hover: var(--button-primary-border-color);
49
+ --button-primary-text-color: white;
50
+ --button-primary-text-color-hover: var(--button-primary-text-color);
51
+ --button-secondary-background-fill: var(--neutral-600);
52
+ --button-secondary-background-fill-hover: var(--button-secondary-background-fill);
53
+ --button-secondary-border-color: var(--neutral-600);
54
+ --button-secondary-border-color-hover: var(--button-secondary-border-color);
55
+ --button-secondary-text-color: white;
56
+ --button-secondary-text-color-hover: var(--button-secondary-text-color);
57
+ --chatbot-code-background-color: var(--neutral-800);
58
+ --checkbox-background-color: var(--neutral-800);
59
+ --checkbox-background-color-focus: var(--checkbox-background-color);
60
+ --checkbox-background-color-hover: var(--checkbox-background-color);
61
+ --checkbox-background-color-selected: var(--secondary-600);
62
+ --checkbox-border-color: var(--neutral-700);
63
+ --checkbox-border-color-focus: var(--secondary-500);
64
+ --checkbox-border-color-hover: var(--neutral-600);
65
+ --checkbox-border-color-selected: var(--secondary-600);
66
+ --checkbox-border-width: var(--input-border-width);
67
+ --checkbox-label-background-fill: var(--button-secondary-background-fill);
68
+ --checkbox-label-background-fill-hover: var(--button-secondary-background-fill-hover);
69
+ --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill);
70
+ --checkbox-label-border-color: var(--border-color-primary);
71
+ --checkbox-label-border-color-hover: var(--checkbox-label-border-color);
72
+ --checkbox-label-border-width: var(--input-border-width);
73
+ --checkbox-label-text-color: var(--body-text-color);
74
+ --checkbox-label-text-color-selected: var(--checkbox-label-text-color);
75
+ --color-accent-soft: var(--neutral-700);
76
+ --error-background-fill: var(--background-fill-primary);
77
+ --error-border-color: var(--border-color-primary);
78
+ --error-border-width: 1px;
79
+ --error-text-color: #ef4444;
80
+ --input-background-fill: var(--neutral-700);
81
+ --input-background-fill-focus: var(--secondary-600);
82
+ --input-background-fill-hover: var(--input-background-fill);
83
+ --input-border-color: var(--border-color-primary);
84
+ --input-border-color-focus: var(--neutral-700);
85
+ --input-border-color-hover: var(--input-border-color);
86
+ --input-border-width: 0px;
87
+ --input-placeholder-color: var(--neutral-500);
88
+ --input-shadow: none;
89
+ --input-shadow-focus: var(--input-shadow);
90
+ --link-text-color-active: var(--secondary-500);
91
+ --link-text-color: var(--secondary-500);
92
+ --link-text-color-hover: var(--secondary-400);
93
+ --link-text-color-visited: var(--secondary-600);
94
+ --loader-color: var(--color-accent);
95
+ --panel-background-fill: var(--background-fill-secondary);
96
+ --panel-border-color: var(--border-color-primary);
97
+ --panel-border-width: 0;
98
+ --shadow-spread: 1px;
99
+ --slider-color: auto;
100
+ --stat-background-fill: var(--primary-500);
101
+ --table-border-color: var(--neutral-700);
102
+ --table-even-background-fill: var(--neutral-950);
103
+ --table-odd-background-fill: var(--neutral-900);
104
+ --table-row-focus: var(--color-accent-soft);
105
+ --accordion-text-color: var(--body-text-color);
106
+ --table-text-color: var(--body-text-color);
107
+ --border-color-accent-subdued: var(--border-color-accent);
108
+ --code-background-fill: var(--neutral-800);
109
+ --checkbox-label-border-color-selected: var(--checkbox-label-border-color);
110
+ --error-icon-color: #ef4444;
111
+ --button_primary_shadow: None;
112
+ --button-primary-shadow-hover: var(--button-primary-shadow);
113
+ --button-primary-shadow-active: var(--button-primary-shadow);
114
+ --button_secondary_shadow: None;
115
+ --button-secondary-shadow-hover: var(--button-secondary-shadow);
116
+ --button-secondary-shadow-active: var(--button-secondary-shadow);
117
+ --name: Franklisi/darkmode;
118
+ --block-info-text-size: var(--text-sm);
119
+ --block-info-text-weight: 400;
120
+ --block-label-margin: 0;
121
+ --block-label-padding: var(--spacing-sm) var(--spacing-lg);
122
+ --block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0;
123
+ --block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px);
124
+ --block-label-shadow: var(--block-shadow);
125
+ --block-label-text-size: var(--text-sm);
126
+ --block-label-text-weight: 400;
127
+ --block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px);
128
+ --block-radius: var(--radius-lg);
129
+ --block-title-padding: 0;
130
+ --block-title-radius: none;
131
+ --block-title-text-size: var(--text-md);
132
+ --block-title-text-weight: 400;
133
+ --body-text-size: var(--text-md);
134
+ --body-text-weight: 400;
135
+ --button-large-padding: var(--spacing-lg) calc(2 * var(--spacing-lg));
136
+ --button-large-radius: var(--radius-lg);
137
+ --button-large-text-size: var(--text-lg);
138
+ --button-large-text-weight: 600;
139
+ --button-shadow: none;
140
+ --button-shadow-active: none;
141
+ --button-shadow-hover: none;
142
+ --button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm));
143
+ --button-small-radius: var(--radius-lg);
144
+ --button-small-text-size: var(--text-md);
145
+ --button-small-text-weight: 400;
146
+ --button-transition: background-color 0.2s ease;
147
+ --checkbox-border-radius: var(--radius-sm);
148
+ --checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e");
149
+ --checkbox-label-gap: var(--spacing-lg);
150
+ --checkbox-label-padding: var(--spacing-md) calc(2 * var(--spacing-md));
151
+ --checkbox-label-shadow: none;
152
+ --checkbox-label-text-size: var(--text-md);
153
+ --checkbox-label-text-weight: 400;
154
+ --checkbox-shadow: var(--input-shadow);
155
+ --color-accent: var(--primary-500);
156
+ --container-radius: var(--radius-lg);
157
+ --embed-radius: var(--radius-lg);
158
+ --font: 'Source Sans Pro', 'ui-sans-serif', 'system-ui', sans-serif;
159
+ --font-mono: 'IBM Plex Mono', 'ui-monospace', 'Consolas', monospace;
160
+ --form-gap-width: 0px;
161
+ --input-padding: var(--spacing-xl);
162
+ --input-radius: var(--radius-lg);
163
+ --input-text-size: var(--text-md);
164
+ --input-text-weight: 400;
165
+ --layout-gap: var(--spacing-xxl);
166
+ --neutral-100: #f3f4f6;
167
+ --neutral-200: #e5e7eb;
168
+ --neutral-300: #d1d5db;
169
+ --neutral-400: #9ca3af;
170
+ --neutral-50: #f9fafb;
171
+ --neutral-500: #6b7280;
172
+ --neutral-600: #4b5563;
173
+ --neutral-700: #374151;
174
+ --neutral-800: #1f2937;
175
+ --neutral-900: #111827;
176
+ --neutral-950: #0b0f19;
177
+ --primary-100: #dbeafe;
178
+ --primary-200: #bfdbfe;
179
+ --primary-300: #93c5fd;
180
+ --primary-400: #60a5fa;
181
+ --primary-50: #eff6ff;
182
+ --primary-500: #3b82f6;
183
+ --primary-600: #2563eb;
184
+ --primary-700: #1d4ed8;
185
+ --primary-800: #1e40af;
186
+ --primary-900: #1e3a8a;
187
+ --primary-950: #1d3660;
188
+ --prose-header-text-weight: 600;
189
+ --prose-text-size: var(--text-md);
190
+ --prose-text-weight: 400;
191
+ --radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e");
192
+ --radius-lg: 8px;
193
+ --radius-md: 6px;
194
+ --radius-sm: 4px;
195
+ --radius-xl: 12px;
196
+ --radius-xs: 2px;
197
+ --radius-xxl: 22px;
198
+ --radius-xxs: 1px;
199
+ --secondary-100: #dbeafe;
200
+ --secondary-200: #bfdbfe;
201
+ --secondary-300: #93c5fd;
202
+ --secondary-400: #60a5fa;
203
+ --secondary-50: #eff6ff;
204
+ --secondary-500: #3b82f6;
205
+ --secondary-600: #2563eb;
206
+ --secondary-700: #1d4ed8;
207
+ --secondary-800: #1e40af;
208
+ --secondary-900: #1e3a8a;
209
+ --secondary-950: #1d3660;
210
+ --section-header-text-size: var(--text-md);
211
+ --section-header-text-weight: 400;
212
+ --shadow-drop: rgba(0,0,0,0.05) 0px 1px 2px 0px;
213
+ --shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
214
+ --shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset;
215
+ --spacing-lg: 8px;
216
+ --spacing-md: 6px;
217
+ --spacing-sm: 4px;
218
+ --spacing-xl: 10px;
219
+ --spacing-xs: 2px;
220
+ --spacing-xxl: 16px;
221
+ --spacing-xxs: 1px;
222
+ --table-radius: var(--radius-lg);
223
+ --text-lg: 16px;
224
+ --text-md: 14px;
225
+ --text-sm: 12px;
226
+ --text-xl: 22px;
227
+ --text-xs: 10px;
228
+ --text-xxl: 26px;
229
+ --text-xxs: 9px;
230
+ --chatbot-text-size: var(--text-lg);
231
+ --button-transform-hover: none;
232
+ --button-transform-active: none;
233
+ --button-primary-shadow: none;
234
+ --button-secondary-shadow: var(--button-primary-shadow);
235
+ --button-medium-padding: var(--spacing-md) calc(2 * var(--spacing-md));
236
+ --button-medium-radius: var(--radius-md);
237
+ --button-medium-text-size: var(--text-md);
238
+ --button-medium-text-weight: 600;
239
+ }"""