SamarthPujari committed on
Commit
b4da6a9
·
verified ·
1 Parent(s): 27f5838

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -23
app.py CHANGED
@@ -1,18 +1,22 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
  import os
7
  from tools.final_answer import FinalAnswerTool
8
-
9
  from Gradio_UI import GradioUI
10
 
 
 
 
 
 
11
  API_KEY = os.getenv("Weather_Token")
12
- # Below is an example of a tool that does nothing. Amaze us with your creativity !
 
13
  @tool
14
- def get_current_weather(place: str)-> str: #it's import to specify the return type
15
- #Keep this format for the description / args / args description but feel free to modify the tool
16
  """A tool that fetches the current weather of a particular place
17
  Args:
18
  place: A string representing a valid place (e.g., 'London/Paris')
@@ -24,19 +28,17 @@ def get_current_weather(place: str)-> str: #it's import to specify the return ty
24
  "appid": api_key,
25
  "units": "metric"
26
  }
27
-
28
  try:
29
  response = requests.get(url, params=params)
30
  data = response.json()
31
 
32
- # Check if the request was successful
33
  if response.status_code == 200:
34
  weather_desc = data["weather"][0]["description"]
35
  temperature = data["main"]["temp"]
36
  humidity = data["main"]["humidity"]
37
  wind_speed = data["wind"]["speed"]
38
 
39
- # Display the results
40
  return (
41
  f"Weather in {place}:\n"
42
  f"- Condition: {weather_desc}\n"
@@ -49,6 +51,7 @@ def get_current_weather(place: str)-> str: #it's import to specify the return ty
49
  except Exception as e:
50
  return f"Error fetching weather data for '{place}': {str(e)}"
51
 
 
52
  @tool
53
  def get_current_time_in_timezone(timezone: str) -> str:
54
  """A tool that fetches the current local time in a specified timezone.
@@ -56,38 +59,77 @@ def get_current_time_in_timezone(timezone: str) -> str:
56
  timezone: A string representing a valid timezone (e.g., 'America/New_York').
57
  """
58
  try:
59
- # Create timezone object
60
  tz = pytz.timezone(timezone)
61
- # Get current time in that timezone
62
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
63
  return f"The current local time in {timezone} is: {local_time}"
64
  except Exception as e:
65
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
 
68
  final_answer = FinalAnswerTool()
69
  search_tool = DuckDuckGoSearchTool()
70
 
71
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
72
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
73
-
74
  model = HfApiModel(
75
- max_tokens=2096,
76
- temperature=0.5,
77
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
78
- custom_role_conversions=None,
79
  )
80
 
81
-
82
- # Import tool from Hub
83
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
84
 
85
  with open("prompts.yaml", 'r') as stream:
86
  prompt_templates = yaml.safe_load(stream)
87
-
88
  agent = CodeAgent(
89
  model=model,
90
- tools=[get_current_time_in_timezone, get_current_weather, image_generation_tool, search_tool, final_answer], ## add your tools here (don't remove final answer)
 
 
 
 
 
 
 
91
  max_steps=6,
92
  verbosity_level=1,
93
  grammar=None,
@@ -97,5 +139,4 @@ agent = CodeAgent(
97
  prompt_templates=prompt_templates
98
  )
99
 
100
-
101
- GradioUI(agent).launch()
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
  import os
7
  from tools.final_answer import FinalAnswerTool
 
8
  from Gradio_UI import GradioUI
9
 
10
+ import fitz # PyMuPDF
11
+ from sentence_transformers import SentenceTransformer, util
12
+ from transformers import pipeline
13
+
14
+ # API Key for weather
15
  API_KEY = os.getenv("Weather_Token")
16
+
17
+ # -------------------- TOOL 1: Get Weather --------------------
18
  @tool
19
+ def get_current_weather(place: str) -> str:
 
20
  """A tool that fetches the current weather of a particular place
21
  Args:
22
  place: A string representing a valid place (e.g., 'London/Paris')
 
28
  "appid": api_key,
29
  "units": "metric"
30
  }
31
+
32
  try:
33
  response = requests.get(url, params=params)
34
  data = response.json()
35
 
 
36
  if response.status_code == 200:
37
  weather_desc = data["weather"][0]["description"]
38
  temperature = data["main"]["temp"]
39
  humidity = data["main"]["humidity"]
40
  wind_speed = data["wind"]["speed"]
41
 
 
42
  return (
43
  f"Weather in {place}:\n"
44
  f"- Condition: {weather_desc}\n"
 
51
  except Exception as e:
52
  return f"Error fetching weather data for '{place}': {str(e)}"
53
 
54
+ # -------------------- TOOL 2: Get Time --------------------
55
  @tool
56
  def get_current_time_in_timezone(timezone: str) -> str:
57
  """A tool that fetches the current local time in a specified timezone.
 
59
  timezone: A string representing a valid timezone (e.g., 'America/New_York').
60
  """
61
  try:
 
62
  tz = pytz.timezone(timezone)
 
63
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
64
  return f"The current local time in {timezone} is: {local_time}"
65
  except Exception as e:
66
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
67
 
68
# -------------------- TOOL 3: Document QnA --------------------
# Loaded once at import time so repeated tool calls reuse the same models
# instead of re-downloading / re-initializing them per call.
embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")

@tool
def document_qna_tool(pdf_path: str, question: str) -> str:
    """A tool for answering questions based on the content of a PDF document.
    Args:
        pdf_path: A string path to the local PDF file.
        question: A natural language question to ask about the PDF content.
    """
    try:
        # Step 1: Extract text from the PDF, one chunk per non-empty page.
        doc = fitz.open(pdf_path)
        try:
            text_chunks = []
            for page in doc:
                text = page.get_text()
                if text.strip():
                    text_chunks.append(text)
        finally:
            # Always release the file handle, even if text extraction raises.
            doc.close()

        if not text_chunks:
            return "No text found in the PDF."

        # Step 2: Semantic search — find the page most similar to the question.
        embeddings = embedding_model.encode(text_chunks, convert_to_tensor=True)
        question_embedding = embedding_model.encode(question, convert_to_tensor=True)
        scores = util.pytorch_cos_sim(question_embedding, embeddings)[0]
        # int(...) turns the 0-dim tensor from argmax() into a plain Python
        # index, which is what list indexing expects.
        best_match_idx = int(scores.argmax())
        best_context = text_chunks[best_match_idx]

        # Step 3: Answer the question from the best-matching context.
        # NOTE(review): flan-t5-base has a limited input length; very long
        # pages may be truncated by the tokenizer — confirm acceptable.
        prompt = f"Context: {best_context}\nQuestion: {question}"
        answer = qa_pipeline(prompt, max_new_tokens=100)[0]['generated_text']
        return f"Answer: {answer.strip()}"

    except Exception as e:
        return f"Error processing document QnA: {str(e)}"
106
 
107
+ # -------------------- Other Components --------------------
108
  final_answer = FinalAnswerTool()
109
  search_tool = DuckDuckGoSearchTool()
110
 
 
 
 
111
# Inference model used by the agent for code generation.
model = HfApiModel(
    max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
    temperature=0.5,  # moderate sampling randomness
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,  # use the default role mapping
)
117
 
 
 
118
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
119
 
120
  with open("prompts.yaml", 'r') as stream:
121
  prompt_templates = yaml.safe_load(stream)
122
+
123
  agent = CodeAgent(
124
  model=model,
125
+ tools=[
126
+ get_current_time_in_timezone,
127
+ get_current_weather,
128
+ image_generation_tool,
129
+ search_tool,
130
+ document_qna_tool, # ← New Tool Added
131
+ final_answer
132
+ ],
133
  max_steps=6,
134
  verbosity_level=1,
135
  grammar=None,
 
139
  prompt_templates=prompt_templates
140
  )
141
 
142
+ GradioUI(agent).launch()