SamarthPujari committed on
Commit
3c20056
·
verified ·
1 Parent(s): 0190090

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -0
app.py CHANGED
@@ -18,6 +18,15 @@ API_KEY = os.getenv("Weather_Token")
18
  # -------------------- TOOL 1: Get Weather --------------------
19
  @tool
20
  def get_current_weather(place: str) -> str:
 
 
 
 
 
 
 
 
 
21
  url = "https://api.openweathermap.org/data/2.5/weather"
22
  params = {
23
  "q": place,
@@ -43,6 +52,16 @@ def get_current_weather(place: str) -> str:
43
  # -------------------- TOOL 2: Get Time --------------------
44
  @tool
45
  def get_current_time_in_timezone(timezone: str) -> str:
 
 
 
 
 
 
 
 
 
 
46
  try:
47
  tz = pytz.timezone(timezone)
48
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
@@ -56,6 +75,16 @@ qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
56
 
57
  @tool
58
  def document_qna_tool(pdf_path: str, question: str) -> str:
 
 
 
 
 
 
 
 
 
 
59
  try:
60
  if not os.path.exists(pdf_path):
61
  return f"[ERROR] File not found: {pdf_path}"
@@ -79,6 +108,15 @@ def document_qna_tool(pdf_path: str, question: str) -> str:
79
  # -------------------- TOOL 4: Local Image Generation --------------------
80
  @tool
81
  def image_generator(prompt: str) -> str:
 
 
 
 
 
 
 
 
 
82
  try:
83
  device = "cuda" if torch.cuda.is_available() else "cpu"
84
  pipe = StableDiffusionPipeline.from_pretrained(
@@ -96,6 +134,9 @@ def image_generator(prompt: str) -> str:
96
  from smolagents import LocalModel
97
 
98
  class TransformersModel(LocalModel):
 
 
 
99
  def __init__(self):
100
  self.pipeline = pipeline(
101
  "text-generation",
@@ -105,6 +146,16 @@ class TransformersModel(LocalModel):
105
  )
106
 
107
  def generate(self, prompt, **kwargs):
 
 
 
 
 
 
 
 
 
 
108
  result = self.pipeline(prompt, max_new_tokens=500, do_sample=True)
109
  return result[0]['generated_text']
110
 
 
18
  # -------------------- TOOL 1: Get Weather --------------------
19
  @tool
20
  def get_current_weather(place: str) -> str:
21
+ """
22
+ Get the current weather for a given location.
23
+
24
+ Args:
25
+ place (str): Name of the city or location (e.g., "London" or "New York").
26
+
27
+ Returns:
28
+ str: Weather condition, temperature, humidity, and wind speed.
29
+ """
30
  url = "https://api.openweathermap.org/data/2.5/weather"
31
  params = {
32
  "q": place,
 
52
  # -------------------- TOOL 2: Get Time --------------------
53
  @tool
54
  def get_current_time_in_timezone(timezone: str) -> str:
55
+ """
56
+ Get the current local time in a given timezone.
57
+
58
+ Args:
59
+ timezone (str): Timezone string in the format 'Region/City',
60
+ e.g., "America/New_York".
61
+
62
+ Returns:
63
+ str: Formatted local time string.
64
+ """
65
  try:
66
  tz = pytz.timezone(timezone)
67
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
 
75
 
76
  @tool
77
  def document_qna_tool(pdf_path: str, question: str) -> str:
78
+ """
79
+ Answer a natural language question based on the content of a PDF document.
80
+
81
+ Args:
82
+ pdf_path (str): Path to the local PDF file.
83
+ question (str): Natural language question to ask about the PDF.
84
+
85
+ Returns:
86
+ str: Answer generated from the most relevant section of the document.
87
+ """
88
  try:
89
  if not os.path.exists(pdf_path):
90
  return f"[ERROR] File not found: {pdf_path}"
 
108
  # -------------------- TOOL 4: Local Image Generation --------------------
109
  @tool
110
  def image_generator(prompt: str) -> str:
111
+ """
112
+ Generate an image from a given text prompt using Stable Diffusion.
113
+
114
+ Args:
115
+ prompt (str): Description of the image to generate.
116
+
117
+ Returns:
118
+ str: Path to the saved generated image.
119
+ """
120
  try:
121
  device = "cuda" if torch.cuda.is_available() else "cpu"
122
  pipe = StableDiffusionPipeline.from_pretrained(
 
134
  from smolagents import LocalModel
135
 
136
  class TransformersModel(LocalModel):
137
+ """
138
+ Local text generation model wrapper using Hugging Face Transformers.
139
+ """
140
  def __init__(self):
141
  self.pipeline = pipeline(
142
  "text-generation",
 
146
  )
147
 
148
  def generate(self, prompt, **kwargs):
149
+ """
150
+ Generate text from a given prompt.
151
+
152
+ Args:
153
+ prompt (str): Input prompt for generation.
154
+ **kwargs: Additional parameters for the pipeline.
155
+
156
+ Returns:
157
+ str: Generated text output.
158
+ """
159
  result = self.pipeline(prompt, max_new_tokens=500, do_sample=True)
160
  return result[0]['generated_text']
161