Update app.py
Helper Function (call_llm): Introduced to handle responses from LiteLLMModel.complete(), ensuring that text is extracted correctly.
Sanity Checker & Answer Expander: Updated to use call_llm instead of direct model calls to avoid type errors.
Other Calls to LLM: Replaced direct calls like llm(prompt) within handle_query and other blending scenarios with call_llm(prompt).
General Adjustments: Ensured consistency in handling model responses throughout the code.
app.py
CHANGED
|
@@ -41,6 +41,18 @@ logger.info("GEMINI API Key loaded successfully.")
|
|
| 41 |
# Instantiate the model using LiteLLMModel
|
| 42 |
llm = LiteLLMModel(model_id="gemini/gemini-pro", api_key=gemini_api_key)
|
| 43 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
###############################################################################
|
| 45 |
# 3) CSV Loading and Processing
|
| 46 |
###############################################################################
|
|
@@ -141,7 +153,7 @@ class QuestionSanityChecker:
|
|
| 141 |
f"Is the above question relevant to daily wellness? Respond with 'Yes' or 'No' only."
|
| 142 |
)
|
| 143 |
try:
|
| 144 |
-
response = llm(prompt)
|
| 145 |
is_yes = 'yes' in response.lower()
|
| 146 |
is_no = 'no' in response.lower()
|
| 147 |
logger.debug(f"Sanity check response: '{response}', interpreted as is_yes={is_yes}, is_no={is_no}")
|
|
@@ -162,25 +174,21 @@ sanity_checker = QuestionSanityChecker(llm)
|
|
| 162 |
###############################################################################
|
| 163 |
# 7) smolagents Integration: GROQ Model and Web Search
|
| 164 |
###############################################################################
|
| 165 |
-
# Initialize the smolagents' LiteLLMModel with GROQ model (already instantiated as llm if needed elsewhere)
|
| 166 |
-
|
| 167 |
# Instantiate the DuckDuckGo search tool
|
| 168 |
search_tool = DuckDuckGoSearchTool()
|
| 169 |
|
| 170 |
# Create the web agent with the search tool
|
| 171 |
web_agent = CodeAgent(
|
| 172 |
tools=[search_tool],
|
| 173 |
-
model=llm
|
| 174 |
)
|
| 175 |
|
| 176 |
-
# Define the managed web agent
|
| 177 |
managed_web_agent = ManagedAgent(
|
| 178 |
agent=web_agent,
|
| 179 |
name="web_search",
|
| 180 |
description="Runs a web search for you. Provide your query as an argument."
|
| 181 |
)
|
| 182 |
|
| 183 |
-
# Create the manager agent with managed web agent and additional tools if needed
|
| 184 |
manager_agent = CodeAgent(
|
| 185 |
tools=[], # Add additional tools here if required
|
| 186 |
model=llm,
|
|
@@ -219,7 +227,7 @@ class AnswerExpander:
|
|
| 219 |
)
|
| 220 |
|
| 221 |
logger.debug(f"Generated prompt for answer expansion: {prompt}")
|
| 222 |
-
response = llm(prompt)
|
| 223 |
logger.debug(f"Expanded answer: {response}")
|
| 224 |
return response.strip()
|
| 225 |
except Exception as e:
|
|
@@ -328,7 +336,7 @@ def handle_query(query: str, detail: bool = False) -> str:
|
|
| 328 |
f"Previous Answer:\n{cached_answer}\n\n"
|
| 329 |
f"Web Results:\n{web_search_response}"
|
| 330 |
)
|
| 331 |
-
final_answer = llm(blend_prompt).strip()
|
| 332 |
else:
|
| 333 |
final_answer = (
|
| 334 |
f"**Daily Wellness AI**\n\n"
|
|
@@ -350,7 +358,7 @@ def handle_query(query: str, detail: bool = False) -> str:
|
|
| 350 |
f"Previous Answer:\n{cached_answer}\n\n"
|
| 351 |
f"New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
|
| 352 |
)
|
| 353 |
-
final_answer = llm(blend_prompt).strip()
|
| 354 |
else:
|
| 355 |
final_answer = answer_expander.expand(query, responses, detail=detail)
|
| 356 |
|
|
|
|
| 41 |
# Instantiate the model using LiteLLMModel
|
| 42 |
llm = LiteLLMModel(model_id="gemini/gemini-pro", api_key=gemini_api_key)
|
| 43 |
|
| 44 |
+
###############################################################################
|
| 45 |
+
# Helper Function for LLM Calls
|
| 46 |
+
###############################################################################
|
| 47 |
+
def call_llm(prompt: str) -> str:
    """Send *prompt* to the module-level ``llm`` and return its text output.

    ``llm.complete`` may return either a plain string or a dict-like
    response carrying the text under ``'text'``; this helper normalizes
    both so callers can safely use ``.lower()`` / ``.strip()`` on the
    result.

    Args:
        prompt: The fully rendered prompt to send to the model.

    Returns:
        The model's textual response; ``''`` when a dict response has
        no ``'text'`` key.
    """
    result = llm.complete(prompt)
    if isinstance(result, dict):
        return result.get('text', '')
    if isinstance(result, str):
        return result
    # Some model wrappers return response objects rather than raw
    # strings; prefer a `.text` attribute if present — TODO confirm
    # against the installed LiteLLMModel version.
    text = getattr(result, 'text', None)
    if isinstance(text, str):
        return text
    # Last resort: coerce so the declared `-> str` contract holds and
    # downstream `.lower()` / `.strip()` calls cannot raise.
    return str(result)
|
| 55 |
+
|
| 56 |
###############################################################################
|
| 57 |
# 3) CSV Loading and Processing
|
| 58 |
###############################################################################
|
|
|
|
| 153 |
f"Is the above question relevant to daily wellness? Respond with 'Yes' or 'No' only."
|
| 154 |
)
|
| 155 |
try:
|
| 156 |
+
response = call_llm(prompt)
|
| 157 |
is_yes = 'yes' in response.lower()
|
| 158 |
is_no = 'no' in response.lower()
|
| 159 |
logger.debug(f"Sanity check response: '{response}', interpreted as is_yes={is_yes}, is_no={is_no}")
|
|
|
|
| 174 |
###############################################################################
|
| 175 |
# 7) smolagents Integration: GROQ Model and Web Search
|
| 176 |
###############################################################################
|
|
|
|
|
|
|
| 177 |
# Instantiate the DuckDuckGo search tool
|
| 178 |
search_tool = DuckDuckGoSearchTool()
|
| 179 |
|
| 180 |
# Create the web agent with the search tool
|
| 181 |
web_agent = CodeAgent(
|
| 182 |
tools=[search_tool],
|
| 183 |
+
model=llm
|
| 184 |
)
|
| 185 |
|
|
|
|
| 186 |
managed_web_agent = ManagedAgent(
|
| 187 |
agent=web_agent,
|
| 188 |
name="web_search",
|
| 189 |
description="Runs a web search for you. Provide your query as an argument."
|
| 190 |
)
|
| 191 |
|
|
|
|
| 192 |
manager_agent = CodeAgent(
|
| 193 |
tools=[], # Add additional tools here if required
|
| 194 |
model=llm,
|
|
|
|
| 227 |
)
|
| 228 |
|
| 229 |
logger.debug(f"Generated prompt for answer expansion: {prompt}")
|
| 230 |
+
response = call_llm(prompt)
|
| 231 |
logger.debug(f"Expanded answer: {response}")
|
| 232 |
return response.strip()
|
| 233 |
except Exception as e:
|
|
|
|
| 336 |
f"Previous Answer:\n{cached_answer}\n\n"
|
| 337 |
f"Web Results:\n{web_search_response}"
|
| 338 |
)
|
| 339 |
+
final_answer = call_llm(blend_prompt).strip()
|
| 340 |
else:
|
| 341 |
final_answer = (
|
| 342 |
f"**Daily Wellness AI**\n\n"
|
|
|
|
| 358 |
f"Previous Answer:\n{cached_answer}\n\n"
|
| 359 |
f"New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
|
| 360 |
)
|
| 361 |
+
final_answer = call_llm(blend_prompt).strip()
|
| 362 |
else:
|
| 363 |
final_answer = answer_expander.expand(query, responses, detail=detail)
|
| 364 |
|