Spaces:
Sleeping
Sleeping
Update pipeline.py
Browse files- pipeline.py +3 -2
pipeline.py
CHANGED
|
@@ -12,7 +12,7 @@ from langchain.embeddings import HuggingFaceEmbeddings
|
|
| 12 |
from langchain.vectorstores import FAISS
|
| 13 |
from langchain.chains import RetrievalQA
|
| 14 |
|
| 15 |
-
from smolagents import DuckDuckGoSearchTool, ManagedAgent, LiteLLMModel ,CodeAgent
|
| 16 |
from pydantic import BaseModel, Field, ValidationError, validator
|
| 17 |
from mistralai import Mistral
|
| 18 |
|
|
@@ -315,7 +315,8 @@ def do_cached_web_search(query: str) -> str:
|
|
| 315 |
# 2) If no suitable cached answer, do a new search
|
| 316 |
try:
|
| 317 |
print("DEBUG: Performing a new web search...")
|
| 318 |
-
model = LiteLLMModel(model_id="gemini/gemini-pro", api_key=os.environ.get("GEMINI_API_KEY"))
|
|
|
|
| 319 |
search_tool = DuckDuckGoSearchTool()
|
| 320 |
web_agent = CodeAgent(
|
| 321 |
tools=[search_tool],
|
|
|
|
| 12 |
from langchain.vectorstores import FAISS
|
| 13 |
from langchain.chains import RetrievalQA
|
| 14 |
|
| 15 |
+
from smolagents import DuckDuckGoSearchTool, ManagedAgent, LiteLLMModel, CodeAgent, HfApiModel
|
| 16 |
from pydantic import BaseModel, Field, ValidationError, validator
|
| 17 |
from mistralai import Mistral
|
| 18 |
|
|
|
|
| 315 |
# 2) If no suitable cached answer, do a new search
|
| 316 |
try:
|
| 317 |
print("DEBUG: Performing a new web search...")
|
| 318 |
+
# model = LiteLLMModel(model_id="gemini/gemini-pro", api_key=os.environ.get("GEMINI_API_KEY"))
|
| 319 |
+
model = HfApiModel()
|
| 320 |
search_tool = DuckDuckGoSearchTool()
|
| 321 |
web_agent = CodeAgent(
|
| 322 |
tools=[search_tool],
|