Update agents.py
Browse files
agents.py
CHANGED
|
@@ -1,25 +1,18 @@
|
|
| 1 |
-
from smolagents import
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
ChatMessage,
|
| 6 |
-
ToolCallingAgent)
|
| 7 |
-
from smolagents.default_tools import (DuckDuckGoSearchTool,
|
| 8 |
-
VisitWebpageTool,
|
| 9 |
-
WikipediaSearchTool,
|
| 10 |
SpeechToTextTool,
|
| 11 |
PythonInterpreterTool)
|
| 12 |
import yaml
|
| 13 |
-
from final_answer import FinalAnswerTool
|
| 14 |
-
from tools import (youtube_frames_to_images, use_vision_model,
|
| 15 |
-
read_file, download_file_from_url,
|
| 16 |
-
extract_text_from_image, analyze_csv_file,
|
| 17 |
analyze_excel_file, youtube_transcribe,
|
| 18 |
-
|
| 19 |
import os
|
| 20 |
-
import traceback
|
| 21 |
from dotenv import load_dotenv
|
| 22 |
-
import time
|
| 23 |
|
| 24 |
load_dotenv()
|
| 25 |
|
|
@@ -27,26 +20,13 @@ load_dotenv()
|
|
| 27 |
with open("prompts.yaml", 'r') as stream:
|
| 28 |
prompt_templates = yaml.safe_load(stream)
|
| 29 |
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
class SlowLiteLLMModel(LiteLLMModel):
    """LiteLLM model wrapper that throttles every request.

    Sleeps for a fixed delay before each call to the underlying model —
    a crude client-side rate limit for API keys with strict
    requests-per-minute quotas.
    """

    def __init__(self, *args, **kwargs):
        # Pop `delay` (seconds between calls) before forwarding so the
        # base class never sees an unexpected keyword.  Defaults to the
        # original hard-coded 15 s pause, so existing callers are
        # unaffected.
        self._delay = kwargs.pop("delay", 15.0)
        super().__init__(*args, **kwargs)

    def __call__(self, messages, **kwargs) -> ChatMessage:
        """Pause for the configured delay, then delegate to the base model.

        The messages are forwarded untouched; the only added behavior is
        the throttling sleep.
        """
        time.sleep(self._delay)
        return super().__call__(messages, **kwargs)
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
react_model_name = "gemini-1.5-mini"
|
| 43 |
react_model = LiteLLMModel(
|
| 44 |
-
model_id="
|
| 45 |
api_key=os.getenv("GEMINI_KEY"),
|
| 46 |
temperature=0.2
|
| 47 |
)
|
| 48 |
|
| 49 |
-
|
| 50 |
manager_agent = CodeAgent(
|
| 51 |
model=react_model,
|
| 52 |
tools=[FinalAnswerTool(),
|
|
@@ -70,11 +50,8 @@ manager_agent = CodeAgent(
|
|
| 70 |
planning_interval=6,
|
| 71 |
name="Manager",
|
| 72 |
description="The manager of the team, responsible for overseeing and guiding the team's work.",
|
| 73 |
-
|
| 74 |
-
prompt_templates=prompt_templates
|
| 75 |
-
)
|
| 76 |
-
|
| 77 |
-
|
| 78 |
|
| 79 |
if __name__ == "__main__":
|
|
|
|
| 80 |
GradioUI(manager_agent).launch()
|
|
|
|
| 1 |
+
from smolagents import CodeAgent, LiteLLMModel
|
| 2 |
+
from smolagents.default_tools import (DuckDuckGoSearchTool,
|
| 3 |
+
VisitWebpageTool,
|
| 4 |
+
WikipediaSearchTool,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
SpeechToTextTool,
|
| 6 |
PythonInterpreterTool)
|
| 7 |
import yaml
|
| 8 |
+
from final_answer import FinalAnswerTool
|
| 9 |
+
from tools import (youtube_frames_to_images, use_vision_model,
|
| 10 |
+
read_file, download_file_from_url,
|
| 11 |
+
extract_text_from_image, analyze_csv_file,
|
| 12 |
analyze_excel_file, youtube_transcribe,
|
| 13 |
+
transcribe_audio, review_youtube_video)
|
| 14 |
import os
|
|
|
|
| 15 |
from dotenv import load_dotenv
|
|
|
|
| 16 |
|
| 17 |
# Load API keys (e.g. GEMINI_KEY) from a local .env file into the process
# environment so os.getenv() below can see them.
load_dotenv()

# Prompt templates (system / planning prompts) consumed by the agents.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# WORKING MODEL SETUP
react_model = LiteLLMModel(
    # NOTE(review): "gemini-1.5-mini" is not a published Gemini model id
    # ("mini" is OpenAI naming); LiteLLM would reject it.  The closest
    # lightweight Gemini model is "gemini-1.5-flash".
    model_id="gemini/gemini-1.5-flash",
    api_key=os.getenv("GEMINI_KEY"),
    temperature=0.2,  # low temperature: favor deterministic tool use
)
|
| 29 |
|
|
|
|
| 30 |
manager_agent = CodeAgent(
|
| 31 |
model=react_model,
|
| 32 |
tools=[FinalAnswerTool(),
|
|
|
|
| 50 |
planning_interval=6,
|
| 51 |
name="Manager",
|
| 52 |
description="The manager of the team, responsible for overseeing and guiding the team's work.",
|
| 53 |
+
prompt_templates=prompt_templates)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
if __name__ == "__main__":
    # Imported lazily so the Gradio dependency is only required when this
    # module is launched directly as a web app, not when it is imported.
    from smolagents import GradioUI

    ui = GradioUI(manager_agent)
    ui.launch()
|