# NOTE: Hugging Face blob-viewer header (author: krzsam, commit 86cbfce, 5.3 kB)
# was pasted into this file; kept here as a comment so the module stays valid Python.
from smolagents import CodeAgent, InferenceClientModel, WebSearchTool, FinalAnswerTool
#from smolagents import ApiModel
#from mistralai import Mistral
import os
# --------- my tools
from my_tool_reverse_string import ReverseStringTool
from my_tool_image_load import ImageLoadTool
from my_tool_chess_board import ChessBoard
from my_tool_fen import FENTool
from my_tool_chess_analysis import ChessAnalysisTool
from my_prompt_config import MyPromptConfig
from dotenv import load_dotenv
# https://huggingface.co/docs/transformers/model_doc/mistral?usage=Pipeline
# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class MyAgent:
    """General AI assistant for the Agents Course assignment.

    Wraps a single smolagents ``CodeAgent`` ("CourseAssistant") equipped with
    custom tools (string reversal, image loading, chess-board recognition, FEN
    handling, chess analysis) plus web search and final-answer tools.  The
    instance itself is callable: ``answer = MyAgent()(question)``.
    """

    # Candidate reasoning models kept for reference:
    #MODEL_REASONING = "Qwen/Qwen2.5-Coder-32B-Instruct"
    #MODEL_REASONING = "deepseek-ai/DeepSeek-R1"
    MODEL_REASONING = "Qwen/Qwen3-235B-A22B"  # test more
    #MODEL_REASONING = "mistralai/Mixtral-8x22B-v0.1"  # not available via HuggingFace

    # Chess-piece recognition model file and its directory.  The directory can
    # be overridden with the CHESS_BOARD_MODEL_DIR environment variable; the
    # default preserves the original hard-coded local (WSL) path.
    chess_board_model_name = "my_chess_pieces_recognition.pth"
    chess_board_model_dir = os.getenv(
        "CHESS_BOARD_MODEL_DIR",
        "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/saved_models",
    )

    def __init__(self):
        """Load environment configuration and build the reasoning agent."""
        print("Agent initialized.")
        load_dotenv()
        # Bug fix: the original used os.environ["MISTRAL_API_KEY"], which
        # raises KeyError when the variable is unset — even though the Mistral
        # client that would consume it is commented out below.  os.getenv
        # yields None instead, so a missing key no longer crashes startup.
        self.mistral_api_key = os.getenv("MISTRAL_API_KEY")
        self._create_agents()

    def _create_agents(self):
        """Create the model and the main ``CodeAgent`` with all tools attached.

        Renamed from ``__create_agents__``: dunder-style names are reserved by
        the language; a single leading underscore marks an internal method.
        """
        # --- Mistral (not active) ----------------------------------------------------------------
        # Would need a class implementation based on ApiModel:
        # https://github.com/huggingface/smolagents/blob/main/src/smolagents/models.py
        # https://github.com/mistralai/client-python/tree/main/examples
        #self.mistral_client = Mistral(api_key=self.mistral_api_key)
        #self.model = ApiModel(model_id="mistral-large-2411", client=self.mistral_client)

        # --- HF Inference ------------------------------------------------------------------------
        self.model = InferenceClientModel(model_id=self.MODEL_REASONING)

        self.reasoning_agent = CodeAgent(
            name="CourseAssistant",
            description="General AI Assistant",
            tools=[
                ImageLoadTool(),
                ReverseStringTool(),
                ChessBoard(self.chess_board_model_name, self.chess_board_model_dir),
                FENTool(),
                ChessAnalysisTool(),
                WebSearchTool(),
                FinalAnswerTool(),
            ],
            model=self.model,
            planning_interval=3,  # This is where you activate planning!
            prompt_templates=MyPromptConfig.PROMPT_TEMPLATES,
            managed_agents=[],
            # Modules the generated code is allowed to import at run time.
            additional_authorized_imports=[
                "PIL",
                "chess",
                "matplotlib",
                "matplotlib.pyplot",
                "stockfish",
                "requests",
                "wikipediaapi",
                "bs4",
                "my_tool_chess_analysis",
                "my_tool_chess_board",
                "my_tool_fen",
                "my_tool_image_load",
                "my_tool_reverse_string",
            ],
        )
        print(f"Main agent initialized: {self.reasoning_agent}")

    def __call__(self, question: str) -> str:
        """Run the reasoning agent on *question* and return its answer."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        answer = self.reasoning_agent.run(question)
        print(f"Agent returning answer: {answer}")
        return answer