| from smolagents import CodeAgent, InferenceClientModel, WebSearchTool, FinalAnswerTool | |
| #from smolagents import ApiModel | |
| #from mistralai import Mistral | |
| import os | |
| # --------- my tools | |
| from my_tool_reverse_string import ReverseStringTool | |
| from my_tool_image_load import ImageLoadTool | |
| from my_tool_chess_board import ChessBoard | |
| from my_tool_fen import FENTool | |
| from my_tool_chess_analysis import ChessAnalysisTool | |
| from my_prompt_config import MyPromptConfig | |
| from my_tool_wiki_page_section import MyWikiPageSectionTool | |
| from my_tool_wiki_filter_tables import MyWikiTableFilterTool | |
| from my_tool_wiki_featured_articles import MyWikiFeaturedArticles | |
| from my_tools_libretexts import MyLibreTextsBookshelvesTool, MyLibreTextsBooksTool | |
| from my_tools_libretexts import MyLibreTextsBookSectionsTool, MyLibreTextsBookSectionParagraphsTool | |
| from my_tools_libretexts import MyLibreTextsParagraphContentsTool | |
| from dotenv import load_dotenv | |
| # https://huggingface.co/docs/transformers/model_doc/mistral?usage=Pipeline | |
| # --- Basic Agent Definition --- | |
| # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------ | |
class MyAgent:
    """General AI assistant agent for the course assignment.

    Wraps a smolagents ``CodeAgent`` backed by a HuggingFace inference
    model and a set of custom tools (chess analysis, wiki scraping,
    LibreTexts browsing, image loading, string utilities).
    """

    # Reasoning models tried during development:
    #   "Qwen/Qwen2.5-Coder-32B-Instruct", "deepseek-ai/DeepSeek-R1";
    #   "mistralai/Mixtral-8x22B-v0.1" is not available via HuggingFace.
    MODEL_REASONING = "Qwen/Qwen3-235B-A22B"  # test more

    # Weights for the chess-piece recognition model used by ChessBoard.
    chess_board_model_name = "my_chess_pieces_recognition.pth"
    # NOTE(review): absolute, machine-specific path — consider moving to an
    # environment variable so the agent runs on other machines.
    chess_board_model_dir = "/mnt/c/Users/krzsa/IdeaProjects/Agents-Course-Assignment/saved_models"

    def __init__(self):
        """Load environment configuration and build the main agent.

        Raises:
            KeyError: if ``MISTRAL_API_KEY`` is not present in the
                environment after ``load_dotenv()``.
        """
        print("Agent initialized.")
        load_dotenv()
        # Kept for the (currently disabled) Mistral ApiModel backend; the
        # active HF backend in __create_agents__ does not use this key.
        self.mistral_api_key = os.environ["MISTRAL_API_KEY"]
        self.__create_agents__()

    def __create_agents__(self):
        """Create the inference model and the main ``CodeAgent``.

        Sets ``self.model`` and ``self.reasoning_agent``.
        """
        # A Mistral backend was prototyped here; it would need an ApiModel
        # subclass. See:
        # https://github.com/huggingface/smolagents/blob/main/src/smolagents/models.py
        # https://github.com/mistralai/client-python/tree/main/examples
        self.model = InferenceClientModel(model_id=self.MODEL_REASONING)

        # How to register a new tool:
        # 1. Implement the tool (one or more tools per file).
        # 2. Import it at the top of this file.
        # 3. Add its module to 'additional_authorized_imports' below.
        # 4. Add an instance to 'tools' below.
        # 5. Reference its name in the prompt config (my_prompt_config.py).
        self.reasoning_agent = CodeAgent(
            name="CourseAssistant",
            description="General AI Assistant",
            tools=[
                ImageLoadTool(),
                ReverseStringTool(),
                ChessBoard(self.chess_board_model_name, self.chess_board_model_dir),
                FENTool(),
                ChessAnalysisTool(),
                MyWikiPageSectionTool(),
                MyWikiTableFilterTool(),
                WebSearchTool(),
                MyWikiFeaturedArticles(),
                MyLibreTextsBookshelvesTool(),
                MyLibreTextsBooksTool(),
                MyLibreTextsBookSectionsTool(),
                MyLibreTextsBookSectionParagraphsTool(),
                MyLibreTextsParagraphContentsTool(),
                FinalAnswerTool(),
            ],
            model=self.model,
            planning_interval=3,  # This is where you activate planning!
            prompt_templates=MyPromptConfig.PROMPT_TEMPLATES,
            managed_agents=[],
            additional_authorized_imports=[
                "PIL",
                "chess",
                "matplotlib",
                "matplotlib.pyplot",
                "stockfish",
                "requests",
                "bs4",
                "json",
                "lxml",
                "wikitextparser",
                # BUG FIX: a missing comma here previously triggered implicit
                # string concatenation, authorizing the bogus module name
                # "mwparserfromhellmy_tool_chess_analysis" instead of these
                # two separate entries.
                "mwparserfromhell",
                "my_tool_chess_analysis",
                "my_tool_chess_board",
                "my_tool_fen",
                "my_tool_image_load",
                "my_tool_reverse_string",
                "my_tool_wiki_page_section",
                "my_tool_wiki_table_filter",
                "my_tool_wiki_featured_articles",
                "my_tools_libretexts",
                "my_base_libretexts_api",
            ],
        )
        print(f"Main agent initialized: {self.reasoning_agent}")

    def __call__(self, question: str) -> str:
        """Run the agent on *question* and return its answer."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        answer = self.reasoning_agent.run(question)
        print(f"Agent returning answer: {answer}")
        return answer