# app.py — HuggingFace Space entry point: a party-planner CodeAgent served via a Gradio UI.
import yaml
import os

from smolagents import GradioUI, CodeAgent, LiteLLMModel

# from litellm import token_counter  # fallback when provider doesn't return usage

# Directory containing this file; used to resolve prompts.yaml relative to the
# script rather than the process working directory.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

# Project-local tool implementations, aliased to descriptive names.
from tools.web_search import DuckDuckGoSearchTool as WebSearch
from tools.visit_webpage import VisitWebpageTool as VisitWebpage
from tools.suggest_menu import SimpleTool as SuggestMenu
from tools.catering_service_tool import SimpleTool as CateringServiceTool
from tools.superhero_party_theme_generator import SuperheroPartyThemeTool as SuperheroPartyThemeGenerator
from tools.final_answer import FinalAnswerTool as FinalAnswer
# class CountedLiteLLMModel(LiteLLMModel):
#     def __init__(self, *args, **kwargs):
#         super().__init__(*args, **kwargs)
#         # ensure attrs exist (instrumentor will read them)
#         self.last_input_token_count = 0
#         self.last_output_token_count = 0
#     def generate(self, *args, **kwargs):
#         # call as usual
#         out = super().generate(*args, **kwargs)
#         # try to read usage from the last response if LiteLLM provided it
#         usage = getattr(self, "last_response_usage", None)
#         # some smolagents versions store usage on the response object instead:
#         if usage is None:
#             usage = getattr(out, "usage", None)
#         if usage:
#             # common OpenAI-style keys; adjust if your provider uses different names
#             self.last_input_token_count = \
#                 usage.get("prompt_tokens") or usage.get("input_tokens") or 0
#             self.last_output_token_count = \
#                 usage.get("completion_tokens") or usage.get("output_tokens") or 0
#         else:
#             # fallback: estimate with LiteLLM's token_counter to avoid zeros
#             prompt = kwargs.get("prompt") or (args[0] if args else "")
#             try:
#                 self.last_input_token_count = token_counter(model=self.model_id, text=prompt) or 0
#             except Exception:
#                 pass  # leave prior value if estimation fails
#         return out
# Anthropic Claude accessed through LiteLLM; the API key is read from the
# environment (None if ANTHROPIC_API_KEY is unset, letting LiteLLM raise later).
model = LiteLLMModel(
    model_id='claude-3-5-sonnet-latest',
    api_key=os.getenv('ANTHROPIC_API_KEY'),
)
# One instance per tool the agent may call during a run.
web_search = WebSearch()
visit_webpage = VisitWebpage()
suggest_menu = SuggestMenu()
catering_service_tool = CateringServiceTool()
superhero_party_theme_generator = SuperheroPartyThemeGenerator()
final_answer = FinalAnswer()
# Load the prompt templates that configure the agent's system/planning prompts.
# Explicit UTF-8 so the read is independent of the platform's locale encoding;
# safe_load avoids executing arbitrary YAML tags.
with open(os.path.join(CURRENT_DIR, "prompts.yaml"), 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
# The code-writing agent wired to the tools above. Note: final_answer is
# instantiated earlier but was never registered; include it explicitly so the
# agent uses our FinalAnswer tool rather than leaving the instance dead.
agent = CodeAgent(
    model=model,
    tools=[
        web_search,
        visit_webpage,
        suggest_menu,
        catering_service_tool,
        superhero_party_theme_generator,
        final_answer,
    ],
    managed_agents=[],          # no sub-agents
    max_steps=10,               # hard cap on reasoning/tool-call iterations
    verbosity_level=2,
    planning_interval=None,     # no periodic re-planning step
    name=None,
    description=None,
    executor_type='local',      # run generated code in-process
    executor_kwargs={},
    max_print_outputs_length=None,
    prompt_templates=prompt_templates,
)
if __name__ == "__main__":
    # Serve the agent behind a Gradio chat UI (blocks until the server stops).
    GradioUI(agent).launch()