import yaml
import os
from smolagents import GradioUI, CodeAgent, LiteLLMModel
# from litellm import token_counter  # fallback when provider doesn't return usage

# Directory containing this file (used below to locate prompts.yaml)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

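# Local tool implementations that ship with this template, aliased to
# friendlier class names.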
from tools.web_search import DuckDuckGoSearchTool as WebSearch
from tools.visit_webpage import VisitWebpageTool as VisitWebpage
from tools.suggest_menu import SimpleTool as SuggestMenu
from tools.catering_service_tool import SimpleTool as CateringServiceTool
from tools.superhero_party_theme_generator import SuperheroPartyThemeTool as SuperheroPartyThemeGenerator
from tools.final_answer import FinalAnswerTool as FinalAnswer

# class CountedLiteLLMModel(LiteLLMModel):
#     def __init__(self, *args, **kwargs):
#         super().__init__(*args, **kwargs)
#         # ensure attrs exist (instrumentor will read them)
#         self.last_input_token_count = 0
#         self.last_output_token_count = 0

#     def generate(self, *args, **kwargs):
#         # call as usual
#         out = super().generate(*args, **kwargs)

#         # try to read usage from the last response if LiteLLM provided it
#         usage = getattr(self, "last_response_usage", None)
#         # some smolagents versions store usage on the response object instead:
#         if usage is None:
#             usage = getattr(out, "usage", None)

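#         # NOTE (assumption): depending on the LiteLLM/smolagents version,
#         # `usage` may be a pydantic object rather than a dict, in which case
#         # getattr(usage, "prompt_tokens", 0) is needed instead of .get().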
#         if usage:
#             # common OpenAI-style keys; adjust if your provider uses different names
#             self.last_input_token_count = \
#                 usage.get("prompt_tokens") or usage.get("input_tokens") or 0
#             self.last_output_token_count = \
#                 usage.get("completion_tokens") or usage.get("output_tokens") or 0
#         else:
#             # fallback: estimate with LiteLLM's token_counter to avoid zeros
#             prompt = kwargs.get("prompt") or (args[0] if args else "")
#             try:
#                 self.last_input_token_count = token_counter(model=self.model_id, text=prompt) or 0
#             except Exception:
#                 pass  # leave prior value if estimation fails

#         return out

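# LiteLLM recognizes bare 'claude-*' model IDs and routes them to Anthropic,
# so no 'anthropic/' prefix is required; ANTHROPIC_API_KEY must be set in the
# environment.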
model = LiteLLMModel(
    model_id='claude-3-5-sonnet-latest',
    api_key=os.getenv('ANTHROPIC_API_KEY'),
)

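# One instance of each tool; all of them (including final_answer, which lets
# the agent terminate with its result) are handed to the agent below.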
web_search = WebSearch()
visit_webpage = VisitWebpage()
suggest_menu = SuggestMenu()
catering_service_tool = CateringServiceTool()
superhero_party_theme_generator = SuperheroPartyThemeGenerator()
final_answer = FinalAnswer()


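# Load the prompt templates that override smolagents' built-in defaults
# (system prompt, planning prompts, etc.).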
with open(os.path.join(CURRENT_DIR, "prompts.yaml"), 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

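# CodeAgent solves tasks by writing and executing Python snippets that call
# the tools; executor_type='local' runs that code in-process, and max_steps
# bounds the think/act loop.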
agent = CodeAgent(
    model=model,
    tools=[web_search, visit_webpage, suggest_menu, catering_service_tool, superhero_party_theme_generator, final_answer],
    managed_agents=[],
    max_steps=10,
    verbosity_level=2,
    planning_interval=None,
    name=None,
    description=None,
    executor_type='local',
    executor_kwargs={},
    max_print_outputs_length=None,
    prompt_templates=prompt_templates
)
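
# When run directly (e.g. `python app.py`), serve the agent behind a Gradio
# chat interface.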
if __name__ == "__main__":
    GradioUI(agent).launch()
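    # Headless alternative (hypothetical task string) instead of the UI:
    # print(agent.run("Suggest a superhero-themed menu for a party of 20"))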