petkar commited on
Commit
52b1d4b
·
verified ·
1 Parent(s): 492aa00

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -8
app.py CHANGED
@@ -1,8 +1,11 @@
1
- from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, load_tool, tool
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
 
 
 
6
 
7
 
8
  # Initialize the Hugging Face web search model
@@ -55,20 +58,27 @@ def get_current_time_in_timezone(timezone: str) -> str:
55
 
56
 
57
  final_answer = FinalAnswerTool()
58
- model = InferenceClientModel(
59
- max_tokens=2096,
60
- temperature=0.5,
61
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
62
- custom_role_conversions=None,
 
 
 
 
63
  )
64
 
 
 
 
 
65
  with open("prompts.yaml", 'r') as stream:
66
  prompt_templates = yaml.safe_load(stream)
67
 
68
- # We're creating our CodeAgent
69
  agent = CodeAgent(
70
  model=model,
71
- tools=[final_answer], # add your tools here (don't remove final_answer)
72
  max_steps=6,
73
  verbosity_level=1,
74
  grammar=None,
@@ -78,5 +88,10 @@ agent = CodeAgent(
78
  prompt_templates=prompt_templates
79
  )
80
 
 
81
  GradioUI(agent).launch()
 
 
 
 
82
 
 
1
+ from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
+ from tools.final_answer import FinalAnswerTool
7
+
8
+ from Gradio_UI import GradioUI
9
 
10
 
11
  # Initialize the Hugging Face web search model
 
58
 
59
 
60
  final_answer = FinalAnswerTool()
61
+
62
+ # If the agent does not answer, the model may be overloaded; use another model, or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
63
+ # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
64
+
65
+ model = HfApiModel(
66
+ max_tokens=2096,
67
+ temperature=0.5,
68
+ model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
69
+ custom_role_conversions=None,
70
  )
71
 
72
+
73
+ # Import tool from Hub
74
+ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
75
+
76
  with open("prompts.yaml", 'r') as stream:
77
  prompt_templates = yaml.safe_load(stream)
78
 
 
79
  agent = CodeAgent(
80
  model=model,
81
+ tools=[final_answer], # add your tools here (don't remove final_answer)
82
  max_steps=6,
83
  verbosity_level=1,
84
  grammar=None,
 
88
  prompt_templates=prompt_templates
89
  )
90
 
91
+
92
  GradioUI(agent).launch()
93
+
94
+
95
+
96
+
97