Chikeka committed on
Commit
d6266eb
·
verified ·
1 Parent(s): cf19f84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -6
app.py CHANGED
@@ -89,24 +89,32 @@ def get_current_time_in_timezone(timezone: str) -> str:
89
 
90
  final_answer = FinalAnswerTool()
91
 
92
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
93
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
94
  model = HfApiModel(
95
  max_tokens=2096,
96
  temperature=0.5,
97
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct', # it is possible that this model may be overloaded
98
  custom_role_conversions=None,
99
  )
100
 
101
  # Import tool from Hub
102
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
 
 
 
103
 
104
  with open("prompts.yaml", 'r') as stream:
105
  prompt_templates = yaml.safe_load(stream)
106
 
 
 
 
 
 
107
  agent = CodeAgent(
108
  model=model,
109
- tools=[final_answer, get_weather_forecast, get_current_time_in_timezone, image_generation_tool], # Added all tools here
110
  max_steps=6,
111
  verbosity_level=1,
112
  grammar=None,
@@ -116,4 +124,13 @@ agent = CodeAgent(
116
  prompt_templates=prompt_templates
117
  )
118
 
119
- GradioUI(agent).launch()
 
 
 
 
 
 
 
 
 
 
89
 
90
  final_answer = FinalAnswerTool()
91
 
92
+ # Use the HF endpoint as suggested in the comment since there's an auth error with the direct model
 
93
  model = HfApiModel(
94
  max_tokens=2096,
95
  temperature=0.5,
96
+ model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud', # Using the suggested endpoint
97
  custom_role_conversions=None,
98
  )
99
 
100
  # Import tool from Hub
101
+ try:
102
+ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
103
+ except Exception as e:
104
+ print(f"Warning: Could not load image generation tool: {e}")
105
+ image_generation_tool = None
106
 
107
  with open("prompts.yaml", 'r') as stream:
108
  prompt_templates = yaml.safe_load(stream)
109
 
110
+ # Create a list of tools, only adding ones that are successfully loaded
111
+ tools = [final_answer, get_weather_forecast, get_current_time_in_timezone]
112
+ if image_generation_tool is not None:
113
+ tools.append(image_generation_tool)
114
+
115
  agent = CodeAgent(
116
  model=model,
117
+ tools=tools,
118
  max_steps=6,
119
  verbosity_level=1,
120
  grammar=None,
 
124
  prompt_templates=prompt_templates
125
  )
126
 
127
+ # Modified to handle potential errors in the Gradio UI
128
+ try:
129
+ GradioUI(agent).launch()
130
+ except TypeError as e:
131
+ if "unsupported operand type(s) for +=" in str(e):
132
+ print("Warning: There seems to be an issue with token counting in the Gradio UI.")
133
+ print("This could be due to the model not properly returning token count information.")
134
+ print("Consider modifying the Gradio_UI.py file to handle None values for token counts.")
135
+ else:
136
+ raise