sskorol committed on
Commit
2c2a123
·
verified ·
1 Parent(s): 8f9943c

Fixed broken gradio code

Browse files

- Fixed improper `last_input_token_count` usage before init.
- Added an error handler around the agent interaction to surface dependency/runtime failures in the chat UI.

Files changed (1) hide show
  1. Gradio_UI.py +16 -14
Gradio_UI.py CHANGED
@@ -140,17 +140,14 @@ def stream_to_gradio(
140
  total_output_tokens = 0
141
 
142
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
143
- # Track tokens if model provides them
144
- if hasattr(agent.model, "last_input_token_count"):
145
- total_input_tokens += agent.model.last_input_token_count
146
- total_output_tokens += agent.model.last_output_token_count
147
  if isinstance(step_log, ActionStep):
148
  step_log.input_token_count = agent.model.last_input_token_count
149
- step_log.output_token_count = agent.model.last_output_token_count
150
 
151
- for message in pull_messages_from_step(
152
- step_log,
153
- ):
154
  yield message
155
 
156
  final_answer = step_log # Last log is the run's final_answer
@@ -191,13 +188,18 @@ class GradioUI:
191
 
192
  def interact_with_agent(self, prompt, messages):
193
  import gradio as gr
194
-
195
- messages.append(gr.ChatMessage(role="user", content=prompt))
196
- yield messages
197
- for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
198
- messages.append(msg)
 
 
 
 
 
 
199
  yield messages
200
- yield messages
201
 
202
  def upload_file(
203
  self,
 
140
  total_output_tokens = 0
141
 
142
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
143
+ if hasattr(agent.model, "last_input_token_count") and agent.model.last_input_token_count is not None:
144
+ total_input_tokens += agent.model.last_input_token_count or 0
145
+ total_output_tokens += getattr(agent.model, "last_output_token_count", 0) or 0
 
146
  if isinstance(step_log, ActionStep):
147
  step_log.input_token_count = agent.model.last_input_token_count
148
+ step_log.output_token_count = getattr(agent.model, "last_output_token_count", 0)
149
 
150
+ for message in pull_messages_from_step(step_log):
 
 
151
  yield message
152
 
153
  final_answer = step_log # Last log is the run's final_answer
 
188
 
189
  def interact_with_agent(self, prompt, messages):
190
  import gradio as gr
191
+
192
+ try:
193
+ messages.append(gr.ChatMessage(role="user", content=prompt))
194
+ yield messages
195
+ for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
196
+ messages.append(msg)
197
+ yield messages
198
+ yield messages
199
+ except Exception as e:
200
+ error_message = f"Error: {str(e)}\nPlease try again or contact support if the issue persists."
201
+ messages.append(gr.ChatMessage(role="assistant", content=error_message))
202
  yield messages
 
203
 
204
  def upload_file(
205
  self,