SimpleSam committed on
Commit 791b8b0 · verified · 1 Parent(s): b42fe0a

Update Manyata_UI.py

Files changed (1)
  1. Manyata_UI.py +102 -12
Manyata_UI.py CHANGED
@@ -31,6 +31,96 @@ def pull_messages_from_step(
     """Extract ChatMessage objects from agent steps with proper nesting"""
     import gradio as gr

+    if isinstance(step_log, ActionStep):
+        # Output the step number
+        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
+
+        # First yield the thought/reasoning from the LLM
+        if hasattr(step_log, "model_output") and step_log.model_output is not None:
+            # Clean up the LLM output
+            model_output = step_log.model_output.strip()
+            # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
+            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
+            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
+            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
+            model_output = model_output.strip()
+            yield gr.ChatMessage(role="assistant", content=model_output)
+
+        # For tool calls, create a parent message
+        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
+            first_tool_call = step_log.tool_calls[0]
+            used_code = first_tool_call.name == "python_interpreter"
+            parent_id = f"call_{len(step_log.tool_calls)}"
+
+            # Tool call becomes the parent message with timing info
+            # First we will handle arguments based on type
+            args = first_tool_call.arguments
+            if isinstance(args, dict):
+                content = str(args.get("answer", str(args)))
+            else:
+                content = str(args).strip()
+
+            if used_code:
+                # Clean up the content by removing any end code tags
+                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
+                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
+                content = content.strip()
+                if not content.startswith("```python"):
+                    content = f"```python\n{content}\n```"
+
+            parent_message_tool = gr.ChatMessage(
+                role="assistant",
+                content=content,
+                metadata={
+                    "title": f"🛠️ Used tool {first_tool_call.name}",
+                    "id": parent_id,
+                    "status": "pending",
+                },
+            )
+            yield parent_message_tool
+
+            # Nesting execution logs under the tool call if they exist
+            if hasattr(step_log, "observations") and (
+                step_log.observations is not None and step_log.observations.strip()
+            ):  # Only yield execution logs if there's actual content
+                log_content = step_log.observations.strip()
+                if log_content:
+                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
+                    yield gr.ChatMessage(
+                        role="assistant",
+                        content=f"{log_content}",
+                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
+                    )
+
+            # Nesting any errors under the tool call
+            if hasattr(step_log, "error") and step_log.error is not None:
+                yield gr.ChatMessage(
+                    role="assistant",
+                    content=str(step_log.error),
+                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
+                )
+
+            # Update parent message metadata to done status without yielding a new message
+            parent_message_tool.metadata["status"] = "done"
+
+        # Handle standalone errors but not from tool calls
+        elif hasattr(step_log, "error") and step_log.error is not None:
+            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
+
+        # Calculate duration and token information
+        step_footnote = f"{step_number}"
+        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
+            token_str = (
+                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
+            )
+            step_footnote += token_str
+        if hasattr(step_log, "duration"):
+            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
+            step_footnote += step_duration
+        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
+        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
+        yield gr.ChatMessage(role="assistant", content="-----")


 def stream_to_gradio(
@@ -49,19 +139,19 @@ def stream_to_gradio(
     total_input_tokens = 0
     total_output_tokens = 0

-    # for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
         # Track tokens if model provides them
-    # if hasattr(agent.model, "last_input_token_count"):
-    # total_input_tokens += agent.model.last_input_token_count
-    # total_output_tokens += agent.model.last_output_token_count
-    # if isinstance(step_log, ActionStep):
-    # step_log.input_token_count = agent.model.last_input_token_count
-    # step_log.output_token_count = agent.model.last_output_token_count
-    #
-    # for message in pull_messages_from_step(
-    # step_log,
-    # ):
-    # yield message
+        if hasattr(agent.model, "last_input_token_count"):
+            total_input_tokens += agent.model.last_input_token_count
+            total_output_tokens += agent.model.last_output_token_count
+            if isinstance(step_log, ActionStep):
+                step_log.input_token_count = agent.model.last_input_token_count
+                step_log.output_token_count = agent.model.last_output_token_count
+
+        for message in pull_messages_from_step(
+            step_log,
+        ):
+            yield message

     final_answer = step_log  # Last log is the run's final_answer
     final_answer = handle_agent_output_types(final_answer)
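
For reference, a minimal sketch of how the updated stream_to_gradio generator could be wired into a Gradio chat app. The agent setup and the names demo, chatbot, prompt_box, and interact are illustrative assumptions, not part of this commit; the only thing taken from the diff is that stream_to_gradio yields gr.ChatMessage objects as the agent runs.

# Illustrative usage sketch (assumed names and agent setup; not part of this commit).
import gradio as gr
from smolagents import CodeAgent, HfApiModel

from Manyata_UI import stream_to_gradio  # assumes this module exposes stream_to_gradio

agent = CodeAgent(tools=[], model=HfApiModel())  # placeholder agent configuration


def interact(prompt, history):
    history = history or []
    history.append(gr.ChatMessage(role="user", content=prompt))
    yield history
    # stream_to_gradio yields gr.ChatMessage objects built by
    # pull_messages_from_step for each ActionStep as the agent runs.
    for message in stream_to_gradio(agent, task=prompt, reset_agent_memory=False):
        history.append(message)
        yield history


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # "messages" mode accepts gr.ChatMessage
    prompt_box = gr.Textbox(placeholder="Ask the agent...")
    prompt_box.submit(interact, inputs=[prompt_box, chatbot], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()

Because interact is a generator, Gradio streams each yielded history update to the Chatbot, so the step headers, tool-call groups, execution logs, and error messages produced by pull_messages_from_step appear incrementally.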