agarwalamit081 committed on
Commit
d31e1c0
·
verified ·
1 Parent(s): 31bf409

Update Gradio_UI.py

Browse files
Files changed (1) hide show
  1. Gradio_UI.py +160 -19
Gradio_UI.py CHANGED
@@ -1,5 +1,9 @@
1
  #!/usr/bin/env python
2
  # coding=utf-8
 
 
 
 
3
  import os
4
  import re
5
  from typing import Optional, List
@@ -8,18 +12,27 @@ from smolagents.agents import ActionStep, MultiStepAgent
8
  from smolagents.memory import MemoryStep
9
  from smolagents.utils import _is_package_available
10
 
 
11
  def pull_messages_from_step(step_log: MemoryStep):
12
  """Extract ChatMessage objects from agent steps with proper nesting"""
 
 
 
13
  import gradio as gr
 
14
  if isinstance(step_log, ActionStep):
 
15
  step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Processing"
16
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
17
 
18
- # Show LLM reasoning
19
  if hasattr(step_log, "model_output") and step_log.model_output:
20
- model_output = re.sub(r"```\s*<end_code>.*", "```", step_log.model_output.strip())
 
 
21
  model_output = re.sub(r"<end_code>\s*```", "```", model_output)
22
- if model_output.strip():
 
23
  yield gr.ChatMessage(role="assistant", content=model_output)
24
 
25
  parent_id = None
@@ -28,15 +41,18 @@ def pull_messages_from_step(step_log: MemoryStep):
28
  if hasattr(step_log, "tool_calls") and step_log.tool_calls:
29
  tool_call = step_log.tool_calls[0]
30
  parent_id = f"tool_{step_log.step_number}"
 
 
31
  args = tool_call.arguments
32
  if isinstance(args, dict):
33
  content = "\n".join(f"• {k}: {v}" for k, v in args.items() if v and k != 'self')
34
  else:
35
  content = str(args).strip()
 
36
  metadata = {
37
  "title": f"🛠️ Using: {tool_call.name}",
38
  "id": parent_id,
39
- "status": "pending", # FIXED: "running" "pending" (Gradio validation)
40
  }
41
  yield gr.ChatMessage(role="assistant", content=content, metadata=metadata)
42
 
@@ -44,29 +60,42 @@ def pull_messages_from_step(step_log: MemoryStep):
44
  if hasattr(step_log, "observations") and step_log.observations:
45
  obs = step_log.observations.strip()
46
  if obs and not obs.startswith("Execution logs:"):
47
- metadata = {"title": "✅ Result", "status": "done"} # FIXED: added status
 
 
 
48
  if parent_id is not None:
49
  metadata["parent_id"] = parent_id
50
  yield gr.ChatMessage(role="assistant", content=obs, metadata=metadata)
51
 
52
  # Show errors
53
  if hasattr(step_log, "error") and step_log.error:
54
- metadata = {"title": "⚠️ Warning", "status": "done"} # FIXED: added status
 
 
 
55
  if parent_id is not None:
56
  metadata["parent_id"] = parent_id
57
  yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata=metadata)
58
 
59
- # Step footer
60
  footer_parts = [step_number]
61
  if hasattr(step_log, "duration") and step_log.duration:
62
  footer_parts.append(f"⏱️ {float(step_log.duration):.1f}s")
63
  if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
64
  footer_parts.append(f"💬 {step_log.input_token_count + step_log.output_token_count:,} tokens")
 
65
  yield gr.ChatMessage(
66
  role="assistant",
67
  content=f'<span style="color: #888; font-size: 0.85em;">{" | ".join(footer_parts)}</span>',
68
  )
69
- yield gr.ChatMessage(role="assistant", content='<hr style="margin: 8px 0; border: 0; border-top: 1px solid #eee">')
 
 
 
 
 
 
70
 
71
  def stream_to_gradio(
72
  agent: MultiStepAgent,
@@ -74,64 +103,166 @@ def stream_to_gradio(
74
  reset_agent_memory: bool = False,
75
  additional_args: Optional[dict] = None,
76
  ):
77
- """Runs agent and streams messages as gradio ChatMessages"""
 
 
 
 
 
 
 
 
 
 
 
78
  if not _is_package_available("gradio"):
79
  raise ModuleNotFoundError("Install gradio: `pip install 'smolagents[gradio]'`")
 
80
  import gradio as gr
 
81
  try:
 
82
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
83
  if isinstance(step_log, ActionStep):
 
84
  if hasattr(agent.model, "last_input_token_count") and hasattr(agent.model, "last_output_token_count"):
85
  step_log.input_token_count = agent.model.last_input_token_count
86
  step_log.output_token_count = agent.model.last_output_token_count
 
 
87
  for message in pull_messages_from_step(step_log):
88
  yield message
 
 
89
  final = handle_agent_output_types(step_log)
90
  if isinstance(final, AgentText):
91
  content = final.to_string()
92
- yield gr.ChatMessage(role="assistant", content=content, metadata={"react": True, "status": "done"})
 
 
 
 
93
  else:
94
- yield gr.ChatMessage(role="assistant", content=f"**Final Answer:** {str(final)}", metadata={"status": "done"})
 
 
 
 
 
95
  except Exception as e:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  yield gr.ChatMessage(
97
  role="assistant",
98
- content=f"⚠️ **Error during planning:** {str(e)}\nPlease try again with a clearer trip description (include destination, dates, origin city, and budget).",
99
  metadata={"status": "done"}
100
  )
101
 
 
102
  class GradioUI:
103
  """Production-ready Gradio interface for travel agent"""
 
104
  def __init__(self, agent: MultiStepAgent, file_upload_folder: Optional[str] = None):
 
 
 
 
 
 
 
105
  if not _is_package_available("gradio"):
106
  raise ModuleNotFoundError("Install gradio: `pip install 'smolagents[gradio]'`")
 
107
  self.agent = agent
108
  self.file_upload_folder = file_upload_folder
 
 
109
  if self.file_upload_folder and not os.path.exists(file_upload_folder):
110
  os.makedirs(file_upload_folder, exist_ok=True)
111
 
112
  def interact_with_agent(self, prompt: str, history: List):
 
 
 
 
 
 
 
 
 
 
113
  import gradio as gr
 
 
114
  history.append(gr.ChatMessage(role="user", content=prompt))
115
  yield history
 
 
116
  for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
117
  history.append(msg)
118
  yield history
119
 
120
  def launch(self, **kwargs):
 
 
 
 
 
 
121
  import gradio as gr
122
- with gr.Blocks(title="✈️ Travel Catalogue Creator", fill_height=True, theme=gr.themes.Soft()) as demo:
 
 
 
 
 
 
 
123
  gr.Markdown("# ✈️ Smart Travel Catalogue Creator")
124
- gr.Markdown("Plan your trip with AI: weather forecasts, custom itineraries, packing lists & visual inspiration")
 
 
 
 
 
125
  chatbot = gr.Chatbot(
126
  label="Your Travel Assistant",
127
  type="messages",
128
  avatar_images=(
129
- None,
130
- "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
131
  ),
132
  height=600,
133
  show_copy_button=True,
134
  )
 
 
135
  with gr.Row():
136
  text_input = gr.Textbox(
137
  label="Describe your trip",
@@ -139,23 +270,33 @@ class GradioUI:
139
  lines=2,
140
  )
141
  submit_btn = gr.Button("Plan My Trip", variant="primary")
 
 
142
  gr.Markdown("""
143
  ### 💡 Tips for best results:
144
- • Include: destination, dates, origin city, budget amount + currency
145
- • Example: *"5-day Lisbon trip from London, Sep 20-24, budget £800 GBP"*
 
146
  """)
 
 
147
  submit_btn.click(
148
  self.interact_with_agent,
149
  inputs=[text_input, chatbot],
150
  outputs=[chatbot],
151
  show_progress="full",
152
  )
 
153
  text_input.submit(
154
  self.interact_with_agent,
155
  inputs=[text_input, chatbot],
156
  outputs=[chatbot],
157
  show_progress="full",
158
  )
 
 
159
  demo.launch(**kwargs)
160
 
161
- __all__ = ["stream_to_gradio", "GradioUI"]
 
 
 
1
  #!/usr/bin/env python
2
  # coding=utf-8
3
+ """
4
+ Gradio UI for Travel Catalogue Creator
5
+ Production-ready interface with streaming agent responses
6
+ """
7
  import os
8
  import re
9
  from typing import Optional, List
 
12
  from smolagents.memory import MemoryStep
13
  from smolagents.utils import _is_package_available
14
 
15
+
16
def pull_messages_from_step(step_log: MemoryStep):
    """Extract gradio ChatMessage objects from a single agent memory step.

    For an ActionStep, yields in order: a step header, the model's
    reasoning (if any), the first tool call (as a collapsible "pending"
    panel), observations and errors (nested under the tool panel when one
    exists), a footer with timing/token stats, and a divider. Any other
    step type yields nothing.

    Args:
        step_log: A smolagents memory step.

    Yields:
        gradio.ChatMessage objects for UI display.

    Raises:
        ModuleNotFoundError: If gradio is not installed.
    """
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError("Install gradio: `pip install 'smolagents[gradio]'`")

    import gradio as gr

    if isinstance(step_log, ActionStep):
        # Step header
        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Processing"
        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

        # Show LLM reasoning/thinking. Strip smolagents' <end_code> markers
        # so fenced code blocks render cleanly in the chat UI.
        if hasattr(step_log, "model_output") and step_log.model_output:
            model_output = step_log.model_output.strip()
            model_output = re.sub(r"```\s*<end_code>.*", "```", model_output)
            model_output = re.sub(r"<end_code>\s*```", "```", model_output)

            if model_output:
                yield gr.ChatMessage(role="assistant", content=model_output)

        # Panel id for the tool call; observations/errors nest under it.
        parent_id = None

        if hasattr(step_log, "tool_calls") and step_log.tool_calls:
            tool_call = step_log.tool_calls[0]
            parent_id = f"tool_{step_log.step_number}"

            # Format tool arguments: dict -> bullet list, anything else raw.
            args = tool_call.arguments
            if isinstance(args, dict):
                content = "\n".join(f"• {k}: {v}" for k, v in args.items() if v and k != 'self')
            else:
                content = str(args).strip()

            metadata = {
                "title": f"🛠️ Using: {tool_call.name}",
                "id": parent_id,
                "status": "pending",  # Gradio validates status: "pending", not "running"
            }
            yield gr.ChatMessage(role="assistant", content=content, metadata=metadata)

        # Show observations (raw "Execution logs:" dumps are suppressed).
        if hasattr(step_log, "observations") and step_log.observations:
            obs = step_log.observations.strip()
            if obs and not obs.startswith("Execution logs:"):
                metadata = {
                    "title": "✅ Result",
                    "status": "done"
                }
                if parent_id is not None:
                    metadata["parent_id"] = parent_id
                yield gr.ChatMessage(role="assistant", content=obs, metadata=metadata)

        # Show errors
        if hasattr(step_log, "error") and step_log.error:
            metadata = {
                "title": "⚠️ Warning",
                "status": "done"
            }
            if parent_id is not None:
                metadata["parent_id"] = parent_id
            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata=metadata)

        # Step footer with timing and token info.
        footer_parts = [step_number]
        if hasattr(step_log, "duration") and step_log.duration:
            footer_parts.append(f"⏱️ {float(step_log.duration):.1f}s")
        # FIX: smolagents models report last_*_token_count = None when usage
        # info is unavailable; the original summed the attributes whenever
        # they existed, raising TypeError on None. Only sum real counts.
        in_tokens = getattr(step_log, "input_token_count", None)
        out_tokens = getattr(step_log, "output_token_count", None)
        if in_tokens is not None and out_tokens is not None:
            footer_parts.append(f"💬 {in_tokens + out_tokens:,} tokens")

        yield gr.ChatMessage(
            role="assistant",
            content=f'<span style="color: #888; font-size: 0.85em;">{" | ".join(footer_parts)}</span>',
        )

        # Divider between steps
        yield gr.ChatMessage(
            role="assistant",
            content='<hr style="margin: 8px 0; border: 0; border-top: 1px solid #eee">'
        )
99
 
100
def stream_to_gradio(
    agent: MultiStepAgent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """
    Runs agent and streams messages as gradio ChatMessages.

    Args:
        agent: The MultiStepAgent instance to run
        task: User's task/query string
        reset_agent_memory: Whether to clear agent memory before running
        additional_args: Optional additional arguments for the agent

    Yields:
        Gradio ChatMessage objects for UI display

    Raises:
        ModuleNotFoundError: If gradio is not installed.
    """
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError("Install gradio: `pip install 'smolagents[gradio]'`")

    import gradio as gr

    # FIX: track the last step explicitly. The original left `step_log`
    # unbound when the agent yielded no steps, so the post-loop
    # handle_agent_output_types(step_log) raised NameError (surfaced to the
    # user as a confusing generic error).
    step_log = None
    try:
        # Run agent and stream steps
        for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
            if isinstance(step_log, ActionStep):
                # Track token usage if available
                if hasattr(agent.model, "last_input_token_count") and hasattr(agent.model, "last_output_token_count"):
                    step_log.input_token_count = agent.model.last_input_token_count
                    step_log.output_token_count = agent.model.last_output_token_count

                # Yield messages from this step
                for message in pull_messages_from_step(step_log):
                    yield message

        if step_log is None:
            # Agent produced nothing at all — report it instead of crashing.
            yield gr.ChatMessage(
                role="assistant",
                content="⚠️ **Error:** The agent produced no output. Please try again.",
                metadata={"status": "done"}
            )
            return

        # Handle final output (the last streamed item is the final answer)
        final = handle_agent_output_types(step_log)
        if isinstance(final, AgentText):
            content = final.to_string()
            yield gr.ChatMessage(
                role="assistant",
                content=content,
                metadata={"react": True, "status": "done"}
            )
        else:
            yield gr.ChatMessage(
                role="assistant",
                content=f"**Final Answer:** {str(final)}",
                metadata={"status": "done"}
            )

    except Exception as e:
        # Handle errors gracefully
        error_msg = str(e)

        # Provide helpful error messages
        if "500 Internal Server Error" in error_msg or "Bad Request" in error_msg:
            helpful_msg = (
                "⚠️ **API Error:** The model service encountered an issue.\n\n"
                "**Possible fixes:**\n"
                "• Try rephrasing your request more clearly\n"
                "• Include all required details: destination, dates, origin city, budget\n"
                "• Example: *'5-day Barcelona trip from NYC, Oct 15-19, budget $1500 USD'*\n\n"
                f"Technical details: {error_msg}"
            )
        elif "No results found" in error_msg:
            helpful_msg = (
                "⚠️ **Search Issue:** Couldn't find information about the destination.\n\n"
                "Please try again with a different destination or check the spelling."
            )
        else:
            helpful_msg = (
                f"⚠️ **Error:** {error_msg}\n\n"
                "Please try again with a clearer trip description including:\n"
                "• Destination city\n"
                "• Travel dates\n"
                "• Origin city\n"
                "• Budget amount + currency"
            )

        yield gr.ChatMessage(
            role="assistant",
            content=helpful_msg,
            metadata={"status": "done"}
        )
186
 
187
+
188
class GradioUI:
    """Production-ready Gradio interface for travel agent"""

    def __init__(self, agent: MultiStepAgent, file_upload_folder: Optional[str] = None):
        """
        Initialize Gradio UI wrapper.

        Args:
            agent: The MultiStepAgent instance to use
            file_upload_folder: Optional folder path for file uploads

        Raises:
            ModuleNotFoundError: If gradio is not installed.
        """
        if not _is_package_available("gradio"):
            raise ModuleNotFoundError("Install gradio: `pip install 'smolagents[gradio]'`")

        self.agent = agent
        self.file_upload_folder = file_upload_folder

        # Create upload folder if specified.
        # FIX: dropped the os.path.exists() pre-check — exist_ok=True already
        # makes this idempotent and avoids the check-then-create race; also
        # use self.file_upload_folder consistently instead of the raw param.
        if self.file_upload_folder:
            os.makedirs(self.file_upload_folder, exist_ok=True)

    def interact_with_agent(self, prompt: str, history: List):
        """
        Handle user interaction with the agent.

        Args:
            prompt: User's input text
            history: Conversation history (mutated in place)

        Yields:
            Updated conversation history after each new message
        """
        import gradio as gr

        # Add user message to history
        history.append(gr.ChatMessage(role="user", content=prompt))
        yield history

        # Stream agent responses
        for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
            history.append(msg)
            yield history

    def launch(self, **kwargs):
        """
        Launch the Gradio interface.

        Args:
            **kwargs: Additional arguments passed to demo.launch()
        """
        import gradio as gr

        # Build UI
        with gr.Blocks(
            title="✈️ Travel Catalogue Creator",
            fill_height=True,
            theme=gr.themes.Soft()
        ) as demo:
            # Header
            gr.Markdown("# ✈️ Smart Travel Catalogue Creator")
            gr.Markdown(
                "Plan your perfect trip with AI: weather forecasts, custom itineraries, "
                "packing lists & visual inspiration"
            )

            # Chat interface
            chatbot = gr.Chatbot(
                label="Your Travel Assistant",
                type="messages",
                avatar_images=(
                    None,  # User avatar (default)
                    "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",  # Agent avatar
                ),
                height=600,
                show_copy_button=True,
            )

            # Input area
            with gr.Row():
                text_input = gr.Textbox(
                    label="Describe your trip",
                    lines=2,
                )
                submit_btn = gr.Button("Plan My Trip", variant="primary")

            # Help text
            gr.Markdown("""
            ### 💡 Tips for best results:
            **Include:** destination, dates, origin city, budget amount + currency
            **Example:** *"5-day Lisbon trip from London, Sep 20-24, budget £800 GBP"*
            • **Example:** *"Weekend Paris getaway from Amsterdam, March 10-12, €600 budget"*
            """)

            # Connect interactions: button click and Enter in the textbox
            # both feed interact_with_agent and stream into the chatbot.
            submit_btn.click(
                self.interact_with_agent,
                inputs=[text_input, chatbot],
                outputs=[chatbot],
                show_progress="full",
            )

            text_input.submit(
                self.interact_with_agent,
                inputs=[text_input, chatbot],
                outputs=[chatbot],
                show_progress="full",
            )

        # Launch
        demo.launch(**kwargs)
299
 
300
+
301
+ # Export public API
302
+ __all__ = ["stream_to_gradio", "GradioUI"]