dianacasti committed
Commit 8677714 · Parent(s): 5245a28

Critical fix: remove gr.Chatbot() to avoid the Gradio 4.x schema bug; use a Textbox instead

Files changed (1): app.py (+39, -23)
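For context, here is a minimal, self-contained sketch of the workaround this commit applies: keep the conversation as a plain string and render it in a read-only gr.Textbox, so no gr.Chatbot (and none of its message schema) is involved. The echo_respond helper is a hypothetical stand-in for the app's generate_response; everything else uses only Gradio calls that appear in the diff below.

import gradio as gr

def echo_respond(message, chat_history):
    # Hypothetical stand-in for generate_response(): append one turn
    # to the string log and clear the input box.
    reply = f"echo: {message}"
    new_history = chat_history + f"\n\n**You:** {message}\n\n**Bot:** {reply}\n\n---"
    return "", new_history

with gr.Blocks() as sketch:
    chat_display = gr.Textbox(label="Chat History", lines=10, interactive=False)
    msg_box = gr.Textbox(label="Message", lines=2)
    msg_box.submit(fn=echo_respond, inputs=[msg_box, chat_display],
                   outputs=[msg_box, chat_display])

if __name__ == "__main__":
    sketch.launch()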
app.py CHANGED
@@ -1,6 +1,7 @@
 """
 Personality Chatbot - Multi-personality LLM with LoRA adapters
 Deployed on Hugging Face Spaces
+FIXED: Removed gr.Chatbot() to avoid Gradio 4.x schema bug
 """

 import gradio as gr
@@ -81,10 +82,10 @@ def switch_personality(personality_name):
     except Exception as e:
         return f"❌ Error loading adapter: {str(e)}"

-def generate_response(message, history, temperature=0.7, max_tokens=256):
+def generate_response(message, chat_history, temperature=0.7, max_tokens=256):
     """Generate response using current personality"""
     if current_adapter is None:
-        return "⚠️ Please select a personality first!"
+        return "⚠️ Please select a personality first!", chat_history

     try:
         # Format with chat template
@@ -96,12 +97,12 @@ def generate_response(message, history, temperature=0.7, max_tokens=256):
         with torch.no_grad():
             outputs = current_adapter.generate(
                 **inputs,
-                max_new_tokens=max_tokens,  # Changed from max_length to max_new_tokens
+                max_new_tokens=max_tokens,
                 temperature=temperature,
                 do_sample=True,
                 top_p=0.9,
-                top_k=50,  # Added top-k sampling
-                repetition_penalty=1.1,  # Added to reduce repetition
+                top_k=50,
+                repetition_penalty=1.1,
                 pad_token_id=tokenizer.pad_token_id,
                 eos_token_id=tokenizer.eos_token_id,
             )
@@ -114,10 +115,14 @@ def generate_response(message, history, temperature=0.7, max_tokens=256):
         response = response.replace("<|im_start|>", "").replace("<|im_end|>", "")
         response = response.replace("assistant\n", "").strip()

-        return response
+        # Format chat history for display
+        new_history = chat_history + f"\n\n**You:** {message}\n\n**Bot ({current_personality}):** {response}\n\n---"
+
+        return response, new_history

     except Exception as e:
-        return f"❌ Error generating response: {str(e)}"
+        error_msg = f"❌ Error generating response: {str(e)}"
+        return error_msg, chat_history

 def handle_personality_change(personality_name):
     """Handle personality dropdown change"""
@@ -128,7 +133,7 @@ def handle_personality_change(personality_name):
 print("🚀 Starting application...")
 load_base_model()

-# Create Gradio interface
+# Create Gradio interface WITHOUT gr.Chatbot() to avoid schema bug
 with gr.Blocks(theme=gr.themes.Soft(), title="Personality Chatbot") as demo:
     gr.Markdown(
         """
@@ -182,9 +187,13 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Personality Chatbot") as demo:
         )

         with gr.Column(scale=2):
-            chatbot = gr.Chatbot(
-                label="Chat",
-                height=500,
+            # FIXED: Use Textbox instead of Chatbot to avoid schema bug
+            chat_display = gr.Textbox(
+                label="💬 Chat History",
+                value="",
+                lines=20,
+                max_lines=30,
+                interactive=False,
                 show_label=True,
             )

@@ -194,6 +203,14 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Personality Chatbot") as demo:
                 lines=2,
             )

+            last_response = gr.Textbox(
+                label="🤖 Last Response",
+                value="",
+                lines=5,
+                interactive=False,
+                show_label=True,
+            )
+
             with gr.Row():
                 submit_btn = gr.Button("Send 💬", variant="primary")
                 clear_btn = gr.Button("Clear 🗑️", variant="secondary")
@@ -201,11 +218,10 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Personality Chatbot") as demo:
     # Event handlers
     def respond(message, chat_history, temperature, max_tokens):
         if not message.strip():
-            return chat_history, ""
+            return "", chat_history, ""

-        bot_response = generate_response(message, chat_history, temperature, max_tokens)
-        chat_history.append((message, bot_response))
-        return chat_history, ""
+        bot_response, new_history = generate_response(message, chat_history, temperature, max_tokens)
+        return "", new_history, bot_response

     # Personality change handler
     personality_dropdown.change(
@@ -217,21 +233,21 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Personality Chatbot") as demo:
     # Chat handlers
     submit_btn.click(
         fn=respond,
-        inputs=[msg_box, chatbot, temperature_slider, max_tokens_slider],
-        outputs=[chatbot, msg_box],
+        inputs=[msg_box, chat_display, temperature_slider, max_tokens_slider],
+        outputs=[msg_box, chat_display, last_response],
     )

     msg_box.submit(
         fn=respond,
-        inputs=[msg_box, chatbot, temperature_slider, max_tokens_slider],
-        outputs=[chatbot, msg_box],
+        inputs=[msg_box, chat_display, temperature_slider, max_tokens_slider],
+        outputs=[msg_box, chat_display, last_response],
     )

     clear_btn.click(
-        fn=lambda: ([], ""),
-        outputs=[chatbot, msg_box],
+        fn=lambda: ("", "", ""),
+        outputs=[chat_display, msg_box, last_response],
     )

-# Launch
+# Launch (share is not needed on HF Spaces)
 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
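The string-log format introduced in generate_response() is easy to check in isolation. A quick sanity test, where format_turn is a hypothetical extraction of the inline history-formatting f-string from the diff above:

def format_turn(chat_history, message, personality, response):
    # Hypothetical helper mirroring the history-formatting f-string
    # inside generate_response(); extracted so it can run standalone.
    return chat_history + f"\n\n**You:** {message}\n\n**Bot ({personality}):** {response}\n\n---"

if __name__ == "__main__":
    log = format_turn("", "Hello!", "pirate", "Arrr, ahoy!")
    assert log.startswith("\n\n**You:** Hello!")
    assert "**Bot (pirate):** Arrr, ahoy!" in log
    print(log)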