Spaces:
Sleeping
Sleeping
fix: Migrate to OpenAI-style messages format
Browse filesUserWarning: The 'tuples' format for chatbot messages is deprecated
🔧 Changes:
- Change Chatbot type from 'tuples' to 'messages'
- Migrate from [[user, bot]] to [{role, content}] format
- Update history parsing to use OpenAI-style messages
- Change history[-3:] tuples to history[-6:] messages (last 3 turns)
✨ Benefits:
- Future-proof against Gradio deprecation
- Compatible with OpenAI message format
- Better interoperability with other tools
- Cleaner message structure
📝 Format Migration:
- Old: [["user msg", "bot msg"]]
- New: [{"role": "assistant", "content": "bot msg"}]
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
app.py
CHANGED
|
@@ -91,18 +91,18 @@ def generate_response_impl(message, history):
|
|
| 91 |
current_model, current_tokenizer = load_model_once()
|
| 92 |
|
| 93 |
if current_model is None or current_tokenizer is None:
|
| 94 |
-
return history + [
|
| 95 |
|
| 96 |
# Get device
|
| 97 |
device = next(current_model.parameters()).device
|
| 98 |
|
| 99 |
# Build conversation context (last 3 turns)
|
| 100 |
conversation = ""
|
| 101 |
-
for
|
| 102 |
-
if
|
| 103 |
-
conversation += f"사용자: {
|
| 104 |
-
|
| 105 |
-
conversation += f"어시스턴트: {
|
| 106 |
|
| 107 |
conversation += f"사용자: {message}\n어시스턴트:"
|
| 108 |
|
|
@@ -142,7 +142,7 @@ def generate_response_impl(message, history):
|
|
| 142 |
if not response:
|
| 143 |
response = "죄송합니다. 응답을 생성할 수 없었습니다."
|
| 144 |
|
| 145 |
-
return history + [
|
| 146 |
|
| 147 |
except Exception as e:
|
| 148 |
import traceback
|
|
@@ -151,7 +151,7 @@ def generate_response_impl(message, history):
|
|
| 151 |
print(f"Error: {error_msg}")
|
| 152 |
print(traceback.format_exc())
|
| 153 |
print("=" * 50)
|
| 154 |
-
return history + [
|
| 155 |
|
| 156 |
|
| 157 |
# Conditionally apply ZeroGPU decorator
|
|
@@ -207,7 +207,7 @@ with gr.Blocks(title="🤖 Llama-2-Ko Chatbot") as demo:
|
|
| 207 |
|
| 208 |
gr.Markdown(header)
|
| 209 |
|
| 210 |
-
chatbot = gr.Chatbot(height=400, type="
|
| 211 |
|
| 212 |
with gr.Row():
|
| 213 |
msg = gr.Textbox(
|
|
|
|
| 91 |
current_model, current_tokenizer = load_model_once()
|
| 92 |
|
| 93 |
if current_model is None or current_tokenizer is None:
|
| 94 |
+
return history + [{"role": "assistant", "content": "❌ 모델을 로드할 수 없습니다."}]
|
| 95 |
|
| 96 |
# Get device
|
| 97 |
device = next(current_model.parameters()).device
|
| 98 |
|
| 99 |
# Build conversation context (last 3 turns)
|
| 100 |
conversation = ""
|
| 101 |
+
for msg in history[-6:]: # Last 3 turns (6 messages: 3 user + 3 assistant)
|
| 102 |
+
if msg["role"] == "user":
|
| 103 |
+
conversation += f"사용자: {msg['content']}\n"
|
| 104 |
+
elif msg["role"] == "assistant":
|
| 105 |
+
conversation += f"어시스턴트: {msg['content']}\n"
|
| 106 |
|
| 107 |
conversation += f"사용자: {message}\n어시스턴트:"
|
| 108 |
|
|
|
|
| 142 |
if not response:
|
| 143 |
response = "죄송합니다. 응답을 생성할 수 없었습니다."
|
| 144 |
|
| 145 |
+
return history + [{"role": "assistant", "content": response}]
|
| 146 |
|
| 147 |
except Exception as e:
|
| 148 |
import traceback
|
|
|
|
| 151 |
print(f"Error: {error_msg}")
|
| 152 |
print(traceback.format_exc())
|
| 153 |
print("=" * 50)
|
| 154 |
+
return history + [{"role": "assistant", "content": f"❌ 오류: {error_msg[:200]}"}]
|
| 155 |
|
| 156 |
|
| 157 |
# Conditionally apply ZeroGPU decorator
|
|
|
|
| 207 |
|
| 208 |
gr.Markdown(header)
|
| 209 |
|
| 210 |
+
chatbot = gr.Chatbot(height=400, type="messages", show_label=False)
|
| 211 |
|
| 212 |
with gr.Row():
|
| 213 |
msg = gr.Textbox(
|