alex4cip Claude committed on
Commit
2384d41
·
1 Parent(s): 7cf114c

fix: Migrate to OpenAI-style messages format

Browse files

UserWarning: The 'tuples' format for chatbot messages is deprecated

πŸ”§ Changes:
- Change Chatbot type from 'tuples' to 'messages'
- Migrate from [[user, bot]] to [{role, content}] format
- Update history parsing to use OpenAI-style messages
- Change history[-3:] tuples to history[-6:] messages (last 3 turns)

✨ Benefits:
- Future-proof against Gradio deprecation
- Compatible with OpenAI message format
- Better interoperability with other tools
- Cleaner message structure

πŸ”„ Format Migration:
- Old: [["user msg", "bot msg"]]
- New: [{"role": "user", "content": "user msg"}, {"role": "assistant", "content": "bot msg"}]

πŸ€– Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -91,18 +91,18 @@ def generate_response_impl(message, history):
91
  current_model, current_tokenizer = load_model_once()
92
 
93
  if current_model is None or current_tokenizer is None:
94
- return history + [[message, "❌ λͺ¨λΈμ„ λ‘œλ“œν•  수 μ—†μŠ΅λ‹ˆλ‹€."]]
95
 
96
  # Get device
97
  device = next(current_model.parameters()).device
98
 
99
  # Build conversation context (last 3 turns)
100
  conversation = ""
101
- for user_msg, bot_msg in history[-3:]:
102
- if user_msg:
103
- conversation += f"μ‚¬μš©μž: {user_msg}\n"
104
- if bot_msg:
105
- conversation += f"μ–΄μ‹œμŠ€ν„΄νŠΈ: {bot_msg}\n"
106
 
107
  conversation += f"μ‚¬μš©μž: {message}\nμ–΄μ‹œμŠ€ν„΄νŠΈ:"
108
 
@@ -142,7 +142,7 @@ def generate_response_impl(message, history):
142
  if not response:
143
  response = "μ£„μ†‘ν•©λ‹ˆλ‹€. 응닡을 생성할 수 μ—†μ—ˆμŠ΅λ‹ˆλ‹€."
144
 
145
- return history + [[message, response]]
146
 
147
  except Exception as e:
148
  import traceback
@@ -151,7 +151,7 @@ def generate_response_impl(message, history):
151
  print(f"Error: {error_msg}")
152
  print(traceback.format_exc())
153
  print("=" * 50)
154
- return history + [[message, f"❌ 였λ₯˜: {error_msg[:200]}"]]
155
 
156
 
157
  # Conditionally apply ZeroGPU decorator
@@ -207,7 +207,7 @@ with gr.Blocks(title="πŸ€– Llama-2-Ko Chatbot") as demo:
207
 
208
  gr.Markdown(header)
209
 
210
- chatbot = gr.Chatbot(height=400, type="tuples", show_label=False)
211
 
212
  with gr.Row():
213
  msg = gr.Textbox(
 
91
  current_model, current_tokenizer = load_model_once()
92
 
93
  if current_model is None or current_tokenizer is None:
94
+ return history + [{"role": "assistant", "content": "❌ λͺ¨λΈμ„ λ‘œλ“œν•  수 μ—†μŠ΅λ‹ˆλ‹€."}]
95
 
96
  # Get device
97
  device = next(current_model.parameters()).device
98
 
99
  # Build conversation context (last 3 turns)
100
  conversation = ""
101
+ for msg in history[-6:]: # Last 3 turns (6 messages: 3 user + 3 assistant)
102
+ if msg["role"] == "user":
103
+ conversation += f"μ‚¬μš©μž: {msg['content']}\n"
104
+ elif msg["role"] == "assistant":
105
+ conversation += f"μ–΄μ‹œμŠ€ν„΄νŠΈ: {msg['content']}\n"
106
 
107
  conversation += f"μ‚¬μš©μž: {message}\nμ–΄μ‹œμŠ€ν„΄νŠΈ:"
108
 
 
142
  if not response:
143
  response = "μ£„μ†‘ν•©λ‹ˆλ‹€. 응닡을 생성할 수 μ—†μ—ˆμŠ΅λ‹ˆλ‹€."
144
 
145
+ return history + [{"role": "assistant", "content": response}]
146
 
147
  except Exception as e:
148
  import traceback
 
151
  print(f"Error: {error_msg}")
152
  print(traceback.format_exc())
153
  print("=" * 50)
154
+ return history + [{"role": "assistant", "content": f"❌ 였λ₯˜: {error_msg[:200]}"}]
155
 
156
 
157
  # Conditionally apply ZeroGPU decorator
 
207
 
208
  gr.Markdown(header)
209
 
210
+ chatbot = gr.Chatbot(height=400, type="messages", show_label=False)
211
 
212
  with gr.Row():
213
  msg = gr.Textbox(