pszemraj committed on
Commit ee4117d · verified · 1 Parent(s): 8b6b53b

revert to less terrible version

Files changed (1): app.py (+14, -15)
app.py CHANGED
@@ -67,15 +67,14 @@ def build_messages(
 ) -> List[Dict[str, str]]:
     """Transform Gradio history into chat template messages.
 
-    History is stored as (human_assistant, model_user) for display purposes.
-    We extract them in the right order for the model.
+    History is stored as (model_user, human_assistant) tuples.
     """
     messages: List[Dict[str, str]] = []
     if system_prompt.strip():
         messages.append({"role": "system", "content": system_prompt.strip()})
 
-    # Each tuple is (human_assistant, model_user) for display
-    for human_assistant, model_user in history:
+    # Each tuple is (model_user, human_assistant)
+    for model_user, human_assistant in history:
         if model_user:
             messages.append({"role": "user", "content": model_user})
         if human_assistant:
@@ -115,8 +114,8 @@ def is_verbatim_repetition(
     if new_text_normalized == system_prompt.strip().lower():
         return True
 
-    # Check against previous model user messages (second element in tuple now)
-    for _, model_user in history:
+    # Check against previous model user messages (first element in tuple)
+    for model_user, _ in history:
         if model_user and new_text_normalized == model_user.strip().lower():
             return True
 
@@ -201,7 +200,7 @@ def respond(
     - If history empty: Generate first user message (ignores assistant_message input)
     - If history exists: Add assistant response and generate next user turn
 
-    History format: (human_assistant, model_user) for correct display sides
+    History format: (model_user, human_assistant)
     """
 
     # First message generation - ignore any text in the assistant box
@@ -218,8 +217,8 @@
         top_p=top_p,
     )
 
-    # Start conversation with first user message
-    chat_history = [(None, user_reply)]
+    # Start conversation with first user message (empty assistant slot)
+    chat_history = [(user_reply, None)]
     return chat_history, chat_history
 
     # Subsequent messages - require assistant response
@@ -231,8 +230,8 @@
         return chat_history, chat_history
 
     # Update the last tuple with the assistant response
-    _, last_model_user = chat_history[-1]
-    chat_history[-1] = (assistant_message.strip(), last_model_user)
+    last_model_user, _ = chat_history[-1]
+    chat_history[-1] = (last_model_user, assistant_message.strip())
 
     # Build messages for next user turn generation
     messages = build_messages(system_prompt, chat_history)
@@ -246,8 +245,8 @@
         top_p=top_p,
     )
 
-    # Add new model user message
-    chat_history.append((None, user_reply))
+    # Add new model user message (with empty assistant slot)
+    chat_history.append((user_reply, None))
 
     return chat_history, chat_history
 
@@ -299,7 +298,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     submit_btn = gr.Button("Generate", variant="primary")
     clear_btn = gr.Button("Clear")
 
-    state = gr.State([])  # chat history: List[Tuple[human_assistant, model_user]]
+    state = gr.State([])  # chat history: List[Tuple[model_user, human_assistant]]
 
     with gr.Accordion("Implementation Details", open=False):
         gr.Markdown(
@@ -341,4 +340,4 @@
     clear_btn.click(_clear, outputs=[state, system_box, chatbot, msg])
 
 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue().launch()
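For reference, here is a minimal self-contained sketch (not part of the commit) of the convention this revert restores: each history tuple is (model_user, human_assistant), matching the (user message, assistant message) order that Gradio's classic tuple-style Chatbot displays. The assistant-append branch and the sample history below are assumptions, since the first hunk truncates right after "if human_assistant:".

# Sketch only: how build_messages consumes the restored tuple order.
# The assistant branch is inferred; the diff above cuts off before it.
from typing import Dict, List, Optional, Tuple

History = List[Tuple[Optional[str], Optional[str]]]


def build_messages(system_prompt: str, history: History) -> List[Dict[str, str]]:
    """Transform Gradio history into chat template messages."""
    messages: List[Dict[str, str]] = []
    if system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt.strip()})

    # Each tuple is (model_user, human_assistant)
    for model_user, human_assistant in history:
        if model_user:
            messages.append({"role": "user", "content": model_user})
        if human_assistant:  # assumed symmetric branch (truncated in the hunk)
            messages.append({"role": "assistant", "content": human_assistant})
    return messages


if __name__ == "__main__":
    # Hypothetical history: one completed exchange, then a fresh
    # model-generated user turn whose assistant slot respond() leaves empty.
    history: History = [
        ("Can you explain the bug?", "Sure - which traceback do you see?"),
        ("It raises a KeyError on startup.", None),
    ]
    for m in build_messages("You are a helpful assistant.", history):
        print(m["role"], "->", m["content"])

Under the pre-revert (human_assistant, model_user) order, the model-generated user turns sat in the second slot and rendered on the Chatbot's assistant side, which appears to be what this commit undoes.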