CazC committed on
Commit
5b7a8a4
·
verified ·
1 Parent(s): 7c54367

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -18
app.py CHANGED
@@ -173,7 +173,7 @@ DEFAULT_CHARACTER = Character.model_validate_json(DEFAULT_CHARACTER_JSON)
173
  DECK_PATH = Path(__file__).with_name("tacklebox_deck.json")
174
  DECK_DATA = json.loads(DECK_PATH.read_text(encoding="utf-8"))
175
 
176
- ChatHistory = List[Tuple[str, str]]
177
 
178
 
179
  def fresh_deck() -> TackleboxDeck:
@@ -198,14 +198,58 @@ def get_openai_client(api_key: Optional[str]) -> OpenAI:
198
  return OpenAI(api_key=key)
199
 
200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  def normalize_history(history: Optional[List[Any]]) -> ChatHistory:
202
- """Coerce Gradio chat history into a predictable list of string pairs."""
203
- normalized: ChatHistory = []
204
- for turn in history or []:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
  if not isinstance(turn, (list, tuple)) or len(turn) != 2:
206
  continue
207
  user_turn, assistant_turn = turn
208
- normalized.append((str(user_turn or ""), str(assistant_turn or "")))
 
 
 
209
  return normalized
210
 
211
 
@@ -446,11 +490,15 @@ def generate_model_reply(
446
  base_prompt = append_ai_hand_to_prompt(persona_prompt, ai_hand)
447
  messages: List[Dict[str, Any]] = [{"role": "system", "content": base_prompt}]
448
 
449
- for user_turn, assistant_turn in history:
450
- if user_turn:
451
- messages.append({"role": "user", "content": user_turn})
452
- if assistant_turn:
453
- messages.append({"role": "assistant", "content": assistant_turn})
 
 
 
 
454
 
455
  tool_output_note = ""
456
  if tool_results.get("tool_outputs"):
@@ -712,7 +760,7 @@ def grab_card_for_player(
712
  if not card:
713
  status = hand_status_md(deck, player_hand) + " • **No more cards** in the tackle box."
714
  notice = "*You reached for the tacklebox, but it was empty.*"
715
- chat_history.append((notice, ""))
716
  return render_hand_html(player_hand), status, chat_history, deck, player_hand, ai_hand
717
 
718
  player_hand.add_card(card)
@@ -732,7 +780,7 @@ def grab_card_for_player(
732
  notice_lines.append(shared_with_ai_notice)
733
 
734
  notice = "\n".join(notice_lines)
735
- chat_history.append((notice, ""))
736
  return render_hand_html(player_hand), status, chat_history, deck, player_hand, ai_hand
737
 
738
 
@@ -742,6 +790,18 @@ def show_ai_hand_debug(ai_hand_state: Optional[Hand]) -> str:
742
  return format_ai_hand_debug(ai_hand)
743
 
744
 
 
 
 
 
 
 
 
 
 
 
 
 
745
  def reset_session() -> Tuple[str, str, ChatHistory, TackleboxDeck, Hand, Hand]:
746
  hand_html, status, chat_history, deck, player_hand, ai_hand = init_session()
747
  return hand_html, status + " • Reset.", chat_history, deck, player_hand, ai_hand
@@ -904,8 +964,22 @@ with gr.Blocks(title="Tacklebox - Player 2 Customizer", css=CUSTOM_CSS) as demo:
904
  reset_btn = gr.Button("Reset session", variant="secondary")
905
 
906
  with gr.Group(elem_classes=["section-card"]):
 
 
 
 
 
 
 
 
 
 
 
907
  chat = gr.ChatInterface(
908
  orchestrate_chat,
 
 
 
909
  additional_inputs=[
910
  persona_prompt_state,
911
  api_key_state,
@@ -922,13 +996,16 @@ with gr.Blocks(title="Tacklebox - Player 2 Customizer", css=CUSTOM_CSS) as demo:
922
  ],
923
  title="Tacklebox Chat",
924
  description="Robin chats with Player 2 (LLM persona).",
925
- examples=[
926
- ["Hey, how does the water look today?"],
927
- ["What do you think we might catch?"],
928
- ["Remember fishing when we were young?"],
929
- ],
930
  )
931
 
 
 
 
 
932
  apply_btn.click(
933
  update_player_two_from_form,
934
  inputs=[
@@ -976,4 +1053,9 @@ with gr.Blocks(title="Tacklebox - Player 2 Customizer", css=CUSTOM_CSS) as demo:
976
 
977
 
978
  if __name__ == "__main__":
979
- demo.launch()
 
 
 
 
 
 
173
  DECK_PATH = Path(__file__).with_name("tacklebox_deck.json")
174
  DECK_DATA = json.loads(DECK_PATH.read_text(encoding="utf-8"))
175
 
176
+ ChatHistory = List[Dict[str, Any]]
177
 
178
 
179
  def fresh_deck() -> TackleboxDeck:
 
198
  return OpenAI(api_key=key)
199
 
200
 
201
def content_to_text(content: Any) -> str:
    """Extract displayable text from Gradio/OpenAI-style content structures."""
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        # Single content part: only its "text" payload is displayable.
        return str(content["text"]) if "text" in content else ""
    if isinstance(content, list):
        pieces: List[str] = []
        for item in content:
            if isinstance(item, dict):
                # Part dicts contribute only their "text" field, if any.
                if "text" in item:
                    pieces.append(str(item["text"]))
            elif item is not None:
                pieces.append(str(item))
        # Drop empty fragments before joining.
        return "\n".join(piece for piece in pieces if piece)
    # Fall back to the plain string form of any other value.
    return str(content)
225
+
226
+
227
def normalize_history(history: Optional[List[Any]]) -> ChatHistory:
    """Coerce Gradio chat history into openai-style message dicts."""
    entries = history or []
    if not isinstance(entries, list) or not entries:
        return []

    if isinstance(entries[0], dict):
        # Already message-style: keep only well-formed dicts that carry a role.
        return [msg for msg in entries if isinstance(msg, dict) and msg.get("role")]

    # Legacy tuple-style pairs: expand each (user, assistant) turn into
    # separate user/assistant messages, skipping empty sides.
    messages: ChatHistory = []
    for pair in entries:
        if not isinstance(pair, (list, tuple)) or len(pair) != 2:
            continue
        user_text, bot_text = pair
        if user_text:
            messages.append({"role": "user", "content": str(user_text)})
        if bot_text:
            messages.append({"role": "assistant", "content": str(bot_text)})
    return messages
254
 
255
 
 
490
  base_prompt = append_ai_hand_to_prompt(persona_prompt, ai_hand)
491
  messages: List[Dict[str, Any]] = [{"role": "system", "content": base_prompt}]
492
 
493
+ for msg in history:
494
+ if not isinstance(msg, dict):
495
+ continue
496
+ role = msg.get("role")
497
+ if role not in ("user", "assistant"):
498
+ continue
499
+ text = content_to_text(msg.get("content"))
500
+ if text:
501
+ messages.append({"role": role, "content": text})
502
 
503
  tool_output_note = ""
504
  if tool_results.get("tool_outputs"):
 
760
  if not card:
761
  status = hand_status_md(deck, player_hand) + " • **No more cards** in the tackle box."
762
  notice = "*You reached for the tacklebox, but it was empty.*"
763
+ chat_history.append({"role": "assistant", "content": notice})
764
  return render_hand_html(player_hand), status, chat_history, deck, player_hand, ai_hand
765
 
766
  player_hand.add_card(card)
 
780
  notice_lines.append(shared_with_ai_notice)
781
 
782
  notice = "\n".join(notice_lines)
783
+ chat_history.append({"role": "assistant", "content": notice})
784
  return render_hand_html(player_hand), status, chat_history, deck, player_hand, ai_hand
785
 
786
 
 
790
  return format_ai_hand_debug(ai_hand)
791
 
792
 
793
def example_prompt_water() -> str:
    """Canned opener asking about the water conditions."""
    prompt = "Hey, how does the water look today?"
    return prompt
795
+
796
+
797
def example_prompt_catch() -> str:
    """Canned opener speculating about the day's catch."""
    prompt = "What do you think we might catch?"
    return prompt
799
+
800
+
801
def example_prompt_memory() -> str:
    """Canned opener recalling a shared fishing memory."""
    prompt = "Remember fishing when we were young?"
    return prompt
803
+
804
+
805
def reset_session() -> Tuple[str, str, ChatHistory, TackleboxDeck, Hand, Hand]:
    """Re-initialize all session state and append a reset marker to the status."""
    session = init_session()
    hand_html, status, chat_history, deck, player_hand, ai_hand = session
    return hand_html, f"{status} • Reset.", chat_history, deck, player_hand, ai_hand
 
964
  reset_btn = gr.Button("Reset session", variant="secondary")
965
 
966
  with gr.Group(elem_classes=["section-card"]):
967
+ with gr.Row():
968
+ prompt_water_btn = gr.Button("Water today?")
969
+ prompt_catch_btn = gr.Button("What might we catch?")
970
+ prompt_memory_btn = gr.Button("Old memory")
971
+
972
+ chat_input = gr.Textbox(
973
+ placeholder="Say something as Robin…",
974
+ container=False,
975
+ )
976
+ chat_bot = gr.Chatbot(type="messages", height=360)
977
+
978
  chat = gr.ChatInterface(
979
  orchestrate_chat,
980
+ type="messages",
981
+ chatbot=chat_bot,
982
+ textbox=chat_input,
983
  additional_inputs=[
984
  persona_prompt_state,
985
  api_key_state,
 
996
  ],
997
  title="Tacklebox Chat",
998
  description="Robin chats with Player 2 (LLM persona).",
999
+ # On Hugging Face Spaces, examples are cached by default which can
1000
+ # break startup (and/or trigger OpenAI calls) when additional
1001
+ # inputs/outputs include complex state.
1002
+ cache_examples=False,
 
1003
  )
1004
 
1005
+ prompt_water_btn.click(example_prompt_water, outputs=chat_input)
1006
+ prompt_catch_btn.click(example_prompt_catch, outputs=chat_input)
1007
+ prompt_memory_btn.click(example_prompt_memory, outputs=chat_input)
1008
+
1009
  apply_btn.click(
1010
  update_player_two_from_form,
1011
  inputs=[
 
1053
 
1054
 
1055
if __name__ == "__main__":
    try:
        # Prefer launching with SSR disabled.
        demo.launch(ssr_mode=False)
    except TypeError as exc:  # older Gradio: launch() lacks the ssr_mode kwarg
        message = str(exc)
        if "ssr_mode" not in message:
            raise
        demo.launch()