ButterM40 committed on
Commit
9fb3586
·
1 Parent(s): 7f7c489

Switch from Gradio to Streamlit - more stable for HF Spaces

Browse files
Files changed (3) hide show
  1. README.md +2 -3
  2. app_streamlit.py +127 -0
  3. requirements.txt +2 -2
README.md CHANGED
@@ -3,9 +3,8 @@ title: Roleplay Chat Box 🎭
3
  emoji: 🎭
4
  colorFrom: purple
5
  colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.35.0
8
- app_file: app.py
9
  pinned: false
10
  license: mit
11
  short_description: AI roleplay chat with 3 unique characters
 
3
  emoji: 🎭
4
  colorFrom: purple
5
  colorTo: pink
6
+ sdk: streamlit
7
+ app_file: app_streamlit.py
 
8
  pinned: false
9
  license: mit
10
  short_description: AI roleplay chat with 3 unique characters
app_streamlit.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ import sys
4
+ import asyncio
5
+
6
+ # Add backend to path for imports
7
+ backend_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'backend')
8
+ sys.path.insert(0, backend_path)
9
+
10
+ from backend.models.character_manager import CharacterManager
11
+
12
+ # Page config
13
+ st.set_page_config(
14
+ page_title="🎭 Roleplay Chat Box",
15
+ page_icon="🎭",
16
+ layout="wide"
17
+ )
18
+
19
+ # Initialize session state
20
+ if 'character_manager' not in st.session_state:
21
+ st.session_state.character_manager = None
22
+ if 'messages' not in st.session_state:
23
+ st.session_state.messages = []
24
+ if 'current_character' not in st.session_state:
25
+ st.session_state.current_character = 'moses'
26
+
27
def initialize_models():
    """Lazily build and initialize the shared CharacterManager.

    The manager is cached in st.session_state so models load only once per
    session. Returns True on success (or if already initialized), False if
    loading failed; the error and traceback are rendered in the UI.
    """
    if st.session_state.character_manager is None:
        with st.spinner("🔄 Loading character models..."):
            try:
                st.session_state.character_manager = CharacterManager()
                # asyncio.run creates, runs, and fully tears down a fresh event
                # loop. The previous new_event_loop/set_event_loop/close dance
                # left a *closed* loop installed as the thread's current loop,
                # which would break any later asyncio use in this thread.
                asyncio.run(st.session_state.character_manager.initialize())
                st.success("✅ Models loaded successfully!")
                return True
            except Exception as e:
                st.error(f"❌ Error loading models: {str(e)}")
                import traceback
                st.error(traceback.format_exc())
                # Discard the half-initialized manager so the next rerun
                # retries instead of reporting success with a broken manager.
                st.session_state.character_manager = None
                return False
    return True
45
+
46
+ # Sidebar for character selection
47
# Sidebar: character picker, chat reset, and a short explainer.
with st.sidebar:
    st.title("🎭 Characters")

    # Map internal character ids to their display labels.
    _labels = {
        "moses": "📜 Moses - Biblical Prophet",
        "samsung_employee": "💼 Samsung Employee - Tech Expert",
        "jinx": "💥 Jinx - Chaotic Genius",
    }
    character = st.radio(
        "Choose Character",
        options=["moses", "samsung_employee", "jinx"],
        format_func=_labels.__getitem__,
        key="character_selector",
    )

    # Switching characters starts a fresh conversation.
    if character != st.session_state.current_character:
        st.session_state.current_character = character
        st.session_state.messages = []

    st.divider()

    if st.button("🗑️ Clear Chat"):
        st.session_state.messages = []
        st.rerun()

    st.divider()
    st.markdown("### About")
    st.markdown("""
    This app uses LoRA (Low-Rank Adaptation) to create unique character personalities.

    - **Base Model**: Qwen3-0.6B
    - **Adapters**: Character-specific LoRA weights
    - **Memory Efficient**: Shares one base model
    """)
81
+
82
+ # Main chat interface
83
# Main chat interface.
st.title("🎭 Roleplay Chat Box")
st.markdown(f"Currently chatting with: **{st.session_state.current_character}**")

# Models must be ready before the chat renders; halt the script on failure.
if not initialize_models():
    st.stop()

# Replay the stored transcript — Streamlit re-runs this script on every
# interaction, so the history is redrawn from session state each time.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])

# Handle a newly submitted user message.
if prompt := st.chat_input("Type your message here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                # Context window: the last 6 stored turns (3 exchanges).
                # Only "user"/"assistant" roles are ever stored, so a plain
                # copy of each dict reproduces the original role mapping.
                recent = st.session_state.messages[-6:]
                history = [
                    {"role": m["role"], "content": m["content"]} for m in recent
                ]
                # Exclude the message just submitted — it is passed separately.
                response = st.session_state.character_manager.generate_response(
                    character_id=st.session_state.current_character,
                    user_message=prompt,
                    conversation_history=history[:-1],
                )
                st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
            except Exception as e:
                error_msg = f"❌ Error: {str(e)}"
                st.error(error_msg)
                st.session_state.messages.append({"role": "assistant", "content": error_msg})
requirements.txt CHANGED
@@ -7,8 +7,8 @@ datasets>=2.14.0
7
  huggingface-hub>=0.19.0
8
  safetensors>=0.4.0
9
 
10
- # Gradio for Hugging Face Spaces - using known stable version
11
- gradio==3.35.0
12
 
13
  # Backend API
14
  fastapi>=0.104.0
 
7
  huggingface-hub>=0.19.0
8
  safetensors>=0.4.0
9
 
10
+ # Streamlit for Hugging Face Spaces - stable and reliable
11
+ streamlit>=1.28.0
12
 
13
  # Backend API
14
  fastapi>=0.104.0