Fazzioni committed
Commit 88881cd · verified · 1 Parent(s): 18e4f00

Update src/app.py

Files changed (1)
  1. src/app.py +15 -16
src/app.py CHANGED
@@ -5,6 +5,17 @@ import base64
 from pathlib import Path
 from backend import load_model, ChatModel
 from config import get_model_options
+import uuid
+
+def set_new_message_uuid():
+    st.session_state.message_uuid = uuid.uuid4()
+    logging.info(f'[NEW MESSAGE UUID] {st.session_state.message_uuid}')
+
+def save_messages():
+    messages = chat_model.conversation.messages
+    message_uuid = st.session_state.message_uuid
+    logging.info(f'[Messages save_at:] {messages} com {message_uuid}')
+
 
 st.set_page_config(page_title="Small LLM - Chat", layout="wide")
 
@@ -86,6 +97,7 @@ with st.sidebar:
         st.session_state.chat_model = chat_model
         st.session_state.model_info = selected_model_config
         st.session_state.model_name = model_name
+        set_new_message_uuid()
         st.success("✅ Modelo carregado!")
         if "messages" in st.session_state:
             del st.session_state.messages
@@ -105,6 +117,7 @@ with st.sidebar:
 
     if st.button("🗑️ Limpar Histórico", use_container_width=True):
         chat_model.clear_history()
+        set_new_message_uuid()
         if "messages" in st.session_state:
             del st.session_state.messages
         st.rerun()
@@ -112,22 +125,7 @@ with st.sidebar:
 # Área principal - Chat
 if "chat_model" not in st.session_state:
     st.info("👈 Use a sidebar para carregar um modelo primeiro.")
-    st.markdown("""
-    ### Modelos disponíveis:
-
-    **Google Gemma:**
-    - `google/gemma-3-4b-it` - 4 bilhões de parâmetros
-    - `google/gemma-3-1b-it` - 1 bilhão de parâmetros
-    - `google/gemma-3-270m-it` - 270 milhões de parâmetros
-
-    **Qwen:**
-    - `Qwen/Qwen3-0.6B` - 600 milhões de parâmetros
-    - `Qwen/Qwen2.5-0.5B-Instruct` - 500 milhões (instruct)
-    - `Qwen/Qwen2.5-0.5B` - 500 milhões
-
-    **Facebook:**
-    - `facebook/MobileLLM-R1-950M` - 950 milhões de parâmetros
-    """)
+    st.markdown(""" """)
 else:
     chat_model = st.session_state.chat_model
 
@@ -171,6 +169,7 @@ else:
 
             chat_model.add_assistant_message(full_response)
             st.session_state.messages.append({"role": "assistant", "content": full_response})
+            save_messages()
 
         except Exception as e:
             error_msg = f"Erro na geração: {str(e)}"
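For context, the two new helpers lean on names defined elsewhere in src/app.py: logging is not imported in this hunk (it is presumably imported earlier in the file), and save_messages reads the module-level chat_model, which is only bound once a model has been loaded. The sketch below is a minimal, self-contained illustration of the flow this commit wires up (fresh UUID on model load and on history clear, conversation logged after each assistant reply); the Conversation and ChatModel classes are hypothetical stand-ins for the app's backend, not its real API.

import logging
import uuid

logging.basicConfig(level=logging.INFO)

session_state = {}  # stand-in for st.session_state

class Conversation:
    # Hypothetical stand-in for the backend's conversation object.
    def __init__(self):
        self.messages = []

class ChatModel:
    # Hypothetical stand-in for the app's ChatModel backend.
    def __init__(self):
        self.conversation = Conversation()

    def add_assistant_message(self, content):
        self.conversation.messages.append({"role": "assistant", "content": content})

    def clear_history(self):
        self.conversation.messages = []

def set_new_message_uuid():
    # As in the diff: a fresh UUID marks the start of a new conversation.
    session_state["message_uuid"] = uuid.uuid4()
    logging.info(f'[NEW MESSAGE UUID] {session_state["message_uuid"]}')

def save_messages():
    # As in the diff: the messages are only logged for now; a persistence
    # layer could hook in here, keyed by message_uuid.
    messages = chat_model.conversation.messages
    message_uuid = session_state["message_uuid"]
    logging.info(f'[Messages save_at:] {messages} com {message_uuid}')

chat_model = ChatModel()
set_new_message_uuid()                    # on model load
chat_model.add_assistant_message("Olá!")
save_messages()                           # after each generated response
chat_model.clear_history()
set_new_message_uuid()                    # on "Limpar Histórico"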