Update src/streamlit_app.py
src/streamlit_app.py  CHANGED  (+29 -54)
@@ -19,18 +19,14 @@ if GOOGLE_API_KEY:
     try:
         genai.configure(api_key=GOOGLE_API_KEY)
     except Exception as e:
-        # Cette erreur est critique, donc on l'affiche de manière proéminente.
         st.error(f"Erreur de configuration de l'API Google : {e}")
 else:
-    # Avertissement si la clé n'est pas trouvée au démarrage.
     st.warning("La clé API Google (GOOGLE_API_KEY) n'est pas configurée. L'application ne pourra pas fonctionner.")


 # -----------------------------------------------------------------------------
 # Définition des modèles disponibles
 # -----------------------------------------------------------------------------
-# Remplacé par les modèles Gemini les plus récents et pertinents.
-# 'latest' pointe toujours vers la version la plus récente du modèle.
 AVAILABLE_MODELS = [
     {
         "id": "gemini-1.5-flash-latest",
@@ -52,11 +48,9 @@ DEFAULT_MODEL_ID = "gemini-1.5-flash-latest"
 def initialize_session_state():
     """Initialise toutes les variables nécessaires dans le session state pour éviter les erreurs."""

-    # Messages par défaut
     DEFAULT_SYSTEM_MESSAGE = "Vous êtes KolaChatBot, un assistant IA serviable, créatif et honnête. Répondez en français."
     DEFAULT_STARTER_MESSAGE = "Bonjour ! Je suis KolaChatBot. Comment puis-je vous aider aujourd'hui ? 🤖"

-    # Initialisation de base
     if "selected_model_id" not in st.session_state:
         st.session_state.selected_model_id = DEFAULT_MODEL_ID
     if "system_message" not in st.session_state:
@@ -66,7 +60,6 @@ def initialize_session_state():
     if "chat_history" not in st.session_state:
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]

-    # Paramètres de génération
     if "max_response_length" not in st.session_state:
         st.session_state.max_response_length = 1024
     if "temperature" not in st.session_state:
@@ -74,15 +67,12 @@ def initialize_session_state():
     if "top_p" not in st.session_state:
         st.session_state.top_p = 0.95

-    # Fonctionnalité de recherche web
     if "enable_web_search" not in st.session_state:
         st.session_state.enable_web_search = False

-    # Stockage temporaire pour les résultats de recherche (pour affichage)
     if 'last_search_results' not in st.session_state:
         st.session_state.last_search_results = None

-# Appel de la fonction d'initialisation au début du script
 initialize_session_state()

 # -----------------------------------------------------------------------------
@@ -96,10 +86,7 @@ def format_history_to_txt(chat_history: list[dict]) -> str:
     return "".join(lines)

 def format_history_to_json(chat_history: list[dict]) -> str:
-    export_data = {
-        "export_date": datetime.now().isoformat(),
-        "conversation": chat_history
-    }
     return json.dumps(export_data, indent=2, ensure_ascii=False)

 def format_history_to_md(chat_history: list[dict]) -> str:
@@ -114,27 +101,27 @@ def format_history_to_md(chat_history: list[dict]) -> str:
 # Fonctions principales (Recherche Web et Appel API)
 # -----------------------------------------------------------------------------
 def perform_web_search(query: str, num_results: int = 5) -> tuple[str, list]:
-
-    st.session_state.last_search_results = None # Réinitialiser à chaque recherche
     try:
         with DDGS() as ddgs:
             results = list(ddgs.text(keywords=query, region='fr-fr', max_results=num_results))
             if not results:
                 return "Aucun résultat de recherche trouvé.", []
-
             formatted_context = ""
-            source_details = []
             for i, res in enumerate(results):
                 formatted_context += f"[Source {i+1}]\nTitre: {res.get('title', 'N/A')}\nExtrait: {res.get('body', 'N/A')}\nURL: {res.get('href', 'N/A')}\n\n"
-                source_details.append(res)

-            st.session_state.last_search_results =
-            return formatted_context,
     except Exception as e:
         return f"Erreur lors de la recherche web: {e}", []

 def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
-    """
     if not GOOGLE_API_KEY:
         yield "Erreur: La clé API Google n'est pas configurée. Veuillez l'ajouter pour continuer."
         return
@@ -145,12 +132,12 @@ def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
         system_instruction=system_prompt
     )

-
     for msg in chat_history_for_api:
         role = 'user' if msg['role'] == 'user' else 'model'
-
-
-    current_prompt = gemini_history.pop()['parts'][0]

     generation_config = genai.types.GenerationConfig(
         max_output_tokens=params.get("max_new_tokens"),
@@ -158,10 +145,10 @@ def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
         top_p=params.get("top_p"),
     )

     response_stream = model.generate_content(
-
         generation_config=generation_config,
-        history=gemini_history,
         stream=True
     )

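
The removed call above passed history=gemini_history to model.generate_content, but google-generativeai's generate_content has no history parameter; multi-turn history goes either through the contents argument (which the new version of this file adopts) or through a ChatSession. A minimal sketch of the ChatSession route, with illustrative names (model, history, prompt are not taken from this file):

# Sketch of the ChatSession alternative: generate_content() has no history= parameter,
# but model.start_chat(history=...) does. Names below are illustrative.
def stream_with_chat_session(model, history, prompt, generation_config):
    chat = model.start_chat(history=history)  # prior turns as {"role", "parts"} dicts
    for chunk in chat.send_message(prompt, generation_config=generation_config, stream=True):
        yield chunk.text
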
@@ -173,7 +160,7 @@ def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
         yield f"Erreur lors de l'appel à l'API Google: {e}"

 # -----------------------------------------------------------------------------
-# Configuration de la page Streamlit
 # -----------------------------------------------------------------------------
 st.set_page_config(page_title="KolaChatBot IA", page_icon="🤖", layout="wide")

@@ -181,9 +168,6 @@ st.title("🤖 KolaChatBot IA")
 selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
 st.markdown(f"*Modèle actuel : **{selected_model_info['name']}***")

-# -----------------------------------------------------------------------------
-# Barre latérale (Sidebar)
-# -----------------------------------------------------------------------------
 with st.sidebar:
     st.header("🛠️ Configuration")

@@ -191,7 +175,6 @@ with st.sidebar:
     model_options = {model['id']: model['name'] for model in AVAILABLE_MODELS}

     def on_model_change():
-        """Callback pour réinitialiser la conversation lors du changement de modèle."""
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast(f"Modèle changé. Conversation réinitialisée.")

@@ -222,23 +205,23 @@ with st.sidebar:

     st.subheader("🔄 Gestion")
     col1, col2 = st.columns(2)
-    if col1.button("♻️ Nouvelle Conv.", use_container_width=True
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast("Nouvelle conversation démarrée.")
         st.rerun()
-    if col2.button("🗑️ Effacer", type="primary", use_container_width=True
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast("Conversation effacée.")
         st.rerun()

     st.subheader("📥 Exporter")
     if len(st.session_state.chat_history) > 1:
-
-        st.download_button("TXT", format_history_to_txt(st.session_state.chat_history), f"kolachat_{
-        st.download_button("JSON", format_history_to_json(st.session_state.chat_history), f"kolachat_{
-        st.download_button("Markdown", format_history_to_md(st.session_state.chat_history), f"kolachat_{
     else:
-        st.caption("

     st.divider()
     st.markdown("""
@@ -256,49 +239,43 @@ for message in st.session_state.chat_history:
     avatar = "👤" if message["role"] == "user" else "🤖"
     with st.chat_message(message["role"], avatar=avatar):
         st.markdown(message["content"])
-        # Afficher les sources si elles sont attachées au message de l'assistant
         if message.get("sources"):
             with st.expander("Sources web consultées", expanded=False):
                 for i, source in enumerate(message["sources"]):
                     st.markdown(f"**{i+1}. {source.get('title', 'Titre inconnu')}**\n"
                                 f"[*Source*]({source.get('href', '#')})\n"
-                                f"> {source.get('body', 'Aucun extrait

 # Logique de traitement de l'entrée utilisateur
-if prompt := st.chat_input(
-    # Ajouter et afficher le message de l'utilisateur
     st.session_state.chat_history.append({"role": "user", "content": prompt, "type": "text"})
     with st.chat_message("user", avatar="👤"):
         st.markdown(prompt)

-    # Préparer et afficher la réponse de l'assistant
     with st.chat_message("assistant", avatar="🤖"):
         history_for_api = st.session_state.chat_history.copy()

-        # Logique de Recherche Web (RAG)
         if st.session_state.enable_web_search:
             with st.spinner("KolaChatBot recherche sur le web..."):
                 search_context, sources = perform_web_search(prompt)

             if sources:
                 rag_prompt = (
-                    "En te basant STRICTEMENT sur les informations suivantes
                     "Cite tes sources en utilisant le format [Source X] après chaque phrase concernée.\n\n"
-                    f"--- CONTEXTE
-                    f"Question
                 )
                 history_for_api[-1]['content'] = rag_prompt
             else:
                 st.toast("La recherche web n'a pas fourni de résultats.")

-        # Appel API et Affichage en Streaming
         params = {
             "max_new_tokens": st.session_state.max_response_length,
             "temperature": st.session_state.temperature,
             "top_p": st.session_state.top_p,
         }

-        # Utiliser st.write_stream pour afficher la réponse mot par mot
         response_content = st.write_stream(get_gemini_response_stream(
             st.session_state.selected_model_id,
             st.session_state.system_message,
@@ -306,13 +283,11 @@ if prompt := st.chat_input(f"Envoyer un message...", disabled=not GOOGLE_API_KEY
             params
         ))

-        # Sauvegarder la réponse complète de l'assistant dans l'historique
         assistant_message = {"role": "assistant", "content": response_content, "type": "text"}
         if st.session_state.get('last_search_results'):
             assistant_message["sources"] = st.session_state.last_search_results
-            st.session_state.last_search_results = None

         st.session_state.chat_history.append(assistant_message)
-        # Un seul `rerun` est nécessaire si les sources ont été utilisées pour les réafficher correctement.
         if "sources" in assistant_message:
             st.rerun()

Updated file (right-hand side of the diff; lines added by this commit are prefixed with +):

@@ +19,14 @@
     try:
         genai.configure(api_key=GOOGLE_API_KEY)
     except Exception as e:
         st.error(f"Erreur de configuration de l'API Google : {e}")
 else:
     st.warning("La clé API Google (GOOGLE_API_KEY) n'est pas configurée. L'application ne pourra pas fonctionner.")


 # -----------------------------------------------------------------------------
 # Définition des modèles disponibles
 # -----------------------------------------------------------------------------
 AVAILABLE_MODELS = [
     {
         "id": "gemini-1.5-flash-latest",
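
AVAILABLE_MODELS is a list of dicts keyed by "id", and later code selects the active entry with next(). A short sketch of that lookup; the "name" values and the second entry here are illustrative, since only the first "id" is visible in this hunk:

# Sketch: how the model list is consumed later in the file.
# Only "gemini-1.5-flash-latest" appears in the diff; the rest is illustrative.
AVAILABLE_MODELS = [
    {"id": "gemini-1.5-flash-latest", "name": "Gemini 1.5 Flash"},
    {"id": "gemini-1.5-pro-latest", "name": "Gemini 1.5 Pro"},
]
DEFAULT_MODEL_ID = "gemini-1.5-flash-latest"

selected = next((m for m in AVAILABLE_MODELS if m["id"] == DEFAULT_MODEL_ID), None)
print(selected["name"])  # -> "Gemini 1.5 Flash"
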
@@ +48,9 @@
 def initialize_session_state():
     """Initialise toutes les variables nécessaires dans le session state pour éviter les erreurs."""

     DEFAULT_SYSTEM_MESSAGE = "Vous êtes KolaChatBot, un assistant IA serviable, créatif et honnête. Répondez en français."
     DEFAULT_STARTER_MESSAGE = "Bonjour ! Je suis KolaChatBot. Comment puis-je vous aider aujourd'hui ? 🤖"

     if "selected_model_id" not in st.session_state:
         st.session_state.selected_model_id = DEFAULT_MODEL_ID
     if "system_message" not in st.session_state:
@@ +60,6 @@
     if "chat_history" not in st.session_state:
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]

     if "max_response_length" not in st.session_state:
         st.session_state.max_response_length = 1024
     if "temperature" not in st.session_state:
@@ +67,12 @@
     if "top_p" not in st.session_state:
         st.session_state.top_p = 0.95

     if "enable_web_search" not in st.session_state:
         st.session_state.enable_web_search = False

     if 'last_search_results' not in st.session_state:
         st.session_state.last_search_results = None

 initialize_session_state()

 # -----------------------------------------------------------------------------
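
initialize_session_state() guards each key with a "not in st.session_state" check. An equivalent, more compact pattern iterates over a dict of defaults; a sketch, not what this file does (the temperature default is omitted because it falls outside the hunks shown):

# Sketch: seeding st.session_state defaults in one loop instead of per-key checks.
import streamlit as st

_DEFAULTS = {
    "selected_model_id": "gemini-1.5-flash-latest",
    "max_response_length": 1024,
    "top_p": 0.95,
    "enable_web_search": False,
    "last_search_results": None,
}

for key, value in _DEFAULTS.items():
    st.session_state.setdefault(key, value)  # only sets the key if it is missing
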
@@ +86,7 @@
     return "".join(lines)

 def format_history_to_json(chat_history: list[dict]) -> str:
+    export_data = {"export_date": datetime.now().isoformat(), "conversation": chat_history}
     return json.dumps(export_data, indent=2, ensure_ascii=False)

 def format_history_to_md(chat_history: list[dict]) -> str:
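
Only format_history_to_json is visible in this diff; format_history_to_txt and format_history_to_md are defined elsewhere in the file. Purely as a hypothetical sketch of an exporter with the same shape, over the same message dicts (the real implementation may differ):

# Hypothetical sketch of a Markdown exporter over the same role/content message dicts;
# the real format_history_to_md in the file may look different.
def format_history_to_md_sketch(chat_history: list[dict]) -> str:
    lines = ["# Conversation KolaChatBot\n"]
    for msg in chat_history:
        speaker = "Utilisateur" if msg["role"] == "user" else "Assistant"
        lines.append(f"**{speaker}**\n\n{msg['content']}\n")
    return "\n".join(lines)
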
@@ +101,27 @@
 # Fonctions principales (Recherche Web et Appel API)
 # -----------------------------------------------------------------------------
 def perform_web_search(query: str, num_results: int = 5) -> tuple[str, list]:
+    st.session_state.last_search_results = None
     try:
         with DDGS() as ddgs:
             results = list(ddgs.text(keywords=query, region='fr-fr', max_results=num_results))
             if not results:
                 return "Aucun résultat de recherche trouvé.", []
+
             formatted_context = ""
             for i, res in enumerate(results):
                 formatted_context += f"[Source {i+1}]\nTitre: {res.get('title', 'N/A')}\nExtrait: {res.get('body', 'N/A')}\nURL: {res.get('href', 'N/A')}\n\n"

+            st.session_state.last_search_results = results
+            return formatted_context, results
     except Exception as e:
         return f"Erreur lors de la recherche web: {e}", []

 def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
+    """
+    Appelle l'API Google Gemini et retourne un générateur (stream) pour la réponse.
+    CETTE FONCTION EST CORRIGÉE.
+    """
     if not GOOGLE_API_KEY:
         yield "Erreur: La clé API Google n'est pas configurée. Veuillez l'ajouter pour continuer."
         return
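
perform_web_search wraps duckduckgo_search's DDGS.text(); each result is a dict with 'title', 'href' and 'body' keys, which is exactly what the formatting loop above reads. A standalone sketch of that call outside Streamlit (the query string is illustrative):

# Sketch: the DDGS call used above, run outside of Streamlit.
from duckduckgo_search import DDGS

with DDGS() as ddgs:
    results = list(ddgs.text(keywords="streamlit chatbot", region="fr-fr", max_results=3))

for i, res in enumerate(results):
    # Each result is a dict with 'title', 'href' and 'body' keys.
    print(f"[Source {i+1}] {res.get('title', 'N/A')} - {res.get('href', 'N/A')}")
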
@@ +132,12 @@
         system_instruction=system_prompt
     )

+    # *** CORRECTION APPLIQUÉE ICI ***
+    # Prépare l'historique complet pour l'API dans le format attendu.
+    api_contents = []
     for msg in chat_history_for_api:
         role = 'user' if msg['role'] == 'user' else 'model'
+        api_contents.append({"role": role, "parts": [msg['content']]})

     generation_config = genai.types.GenerationConfig(
         max_output_tokens=params.get("max_new_tokens"),
@@ +145,10 @@
         top_p=params.get("top_p"),
     )

+    # Appelle generate_content avec l'argument 'contents' qui contient toute la conversation.
     response_stream = model.generate_content(
+        contents=api_contents,  # Argument correct
         generation_config=generation_config,
         stream=True
     )

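
The fix above builds api_contents as a list of {"role", "parts"} dicts covering the whole conversation and passes it to generate_content through contents=, then streams the reply. A condensed, self-contained sketch of that corrected call pattern; the model name, messages and temperature below are illustrative, not taken from this file:

# Sketch of the corrected pattern: full history passed as 'contents', reply streamed.
import google.generativeai as genai

genai.configure(api_key="...")  # illustrative; the app configures GOOGLE_API_KEY at startup
model = genai.GenerativeModel("gemini-1.5-flash-latest", system_instruction="Réponds en français.")

api_contents = [
    {"role": "user", "parts": ["Bonjour !"]},
    {"role": "model", "parts": ["Bonjour, comment puis-je aider ?"]},
    {"role": "user", "parts": ["Explique le streaming."]},
]

config = genai.types.GenerationConfig(max_output_tokens=1024, temperature=0.7, top_p=0.95)
stream = model.generate_content(contents=api_contents, generation_config=config, stream=True)

for chunk in stream:
    print(chunk.text, end="")  # the Streamlit app yields these chunks to st.write_stream instead
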
@@ +160,7 @@
         yield f"Erreur lors de l'appel à l'API Google: {e}"

 # -----------------------------------------------------------------------------
+# Configuration de la page Streamlit et de la Sidebar
 # -----------------------------------------------------------------------------
 st.set_page_config(page_title="KolaChatBot IA", page_icon="🤖", layout="wide")

@@ +168,6 @@
 selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
 st.markdown(f"*Modèle actuel : **{selected_model_info['name']}***")

 with st.sidebar:
     st.header("🛠️ Configuration")

@@ +175,6 @@
     model_options = {model['id']: model['name'] for model in AVAILABLE_MODELS}

     def on_model_change():
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast(f"Modèle changé. Conversation réinitialisée.")

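
on_model_change is a callback for the model-selection widget, but the widget itself falls outside the hunks shown here. A sketch of how such a selector is typically wired, assuming model_options and on_model_change from the file; the label and exact arguments are assumptions:

# Sketch: how on_model_change is presumably attached to a sidebar widget
# (the actual selectbox line is not part of this diff).
st.selectbox(
    "Modèle",
    options=list(model_options.keys()),
    format_func=lambda model_id: model_options[model_id],
    key="selected_model_id",    # writes the choice straight into st.session_state
    on_change=on_model_change,  # resets the conversation, as defined above
)
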
@@ +205,23 @@

     st.subheader("🔄 Gestion")
     col1, col2 = st.columns(2)
+    if col1.button("♻️ Nouvelle Conv.", use_container_width=True):
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast("Nouvelle conversation démarrée.")
         st.rerun()
+    if col2.button("🗑️ Effacer", type="primary", use_container_width=True):
         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
         st.toast("Conversation effacée.")
         st.rerun()

     st.subheader("📥 Exporter")
     if len(st.session_state.chat_history) > 1:
+        ts = datetime.now().strftime("%Y%m%d_%H%M")
+        st.download_button("TXT", format_history_to_txt(st.session_state.chat_history), f"kolachat_{ts}.txt")
+        st.download_button("JSON", format_history_to_json(st.session_state.chat_history), f"kolachat_{ts}.json")
+        st.download_button("Markdown", format_history_to_md(st.session_state.chat_history), f"kolachat_{ts}.md")
     else:
+        st.caption("Conversation vide.")

     st.divider()
     st.markdown("""
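
The export block above reuses a single strftime timestamp for all three file names. st.download_button also accepts a mime argument; a small sketch of one timestamped export with named arguments, where mime= is an addition for illustration and not part of the diff:

# Sketch: a single timestamped export; the mime= argument is illustrative only.
from datetime import datetime

ts = datetime.now().strftime("%Y%m%d_%H%M")
st.download_button(
    "JSON",
    data=format_history_to_json(st.session_state.chat_history),
    file_name=f"kolachat_{ts}.json",
    mime="application/json",
)
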
@@ +239,43 @@
     avatar = "👤" if message["role"] == "user" else "🤖"
     with st.chat_message(message["role"], avatar=avatar):
         st.markdown(message["content"])
         if message.get("sources"):
             with st.expander("Sources web consultées", expanded=False):
                 for i, source in enumerate(message["sources"]):
                     st.markdown(f"**{i+1}. {source.get('title', 'Titre inconnu')}**\n"
                                 f"[*Source*]({source.get('href', '#')})\n"
+                                f"> {source.get('body', 'Aucun extrait.')}\n---")

 # Logique de traitement de l'entrée utilisateur
+if prompt := st.chat_input("Envoyer un message...", disabled=not GOOGLE_API_KEY):
     st.session_state.chat_history.append({"role": "user", "content": prompt, "type": "text"})
     with st.chat_message("user", avatar="👤"):
         st.markdown(prompt)

     with st.chat_message("assistant", avatar="🤖"):
         history_for_api = st.session_state.chat_history.copy()

         if st.session_state.enable_web_search:
             with st.spinner("KolaChatBot recherche sur le web..."):
                 search_context, sources = perform_web_search(prompt)

             if sources:
                 rag_prompt = (
+                    "En te basant STRICTEMENT sur les informations suivantes, réponds à la question. "
                     "Cite tes sources en utilisant le format [Source X] après chaque phrase concernée.\n\n"
+                    f"--- CONTEXTE ---\n{search_context}\n--- FIN DU CONTEXTE ---\n\n"
+                    f"Question : {prompt}"
                 )
                 history_for_api[-1]['content'] = rag_prompt
             else:
                 st.toast("La recherche web n'a pas fourni de résultats.")

         params = {
             "max_new_tokens": st.session_state.max_response_length,
             "temperature": st.session_state.temperature,
             "top_p": st.session_state.top_p,
         }

         response_content = st.write_stream(get_gemini_response_stream(
             st.session_state.selected_model_id,
             st.session_state.system_message,
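
One behaviour worth being aware of in the block above: chat_history.copy() is a shallow copy, so history_for_api[-1] is the same dict object that was just appended to st.session_state.chat_history, and assigning rag_prompt to its 'content' also rewrites the stored user message. If the stored history should keep the raw question, copying each message dict avoids that; a sketch of the alternative, not what this commit does:

# Sketch: avoid mutating the message stored in session state when injecting the RAG prompt.
history_for_api = [dict(m) for m in st.session_state.chat_history]  # copy each message dict
rag_prompt = "..."  # assembled exactly as in the block above
history_for_api[-1]["content"] = rag_prompt  # st.session_state.chat_history keeps the raw question
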
@@ +283,11 @@
             params
         ))

         assistant_message = {"role": "assistant", "content": response_content, "type": "text"}
         if st.session_state.get('last_search_results'):
             assistant_message["sources"] = st.session_state.last_search_results
+            st.session_state.last_search_results = None

         st.session_state.chat_history.append(assistant_message)
         if "sources" in assistant_message:
             st.rerun()
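
For reference, every entry this file stores in st.session_state.chat_history follows the same small dict shape; the optional "sources" key only appears on assistant messages that used web search. A sketch assembled from the lines above, with illustrative content strings:

# Message shape used throughout the file (contents illustrative).
user_message = {"role": "user", "content": "Quelle est la météo ?", "type": "text"}

assistant_message = {
    "role": "assistant",
    "content": "Il fait beau. [Source 1]",
    "type": "text",
    # Present only when the web search returned results:
    "sources": [{"title": "...", "href": "...", "body": "..."}],
}
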