Spaces:
Sleeping
Sleeping
Update src/streamlit_app.py
Browse files- src/streamlit_app.py +235 -790
src/streamlit_app.py
CHANGED
|
@@ -1,873 +1,318 @@
|
|
| 1 |
import os
|
| 2 |
import streamlit as st
|
| 3 |
-
import
|
| 4 |
-
from
|
| 5 |
-
import json # Pour l'export JSON
|
| 6 |
-
from datetime import datetime # Pour nommer les fichiers d'export
|
| 7 |
-
import time # Pour les messages d'attente si besoin (non strictement utilisé ici mais utile)
|
| 8 |
-
from PIL import Image # Pour potentiellement ouvrir/manipuler l'image si nécessaire (basé sur pillow)
|
| 9 |
-
from io import BytesIO # Pour lire les données binaires de l'image
|
| 10 |
-
|
| 11 |
-
# Import Google Generative AI library
|
| 12 |
import google.generativeai as genai
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
# Assurez-vous d'avoir installé cette bibliothèque : pip install duckduckgo_search
|
| 16 |
-
from duckduckgo_search import DDGS # Importe la classe DDGS
|
| 17 |
|
| 18 |
# -----------------------------------------------------------------------------
|
| 19 |
-
#
|
| 20 |
# -----------------------------------------------------------------------------
|
| 21 |
load_dotenv()
|
| 22 |
-
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") # Gardé pour la structure, même si plus de modèles HF actifs
|
| 23 |
-
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") # Add Google API Key (assumed from Streamlit Cloud secrets)
|
| 24 |
|
| 25 |
-
# API
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
# -----------------------------------------------------------------------------
|
| 29 |
-
#
|
| 30 |
# -----------------------------------------------------------------------------
|
| 31 |
-
#
|
|
|
|
| 32 |
AVAILABLE_MODELS = [
|
| 33 |
-
# --- Google Text Models (Only keeping Flash as requested) ---
|
| 34 |
{
|
| 35 |
"id": "gemini-1.5-flash-latest",
|
| 36 |
-
"name": "Gemini 1.5 Flash (
|
| 37 |
"provider": "google",
|
| 38 |
-
"type": "text",
|
| 39 |
-
"params": {
|
| 40 |
-
"max_new_tokens": 200,
|
| 41 |
-
"temperature": 0.6,
|
| 42 |
-
"top_p": 0.9,
|
| 43 |
-
},
|
| 44 |
},
|
| 45 |
-
|
| 46 |
-
"id": "gemini-
|
| 47 |
-
"name": "Gemini
|
| 48 |
"provider": "google",
|
| 49 |
-
"type": "text", # Supposé compatible pour le chat texte
|
| 50 |
-
"params": {
|
| 51 |
-
"max_new_tokens": 200,
|
| 52 |
-
"temperature": 0.6,
|
| 53 |
-
"top_p": 0.9,
|
| 54 |
-
},
|
| 55 |
},
|
| 56 |
-
{
|
| 57 |
-
"id": "gemini-2.0-flash-preview-image-generation", # Ajouté comme demandé - NOTE : Son comportement exact comme modèle de chat texte peut varier.
|
| 58 |
-
"name": "Gemini 2.0 Flash Preview Image Gen (Google)",
|
| 59 |
-
"provider": "google",
|
| 60 |
-
"type": "text", # Supposé compatible pour le chat texte malgré son nom
|
| 61 |
-
"params": { # Paramètres par défaut pour le texte
|
| 62 |
-
"max_new_tokens": 200,
|
| 63 |
-
"temperature": 0.6,
|
| 64 |
-
"top_p": 0.9,
|
| 65 |
-
},
|
| 66 |
-
},
|
| 67 |
-
# Retiré : gemini-1.5-pro-latest, gemini-pro, gemini-2.5-flash-preview, gemini-2.5-pro-preview
|
| 68 |
-
# Retiré : Mistral (Hugging Face Text)
|
| 69 |
-
# Retiré : Stable Diffusion XL Base 1.0 (Hugging Face T2I) et les autres SD.
|
| 70 |
]
|
| 71 |
|
| 72 |
-
|
| 73 |
-
MODELS_BY_TYPE = {m_type: [m for m in AVAILABLE_MODELS if m['type'] == m_type] for m_type in set(m['type'] for m in AVAILABLE_MODELS)}
|
| 74 |
-
|
| 75 |
-
# Default task is now fixed to 'text'
|
| 76 |
-
DEFAULT_TASK = 'text'
|
| 77 |
-
# Find the first text model as default
|
| 78 |
-
first_text_model = next((m for m in AVAILABLE_MODELS if m['type'] == DEFAULT_TASK), None)
|
| 79 |
-
if first_text_model:
|
| 80 |
-
DEFAULT_MODEL_ID = first_text_model['id']
|
| 81 |
-
elif AVAILABLE_MODELS: # Fallback to the very first model if no text model exists
|
| 82 |
-
DEFAULT_MODEL_ID = AVAILABLE_MODELS[0]['id']
|
| 83 |
-
else: # No models defined at all
|
| 84 |
-
DEFAULT_MODEL_ID = None
|
| 85 |
-
|
| 86 |
|
| 87 |
# -----------------------------------------------------------------------------
|
| 88 |
-
# Session
|
| 89 |
-
# Initialize all session state variables used BEFORE any UI or logic accesses them.
|
| 90 |
# -----------------------------------------------------------------------------
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
#
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
if
|
| 106 |
-
|
| 107 |
-
# Image-specific parameters are kept for completeness, but will not be used in text-only mode
|
| 108 |
-
if 'num_inference_steps' not in st.session_state: st.session_state.num_inference_steps = 50
|
| 109 |
-
if 'guidance_scale' not in st.session_state: st.session_state.guidance_scale = 7.5
|
| 110 |
-
if 'image_height' not in st.session_state: st.session_state.image_height = 512
|
| 111 |
-
if 'image_width' not in st.session_state: st.session_state.image_width = 512
|
| 112 |
-
if 'negative_prompt' not in st.session_state: st.session_state.negative_prompt = ""
|
| 113 |
-
|
| 114 |
-
# Initialize system and starter messages in session state if not exists
|
| 115 |
-
if "system_message" not in st.session_state:
|
| 116 |
-
st.session_state.system_message = DEFAULT_SYSTEM_MESSAGE
|
| 117 |
-
if "starter_message" not in st.session_state:
|
| 118 |
-
st.session_state.starter_message = DEFAULT_STARTER_MESSAGE
|
| 119 |
-
|
| 120 |
-
# Initialize avatars
|
| 121 |
-
if "avatars" not in st.session_state:
|
| 122 |
-
st.session_state.avatars = {"user": "👤", "assistant": "🤖"}
|
| 123 |
-
|
| 124 |
-
# Initialize chat history in session state if not exists.
|
| 125 |
-
# Start with the initial assistant message with type 'text'
|
| 126 |
-
if "chat_history" not in st.session_state:
|
| 127 |
-
st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
|
| 128 |
-
|
| 129 |
-
# State variable to track if a reset was triggered by buttons
|
| 130 |
-
if '_reset_triggered' not in st.session_state:
|
| 131 |
-
st.session_state._reset_triggered = False
|
| 132 |
-
|
| 133 |
-
# State variables to detect changes for rerun/reset logic
|
| 134 |
-
# Track task and model ID *before* the radio/selectbox potentially changes them mid-run
|
| 135 |
-
if '_prev_task_before_radio' not in st.session_state: st.session_state._prev_task_before_radio = st.session_state.selected_task
|
| 136 |
-
if '_prev_model_id_before_selectbox' not in st.session_state:
|
| 137 |
-
st.session_state._prev_model_id_before_selectbox = st.session_state.selected_model_id
|
| 138 |
-
|
| 139 |
-
# State for web search feature
|
| 140 |
-
if 'enable_web_search' not in st.session_state:
|
| 141 |
-
st.session_state.enable_web_search = False
|
| 142 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
|
| 144 |
# -----------------------------------------------------------------------------
|
| 145 |
-
#
|
| 146 |
# -----------------------------------------------------------------------------
|
| 147 |
def format_history_to_txt(chat_history: list[dict]) -> str:
|
| 148 |
-
lines = [f"KolaChatBot Conversation
|
| 149 |
for message in chat_history:
|
| 150 |
-
if message.get("role") == "system": continue
|
| 151 |
role = "Utilisateur" if message["role"] == "user" else "KolaChatBot"
|
| 152 |
-
|
| 153 |
-
if content_type == "text" or content_type == "t2i_prompt": # Include user prompts for T2I in text export
|
| 154 |
-
lines.append(f"{role}:\n{message['content']}\n")
|
| 155 |
-
elif content_type == "t2i" and "prompt" in message: # For AI image response, include the prompt if available
|
| 156 |
-
lines.append(f"{role} (Image générée):\nPrompt: {message['prompt']}\n(Image non incluse dans l'export TXT)\n")
|
| 157 |
-
# Ignore other types gracefully
|
| 158 |
-
|
| 159 |
return "".join(lines)
|
| 160 |
|
| 161 |
def format_history_to_json(chat_history: list[dict]) -> str:
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
if message.get("role") == "system": continue
|
| 168 |
-
export_msg = {"role": message["role"], "type": message.get("type", "text")}
|
| 169 |
-
|
| 170 |
-
if export_msg["type"] == "text":
|
| 171 |
-
export_msg["content"] = message.get("content", "")
|
| 172 |
-
elif export_msg["type"] == "t2i_prompt": # User prompt for T2I (kept for handling potential old history)
|
| 173 |
-
export_msg["content"] = message.get("content", "")
|
| 174 |
-
elif export_msg["type"] == "t2i" and "prompt" in message: # AI image response (kept for handling potential old history)
|
| 175 |
-
export_msg["prompt"] = message["prompt"]
|
| 176 |
-
export_msg["image_placeholder"] = "(Image non incluse dans l'export JSON)" # Indicate image was here
|
| 177 |
-
if "content" in message: # Include error message if it was an error T2I response
|
| 178 |
-
export_msg["error_content"] = message["content"]
|
| 179 |
-
|
| 180 |
-
export_history.append(export_msg)
|
| 181 |
-
|
| 182 |
-
return json.dumps(export_history, indent=2, ensure_ascii=False)
|
| 183 |
-
|
| 184 |
|
| 185 |
def format_history_to_md(chat_history: list[dict]) -> str:
|
| 186 |
lines = [f"# KolaChatBot Conversation\n*Exporté le {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n\n"]
|
| 187 |
for message in chat_history:
|
| 188 |
-
if message
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
role_label = f"{user_avatar} **Utilisateur**" if message["role"] == "user" else f"{assistant_avatar} **KolaChatBot**"
|
| 192 |
-
|
| 193 |
-
content_type = message.get("type", "text")
|
| 194 |
-
|
| 195 |
-
lines.append(f"### {role_label}\n\n")
|
| 196 |
-
if content_type == "text":
|
| 197 |
-
lines.append(f"{message.get('content', '')}\n\n")
|
| 198 |
-
elif content_type == "t2i_prompt": # Kept for handling potential old history
|
| 199 |
-
lines.append(f"*Prompt image:* {message.get('content', '')}\n\n")
|
| 200 |
-
elif content_type == "t2i": # Kept for handling potential old history
|
| 201 |
-
prompt_text = message.get('prompt', 'Pas de prompt enregistré')
|
| 202 |
-
error_text = message.get('content', '') # Potential error message
|
| 203 |
-
lines.append(f"*Image générée (prompt: {prompt_text})*\n")
|
| 204 |
-
if error_text:
|
| 205 |
-
lines.append(f"*(Erreur de génération : {error_text})*\n")
|
| 206 |
-
# In actual markdown, you could embed a base64 image or link if saved.
|
| 207 |
-
# For this simple export, just note it.
|
| 208 |
-
lines.append("\n\n")
|
| 209 |
-
|
| 210 |
-
lines.append("---\n\n")
|
| 211 |
-
|
| 212 |
return "".join(lines)
|
| 213 |
|
| 214 |
# -----------------------------------------------------------------------------
|
| 215 |
-
#
|
| 216 |
# -----------------------------------------------------------------------------
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
"""
|
| 221 |
-
Calls the Hugging Face Inference API for either text generation or text-to-image.
|
| 222 |
-
This function is now only relevant for T2I calls if you were to add back T2I models.
|
| 223 |
-
"""
|
| 224 |
-
if not HUGGINGFACEHUB_API_TOKEN:
|
| 225 |
-
return "Erreur d'API Hugging Face: Le token HUGGINGFACEHUB_API_TOKEN est introuvable."
|
| 226 |
-
|
| 227 |
-
headers = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"}
|
| 228 |
-
url = f"{HF_BASE_API_URL}{model_id}"
|
| 229 |
-
|
| 230 |
-
# Since we removed HF text models, this branch is not expected to be hit in this config.
|
| 231 |
-
# If you add HF text models back, ensure the prompt building is done by the caller.
|
| 232 |
-
if model_type == 'text':
|
| 233 |
-
# This case should not be hit with the current AVAILABLE_MODELS list
|
| 234 |
-
st.error("Erreur interne: Modèle texte Hugging Face non pris en charge dans cette configuration.")
|
| 235 |
-
return {"role": "assistant", "content": "Erreur interne: Modèle texte Hugging Face non pris en charge dans cette configuration.", "type": "text"}
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
elif model_type == 't2i':
|
| 239 |
-
# This case should not be hit with the current AVAILABLE_MODELS list
|
| 240 |
-
st.error("Erreur interne: Modèle Text-to-Image Hugging Face non pris en charge dans cette configuration.")
|
| 241 |
-
return {"role": "assistant", "content": "Erreur interne: Modèle Text-to-Image Hugging Face non pris en charge dans cette configuration.", "type": "text", "prompt": payload_inputs}
|
| 242 |
-
|
| 243 |
-
else:
|
| 244 |
-
return f"Erreur interne: Type de modèle Hugging Face '{model_type}' inconnu."
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
# The code below this point would only be reached if you added back HF models.
|
| 248 |
-
# The structure is kept but not actively used in this config.
|
| 249 |
-
payload = {}
|
| 250 |
-
response_parser = None
|
| 251 |
-
response_is_json = False # Default to False for safety
|
| 252 |
-
|
| 253 |
-
# ... (rest of the hf inference logic is now effectively unreachable) ...
|
| 254 |
-
# To avoid linting errors or confusion, let's just make it return an error for any other type.
|
| 255 |
-
return f"Erreur interne: Type de modèle Hugging Face '{model_type}' non géré dans l'appel API."
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
def call_google_api(model_id: str, system_message: str, chat_history_for_api: list[dict], params: dict) -> str:
|
| 260 |
-
"""
|
| 261 |
-
Calls the Google Generative AI API (Text models only).
|
| 262 |
-
Note: The system_message is included for consistency but IGNORED in the API call.
|
| 263 |
-
"""
|
| 264 |
-
if not GOOGLE_API_KEY:
|
| 265 |
-
return "Erreur d'API Google: La clé API Google est introuvable."
|
| 266 |
-
|
| 267 |
try:
|
| 268 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 269 |
except Exception as e:
|
| 270 |
-
return f"Erreur de
|
| 271 |
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
# `chat_history_for_api` here is the list of messages relevant to the model turns (filtered by text type).
|
| 278 |
-
gemini_history_parts = []
|
| 279 |
|
| 280 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 281 |
|
|
|
|
| 282 |
for msg in chat_history_for_api:
|
| 283 |
-
if msg
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
|
| 288 |
generation_config = genai.types.GenerationConfig(
|
| 289 |
-
max_output_tokens=params.get("max_new_tokens"
|
| 290 |
-
temperature=params.get("temperature"
|
| 291 |
-
top_p=params.get("top_p"
|
| 292 |
)
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
contents=gemini_history_parts,
|
| 297 |
generation_config=generation_config,
|
| 298 |
-
|
|
|
|
| 299 |
)
|
| 300 |
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
generated_text = "".join(part.text for part in response.candidates[0].content.parts)
|
| 305 |
-
return generated_text.strip()
|
| 306 |
-
elif hasattr(response.candidates[0], 'finish_reason') and response.candidates[0].finish_reason:
|
| 307 |
-
# Model finished generation early
|
| 308 |
-
return f"API Google: La génération s'est terminée prématurément. Raison: {response.candidates[0].finish_reason}. Prompt feedback: {response.prompt_feedback}"
|
| 309 |
-
else:
|
| 310 |
-
# Candidate exists but no content parts, might be an unhandled scenario
|
| 311 |
-
return f"Erreur API Google: Réponse vide ou inattendue après appel réussi. Debug info: {response.candidates[0]}"
|
| 312 |
-
elif hasattr(response, 'prompt_feedback') and response.prompt_feedback and response.prompt_feedback.block_reason:
|
| 313 |
-
# Prompt was blocked for safety reasons
|
| 314 |
-
return f"API Google: Votre message a été bloqué ({response.prompt_feedback.block_reason}). Raison détaillée: {response.prompt_feedback.safety_ratings}"
|
| 315 |
-
else:
|
| 316 |
-
# No candidates and no block feedback - unknown error
|
| 317 |
-
return f"API Google: Aucune réponse générée pour une raison inconnue. Debug info: {response}"
|
| 318 |
|
| 319 |
except Exception as e:
|
| 320 |
-
|
| 321 |
-
return f"Erreur lors de l'appel API Google: {e}"
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
def _format_time(seconds):
|
| 325 |
-
"""Formats seconds into a human-readable string."""
|
| 326 |
-
if not isinstance(seconds, (int, float)) or seconds < 0: return "N/A"
|
| 327 |
-
minutes = int(seconds // 60)
|
| 328 |
-
remaining_seconds = int(seconds % 60)
|
| 329 |
-
if minutes > 0: return f"{minutes} min {remaining_seconds} sec"
|
| 330 |
-
return f"{remaining_seconds} sec"
|
| 331 |
|
| 332 |
# -----------------------------------------------------------------------------
|
| 333 |
-
#
|
| 334 |
# -----------------------------------------------------------------------------
|
| 335 |
-
|
| 336 |
-
"""Performs a web search using DuckDuckGo and returns formatted results."""
|
| 337 |
-
try:
|
| 338 |
-
# Use DDGS.text for simple text search (DDGS class)
|
| 339 |
-
# It's best practice to use DDGS as a context manager to ensure proper session closure
|
| 340 |
-
with DDGS() as ddgs:
|
| 341 |
-
# Set region for potentially better results
|
| 342 |
-
results = ddgs.text(keywords=query, max_results=num_results, region='fr-fr') # Added region
|
| 343 |
-
|
| 344 |
-
# Convert generator to list for iteration
|
| 345 |
-
results_list = list(results)
|
| 346 |
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
formatted_results = []
|
| 351 |
-
for i, res in enumerate(results_list):
|
| 352 |
-
formatted_results.append(f"Titre: {res.get('title', 'N/A')}")
|
| 353 |
-
formatted_results.append(f"URL: {res.get('href', 'N/A')}")
|
| 354 |
-
formatted_results.append(f"Extrait: {res.get('body', 'N/A')}") # Removed extra newline
|
| 355 |
-
return "\n".join(formatted_results)
|
| 356 |
-
except Exception as e:
|
| 357 |
-
return f"Erreur lors de la recherche web: {e}"
|
| 358 |
|
| 359 |
# -----------------------------------------------------------------------------
|
| 360 |
-
#
|
| 361 |
-
# -----------------------------------------------------------------------------
|
| 362 |
-
|
| 363 |
-
def get_text_response(selected_model_id: str, system_prompt: str, full_chat_history: list[dict]):
|
| 364 |
-
"""
|
| 365 |
-
Handles text generation request using the selected text model.
|
| 366 |
-
This is the primary generation function in this text-only configuration.
|
| 367 |
-
"""
|
| 368 |
-
# Model type compatibility is checked by the caller before calling this function.
|
| 369 |
-
selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == selected_model_id), None)
|
| 370 |
-
if not selected_model_info: # Should not happen due to caller check, but safety
|
| 371 |
-
return {"role": "assistant", "content": f"Erreur interne: Modèle texte '{selected_model_id}' introuvable.", "type": "text"}
|
| 372 |
-
|
| 373 |
-
model_provider = selected_model_info['provider']
|
| 374 |
-
model_id = selected_model_info['id']
|
| 375 |
-
# model_type is 'text' here as checked by the caller
|
| 376 |
-
params = { # Use parameters from session state for text generation
|
| 377 |
-
"max_new_tokens": st.session_state.max_response_length,
|
| 378 |
-
"temperature": st.session_state.temperature,
|
| 379 |
-
"top_p": st.session_state.top_p,
|
| 380 |
-
}
|
| 381 |
-
|
| 382 |
-
# Create a list of actual conversation turns relevant for text models.
|
| 383 |
-
# This excludes the initial assistant starter message if it's just the default one
|
| 384 |
-
# and there's more history. It should only include text messages.
|
| 385 |
-
actual_conversation_history_for_prompt = [
|
| 386 |
-
msg for msg in full_chat_history
|
| 387 |
-
if msg.get("type", "text") == "text" # Only include messages explicitly typed as 'text'
|
| 388 |
-
and not (msg['role'] == 'assistant' and msg['content'] == st.session_state.starter_message and len(full_chat_history) > 1)
|
| 389 |
-
]
|
| 390 |
-
|
| 391 |
-
# For text models, the history for prompt building should end with the user's text message.
|
| 392 |
-
# The last message in the *full_chat_history* is guaranteed to be the user's latest input
|
| 393 |
-
# (which triggered this generation). We need to ensure that user input was *text type*.
|
| 394 |
-
if full_chat_history and full_chat_history[-1].get("type", "text") != "text":
|
| 395 |
-
# This indicates an issue with the calling logic or user input handling
|
| 396 |
-
st.error("Logique d'historique interne invalide: La dernière message utilisateur n'est pas du texte pour la génération de texte.")
|
| 397 |
-
return {"role": "assistant", "content": "Erreur interne: Le dernier message utilisateur n'est pas du texte pour la génération de texte.", "type": "text"}
|
| 398 |
-
|
| 399 |
-
response_content = "" # Initialize outside condition
|
| 400 |
-
|
| 401 |
-
# --- Web Search (RAG) Integration for Google models ---
|
| 402 |
-
if st.session_state.enable_web_search and model_provider == 'google':
|
| 403 |
-
last_user_query = full_chat_history[-1]['content'] # Get the user's last actual query
|
| 404 |
-
|
| 405 |
-
# Display "Recherche en cours..." message
|
| 406 |
-
st.session_state.chat_history.append({"role": "assistant", "content": f"*(KolaChatBot recherche sur le web pour '{last_user_query}'...)*", "type": "text"})
|
| 407 |
-
st.rerun() # Force a rerun to display this message immediately
|
| 408 |
-
|
| 409 |
-
# This part runs on the *next* rerun
|
| 410 |
-
try:
|
| 411 |
-
# Perform the actual web search
|
| 412 |
-
search_results = perform_web_search(last_user_query)
|
| 413 |
-
|
| 414 |
-
# Create a copy of the history for the API call and inject the search results
|
| 415 |
-
temp_api_history = []
|
| 416 |
-
for msg in actual_conversation_history_for_prompt:
|
| 417 |
-
temp_api_history.append(msg.copy()) # Make sure to copy the dict
|
| 418 |
-
|
| 419 |
-
# Prepend search results to the *last user message* in the temp history
|
| 420 |
-
context_message_content = f"Voici des informations pertinentes du web sur le sujet de la dernière question :\n```\n{search_results}\n```\n\nEn te basant sur ces informations (si pertinentes) et notre conversation, réponds à la question : "
|
| 421 |
-
|
| 422 |
-
if temp_api_history and temp_api_history[-1]['role'] == 'user' and temp_api_history[-1]['type'] == 'text':
|
| 423 |
-
temp_api_history[-1]['content'] = context_message_content + temp_api_history[-1]['content']
|
| 424 |
-
else:
|
| 425 |
-
# Fallback: add context as a separate user message if last message isn't a simple user text
|
| 426 |
-
temp_api_history.append({"role": "user", "content": context_message_content, "type": "text"})
|
| 427 |
-
|
| 428 |
-
# Call the Google API with the modified history
|
| 429 |
-
response_content = call_google_api(model_id, system_prompt, temp_api_history, params)
|
| 430 |
-
|
| 431 |
-
# Update the search message in chat history to indicate results were used
|
| 432 |
-
# Find the message we added earlier and update its content
|
| 433 |
-
if st.session_state.chat_history and st.session_state.chat_history[-1]["content"].startswith("*(KolaChatBot recherche web"):
|
| 434 |
-
st.session_state.chat_history[-1]["content"] = f"*(KolaChatBot a terminé la recherche web. Résultats intégrés à la réponse.)*"
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
except Exception as e:
|
| 438 |
-
response_content = f"Erreur lors de la recherche web ou de l'intégration des résultats : {e}"
|
| 439 |
-
# If search failed, update the temporary message to show the error
|
| 440 |
-
if st.session_state.chat_history and st.session_state.chat_history[-1]["content"].startswith("*(KolaChatBot recherche web"):
|
| 441 |
-
st.session_state.chat_history[-1]["content"] = f"*(KolaChatBot a rencontré une erreur lors de la recherche web : {e})*"
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
else: # No web search or not a Google model
|
| 445 |
-
if model_provider == 'google':
|
| 446 |
-
# This path is for Google models WITHOUT web search enabled
|
| 447 |
-
response_content = call_google_api(model_id, system_prompt, actual_conversation_history_for_prompt, params)
|
| 448 |
-
else:
|
| 449 |
-
# This branch is effectively unreachable with the current AVAILABLE_MODELS list (only Google Text models)
|
| 450 |
-
response_content = f"Erreur interne: Fournisseur API '{model_provider}' inconnu pour le modèle texte '{model_id}'."
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
return {"role": "assistant", "content": response_content, "type": "text"}
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
# This function is no longer used in this text-only configuration.
|
| 457 |
-
def get_image_response(selected_model_id: str, user_prompt: str):
|
| 458 |
-
return {"role": "assistant", "content": "La génération d'images n'est pas disponible dans cette version de l'application.", "type": "text", "prompt": user_prompt}
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
# -----------------------------------------------------------------------------
|
| 462 |
-
# Streamlit page configuration
|
| 463 |
-
# -----------------------------------------------------------------------------
|
| 464 |
-
st.set_page_config(page_title="KolaChatBot - Multi-Modèles IA", page_icon="🤖", layout="wide")
|
| 465 |
-
st.title("🤖 KolaChatBot - Multi-Modèles IA")
|
| 466 |
-
|
| 467 |
-
# Dynamically display the currently selected model and task in the description
|
| 468 |
-
# Get the info for the *currently selected* model to display its name
|
| 469 |
-
selected_model_info_display = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.get('selected_model_id')), None)
|
| 470 |
-
selected_model_name_display = selected_model_info_display['name'] if selected_model_info_display else 'Aucun modèle sélectionné' # Fallback text
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
# Current task is now fixed to 'text'
|
| 474 |
-
current_task_label = "Génération de Texte (Conversation)"
|
| 475 |
-
st.markdown(f"*Tâche actuelle : **{current_task_label}***\n*Modèle : **`{selected_model_name_display}`** *")
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
# -----------------------------------------------------------------------------
|
| 479 |
-
# Manuel d'utilisation (Update for text-only and RAG)
|
| 480 |
-
# -----------------------------------------------------------------------------
|
| 481 |
-
with st.expander("📖 Manuel d'utilisation de KolaChatBot", expanded=False):
|
| 482 |
-
st.markdown("""
|
| 483 |
-
Bienvenue sur KolaChatBot - Une application de chat IA ! Voici comment tirer le meilleur parti de notre assistant IA :
|
| 484 |
-
|
| 485 |
-
**1. Comment interagir ?**
|
| 486 |
-
- **La tâche est fixée sur Texte (conversation).**
|
| 487 |
-
- **Entrer votre prompt :** Tapez votre message dans la zone de texte en bas et appuyez sur Entrée.
|
| 488 |
-
|
| 489 |
-
**2. Paramètres dans la barre latérale (Sidebar) :**
|
| 490 |
-
Ajustez les paramètres pour la génération de texte.
|
| 491 |
-
|
| 492 |
-
* **Sélection du Modèle :** Choisissez un modèle Google Gemini Flash pour converser.
|
| 493 |
-
- **Important :** Assurez-vous que la clé API Google (`GOOGLE_API_KEY`) est configurée dans vos secrets.
|
| 494 |
-
- Changer de modèle **ne réinitialise pas** automatiquement les autres paramètres. Les paramètres actuels seront utilisés lors de la prochaine génération.
|
| 495 |
-
* **Option "Activer la recherche web" :** Activez cette option pour que l'IA (via les modèles Google Gemini) recherche sur le web et utilise les résultats pour enrichir ses réponses.
|
| 496 |
-
* **Paramètres de Génération :** Ajustez la longueur maximale de la réponse, la température (créativité), et le Top-P (sampling).
|
| 497 |
-
* **Message Système / Personnalité :** Définissez le rôle ou le style que l'IA de texte doit adopter.
|
| 498 |
-
* **Message de Bienvenue de l'IA :** Personnalisez le message initial.
|
| 499 |
-
* **Sélection des Avatars.**
|
| 500 |
-
* **Gestion de la Conversation :** Utilisez les boutons pour appliquer les paramètres actuels (et démarrer une nouvelle conversation) ou simplement effacer l'historique actuel.
|
| 501 |
-
* **Exporter la Conversation :** Téléchargez l'historique.
|
| 502 |
-
|
| 503 |
-
**3. Limitations :**
|
| 504 |
-
- **L'application est configurée uniquement pour la tâche de génération de texte.** Seuls les modèles Google Gemini Flash sont disponibles. Tenter d'utiliser un autre type de modèle entraînerait une erreur.
|
| 505 |
-
- La fonction "Recherche Web" est actuellement implémentée uniquement pour les modèles Google Gemini et peut entraîner des délais supplémentaires. Les résultats de recherche web s'affichent temporairement dans le chat avant la réponse de l'IA.
|
| 506 |
-
- Notez que l'application de votre **Message Système / Personnalité** aux modèles Google dépend de la prise en charge de la fonctionnalité `system_instruction` par la version de la bibliothèque Google Generative AI et le modèle spécifique utilisé. Si vous obtenez des erreurs (`unexpected keyword argument 'system_instruction'`), le message système sera ignoré pour ces modèles.
|
| 507 |
-
- Cette application ne stocke pas les données de manière permanente. L'historique est présent dans la session tant que l'application tourne.
|
| 508 |
-
|
| 509 |
-
Amusez-vous bien avec KolaChatBot !
|
| 510 |
-
""")
|
| 511 |
-
# -----------------------------------------------------------------------------
|
| 512 |
-
# Sidebar settings
|
| 513 |
# -----------------------------------------------------------------------------
|
| 514 |
with st.sidebar:
|
| 515 |
-
st.header("🛠
|
| 516 |
-
|
| 517 |
-
# Task is fixed to 'text'
|
| 518 |
-
st.subheader("🎯 Tâche IA : Texte (fixée)")
|
| 519 |
|
| 520 |
st.subheader("🧠 Sélection du Modèle")
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
st.session_state.selected_model_id = None
|
| 526 |
-
else:
|
| 527 |
-
model_options = {model['id']: model['name'] for model in all_available_models}
|
| 528 |
-
|
| 529 |
-
current_model_index = 0
|
| 530 |
-
if st.session_state.selected_model_id in model_options:
|
| 531 |
-
current_model_index = list(model_options.keys()).index(st.session_state.selected_model_id)
|
| 532 |
-
elif all_available_models:
|
| 533 |
-
default_model_from_list = next((m for m in AVAILABLE_MODELS), None)
|
| 534 |
-
if default_model_from_list:
|
| 535 |
-
st.session_state.selected_model_id = default_model_from_list['id']
|
| 536 |
-
current_model_index = 0
|
| 537 |
-
else:
|
| 538 |
-
st.session_state.selected_model_id = None
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
st.session_state._prev_model_id_before_selectbox = st.session_state.selected_model_id
|
| 542 |
-
selected_model_id_from_selectbox_value = st.selectbox(
|
| 543 |
-
"Choisir le modèle :",
|
| 544 |
-
options=list(model_options.keys()),
|
| 545 |
-
format_func=lambda x: model_options[x],
|
| 546 |
-
index=current_model_index,
|
| 547 |
-
key="selected_model_id",
|
| 548 |
-
help="Sélectionnez un modèle disponible (uniquement modèles texte pris en charge dans cette version)."
|
| 549 |
-
)
|
| 550 |
-
|
| 551 |
-
|
| 552 |
-
# --- Model Change Detection (without auto-resetting params) ---
|
| 553 |
-
model_id_changed_this_run = st.session_state.selected_model_id != st.session_state.get('_prev_model_id_before_selectbox')
|
| 554 |
-
|
| 555 |
-
# If model changed (and not triggered by an explicit button reset), reset history
|
| 556 |
-
if model_id_changed_this_run and not st.session_state.get('_reset_triggered', False):
|
| 557 |
st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
|
| 558 |
-
st.
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
-
|
| 564 |
-
|
| 565 |
-
|
| 566 |
-
|
| 567 |
-
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
| 571 |
-
elif current_model_provider == 'google':
|
| 572 |
-
if not GOOGLE_API_KEY:
|
| 573 |
-
st.warning("❌ La clé API Google est manquante (`GOOGLE_API_KEY`). Les modèles Google ne fonctionneront pas.")
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
# --- Dynamic Parameter Settings based on the SELECTED MODEL's TYPE ---
|
| 577 |
-
st.subheader("⚙️ Paramètres")
|
| 578 |
-
current_selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
|
| 579 |
-
|
| 580 |
-
if st.session_state.selected_model_id and current_selected_model_info:
|
| 581 |
-
expander_title = f"Ajuster Paramètres : {current_selected_model_info['name']}" # Simplified title
|
| 582 |
-
with st.expander(expander_title, expanded=True):
|
| 583 |
-
# Since only text models are available, only display text parameters
|
| 584 |
-
if current_selected_model_info['type'] == 'text':
|
| 585 |
-
st.session_state.max_response_length = st.number_input(
|
| 586 |
-
"Max New Tokens (longueur max réponse) :",
|
| 587 |
-
min_value=20, max_value=8192, # Increased max_value
|
| 588 |
-
value=st.session_state.max_response_length,
|
| 589 |
-
step=10,
|
| 590 |
-
key="max_new_tokens_input",
|
| 591 |
-
help="Longueur maximale de la réponse de l'IA (en jetons ou tokens).",
|
| 592 |
-
)
|
| 593 |
-
st.session_state.temperature = st.slider(
|
| 594 |
-
"Temperature (créativité) :",
|
| 595 |
-
min_value=0.0, max_value=2.0,
|
| 596 |
-
value=st.session_state.temperature,
|
| 597 |
-
step=0.01,
|
| 598 |
-
key="temperature_input",
|
| 599 |
-
help="Contrôle le caractère aléatoire des réponses. Plus élevé = plus créatif/imprévisible.",
|
| 600 |
-
)
|
| 601 |
-
st.session_state.top_p = st.slider(
|
| 602 |
-
"Top-P (sampling) :",
|
| 603 |
-
min_value=0.01, max_value=1.0,
|
| 604 |
-
value=st.session_state.top_p,
|
| 605 |
-
step=0.01,
|
| 606 |
-
key="top_p_input",
|
| 607 |
-
help="Contrôle la diversité en limitant les options de tokens. Plus bas = moins diversifié. 1.0 = désactivé.",
|
| 608 |
-
)
|
| 609 |
-
|
| 610 |
-
st.session_state.system_message = st.text_area(
|
| 611 |
-
"Message Système / Personnalité :",
|
| 612 |
-
value=st.session_state.system_message,
|
| 613 |
-
height=100,
|
| 614 |
-
key="system_message_input",
|
| 615 |
-
help="Décrivez le rôle ou le style que l'IA de texte doit adopter. Notez que cette consigne peut être ignorée par certains modèles/versions API (voir Limitations).",
|
| 616 |
-
)
|
| 617 |
-
st.session_state.starter_message = st.text_area(
|
| 618 |
-
"Message de Bienvenue de l'IA :",
|
| 619 |
-
value=st.session_state.starter_message,
|
| 620 |
-
height=100,
|
| 621 |
-
key="starter_message_input",
|
| 622 |
-
help="Le premier message que l'IA affichera au début d'une nouvelle conversation textuelle ou après un reset.",
|
| 623 |
-
)
|
| 624 |
-
# Web Search Checkbox - only displayed for Google models
|
| 625 |
-
if current_selected_model_info['provider'] == 'google':
|
| 626 |
-
st.session_state.enable_web_search = st.checkbox(
|
| 627 |
-
"Activer la recherche web (pour les modèles Google)",
|
| 628 |
-
value=st.session_state.enable_web_search,
|
| 629 |
-
key="web_search_checkbox",
|
| 630 |
-
help="Si coché, les modèles Google Gemini effectueront une recherche DuckDuckGo et utiliseront les résultats pour répondre à votre prompt."
|
| 631 |
-
)
|
| 632 |
-
else: # If a non-Google model were added back
|
| 633 |
-
# Ensure checkbox state is not changed and it's disabled
|
| 634 |
-
# Use a unique key based on model_id to prevent state interference if model changes
|
| 635 |
-
st.session_state.enable_web_search = st.checkbox(
|
| 636 |
-
"Activer la recherche web (pour les modèles Google)",
|
| 637 |
-
value=False, # Always False if not Google
|
| 638 |
-
key=f"web_search_checkbox_{current_selected_model_info.get('id', 'nongoogle')}", # Unique key
|
| 639 |
-
disabled=True,
|
| 640 |
-
help="La recherche web n'est disponible que pour les modèles Google Gemini.",
|
| 641 |
-
)
|
| 642 |
-
|
| 643 |
-
elif current_selected_model_info['type'] == 't2i': # This branch should not be hit
|
| 644 |
-
st.info("Note: Ce modèle est de type 'Image', mais l'application ne supporte que la tâche 'Texte'.")
|
| 645 |
-
|
| 646 |
-
if current_selected_model_info and current_selected_model_info['type'] not in ['text', 't2i']: # Should not be hit
|
| 647 |
-
st.info(f"Type de modèle sélectionné ('{current_selected_model_info['type']}') inconnu ou non pris en charge pour l'affichage des paramètres.")
|
| 648 |
-
|
| 649 |
-
else:
|
| 650 |
-
st.info("Aucun modèle sélectionné, les paramètres ne sont pas disponibles.")
|
| 651 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 652 |
|
| 653 |
-
|
| 654 |
-
|
| 655 |
-
|
| 656 |
-
st.session_state._reset_triggered = False
|
| 657 |
|
|
|
|
|
|
|
| 658 |
|
| 659 |
-
st.subheader("🔄 Gestion
|
| 660 |
-
|
| 661 |
-
if
|
| 662 |
-
# Parameters are already updated in session state via key linking
|
| 663 |
-
# Reset history.
|
| 664 |
st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
|
| 665 |
-
st.
|
| 666 |
-
st.
|
| 667 |
-
|
| 668 |
-
|
| 669 |
-
# Clear history button - simpler, just history
|
| 670 |
-
if st.button("🗑️ Effacer la Conversation Actuelle", help="Efface l'historique de conversation mais conserve les paramètres actuels."):
|
| 671 |
-
# Reset history.
|
| 672 |
st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
|
| 673 |
-
st.
|
| 674 |
-
st.
|
| 675 |
-
st.rerun() # Rerun immediately to show empty chat with starter message
|
| 676 |
-
|
| 677 |
-
|
| 678 |
-
st.subheader("📥 Exporter la Conversation")
|
| 679 |
-
# Check if there's actual history beyond the initial assistant message
|
| 680 |
-
export_possible = len([msg for msg in st.session_state.chat_history if msg.get("role") != "system"]) > 1
|
| 681 |
|
| 682 |
-
|
|
|
|
| 683 |
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 684 |
-
|
| 685 |
-
|
| 686 |
-
|
| 687 |
-
st.download_button(
|
| 688 |
-
label="TXT",
|
| 689 |
-
data=format_history_to_txt(st.session_state.chat_history),
|
| 690 |
-
file_name=f"kolachat_conversation_{timestamp}.txt",
|
| 691 |
-
mime="text/plain",
|
| 692 |
-
use_container_width=True
|
| 693 |
-
)
|
| 694 |
-
with col_export2:
|
| 695 |
-
st.download_button(
|
| 696 |
-
label="JSON",
|
| 697 |
-
data=format_history_to_json(st.session_state.chat_history),
|
| 698 |
-
file_name=f"kolachat_conversation_{timestamp}.json",
|
| 699 |
-
mime="application/json",
|
| 700 |
-
use_container_width=True
|
| 701 |
-
)
|
| 702 |
-
with col_export3:
|
| 703 |
-
st.download_button(
|
| 704 |
-
label="Markdown",
|
| 705 |
-
data=format_history_to_md(st.session_state.chat_history),
|
| 706 |
-
file_name=f"kolachat_conversation_{timestamp}.md",
|
| 707 |
-
mime="text/markdown",
|
| 708 |
-
use_container_width=True
|
| 709 |
-
)
|
| 710 |
else:
|
| 711 |
-
st.caption("
|
| 712 |
-
|
| 713 |
st.divider()
|
| 714 |
-
st.subheader("Crédits")
|
| 715 |
st.markdown("""
|
| 716 |
-
**Auteur :** Sidoine K. YEBADOKPO
|
| 717 |
-
*Expert en Analyse de Données*
|
| 718 |
-
|
| 719 |
-
📧 [syebadokpo@gmail.com](mailto:syebadokpo@gmail.com)
|
| 720 |
📞 +229 96 91 13 46
|
| 721 |
""")
|
| 722 |
|
| 723 |
-
|
| 724 |
# -----------------------------------------------------------------------------
|
| 725 |
-
#
|
| 726 |
# -----------------------------------------------------------------------------
|
| 727 |
-
#
|
| 728 |
-
|
| 729 |
-
|
| 730 |
-
with
|
| 731 |
-
|
| 732 |
-
|
| 733 |
-
|
| 734 |
-
|
| 735 |
-
|
| 736 |
-
|
| 737 |
-
|
| 738 |
-
|
| 739 |
-
|
| 740 |
-
|
| 741 |
-
|
| 742 |
-
|
| 743 |
-
|
| 744 |
-
|
| 745 |
-
|
| 746 |
-
|
| 747 |
-
|
| 748 |
-
|
| 749 |
-
|
| 750 |
-
|
| 751 |
-
|
| 752 |
-
|
| 753 |
-
|
| 754 |
-
|
| 755 |
-
|
| 756 |
-
|
| 757 |
-
|
| 758 |
-
|
| 759 |
-
|
| 760 |
-
|
| 761 |
-
|
| 762 |
-
|
| 763 |
-
|
| 764 |
-
|
| 765 |
-
|
| 766 |
-
chatHistory.scrollTop = chatHistory.scrollHeight;
|
| 767 |
-
}
|
| 768 |
-
</script>
|
| 769 |
-
""",
|
| 770 |
-
unsafe_allow_html=True,
|
| 771 |
-
)
|
| 772 |
-
|
| 773 |
-
|
| 774 |
-
# Area for the chat input box
|
| 775 |
-
input_container = st.container()
|
| 776 |
-
with input_container:
|
| 777 |
-
if st.session_state.selected_model_id is not None:
|
| 778 |
-
current_selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
|
| 779 |
-
placeholder_text = f"Entrez votre message pour la conversation texte..." # Fixed placeholder text as task is text
|
| 780 |
-
user_input = st.chat_input(placeholder=placeholder_text, disabled=False)
|
| 781 |
-
else:
|
| 782 |
-
user_input = st.chat_input(placeholder=f"Veuillez configurer un modèle disponible pour démarrer...", disabled=True)
|
| 783 |
-
st.info(f"Veuillez sélectionner un modèle disponible dans la barre latérale pour commencer. Il n'y a actuellement aucun modèle configuré ou sélectionné.")
|
| 784 |
-
|
| 785 |
-
|
| 786 |
-
# --- Logic to handle user input and trigger generation ---
|
| 787 |
-
# This part runs only when user_input is NOT None (i.e., after the user hits Enter)
|
| 788 |
-
# AND a model is selected AND the reset flag is not set (from buttons)
|
| 789 |
-
if user_input and st.session_state.selected_model_id and not st.session_state.get('_reset_triggered', False):
|
| 790 |
-
# 1. Append the user's message to the chat history
|
| 791 |
-
user_message_entry = {"role": "user", "content": user_input, "type": "text"} # Always text input now
|
| 792 |
-
st.session_state.chat_history.append(user_message_entry)
|
| 793 |
-
|
| 794 |
-
# --- Trigger a rerun to immediately show the user message ---
|
| 795 |
-
# This restarts the script, redrawing the chat (now including the user's message).
|
| 796 |
-
# The code below the chat_input block will then execute on this rerun.
|
| 797 |
-
st.rerun()
|
| 798 |
-
|
| 799 |
-
|
| 800 |
-
# --- This block executes on reruns, check if processing is needed ---
|
| 801 |
-
# Processing is needed if the last message was from the user AND we haven't added an assistant response yet,
|
| 802 |
-
# AND a model is selected, AND a reset was NOT just triggered by a button.
|
| 803 |
-
if st.session_state.chat_history and st.session_state.chat_history[-1]["role"] == "user" and st.session_state.selected_model_id and not st.session_state.get('_reset_triggered', False):
|
| 804 |
-
|
| 805 |
-
# Find info about the currently selected model
|
| 806 |
-
selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
|
| 807 |
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
|
| 812 |
-
|
| 813 |
-
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
| 817 |
-
|
| 818 |
-
|
| 819 |
-
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
|
| 823 |
-
|
| 824 |
-
|
| 825 |
-
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
assistant_avatar = st.session_state.avatars.get("assistant", "❓")
|
| 834 |
-
with st.chat_message("assistant", avatar=assistant_avatar):
|
| 835 |
-
# The spinner message is shown here, wrapping the API call
|
| 836 |
-
with st.spinner(f"KolaChatBot utilise {current_model_display_name} pour générer... 🤔"):
|
| 837 |
-
# Call get_text_response (which now handles RAG internally for Google)
|
| 838 |
-
assistant_response_entry = get_text_response(
|
| 839 |
-
selected_model_id=st.session_state.selected_model_id,
|
| 840 |
-
system_prompt=st.session_state.system_message,
|
| 841 |
-
full_chat_history=st.session_state.chat_history # Pass full history
|
| 842 |
-
)
|
| 843 |
-
# Display the content from the response entry object returned by get_text_response
|
| 844 |
-
# This happens *after* the spinner block
|
| 845 |
-
if assistant_response_entry and assistant_response_entry.get("type") == "text":
|
| 846 |
-
st.markdown(assistant_response_entry.get("content", "Erreur: Réponse texte vide."))
|
| 847 |
-
elif assistant_response_entry: # Handle errors returned as text from the generation function
|
| 848 |
-
st.error(f"Erreur lors de la génération: {assistant_response_entry.get('content', 'Raison inconnue.')}")
|
| 849 |
-
else: # Handle None response from get_text_response
|
| 850 |
-
st.error("La fonction de génération de texte n'a pas renvoyé de réponse valide.")
|
| 851 |
-
|
| 852 |
-
|
| 853 |
-
# --- Append the generated response entry to chat history ---
|
| 854 |
-
if assistant_response_entry is not None:
|
| 855 |
-
# Check if the last message added was the search message placeholder.
|
| 856 |
-
# If so, replace it with the actual response. Otherwise, just append.
|
| 857 |
-
if st.session_state.chat_history[-1]["role"] == "assistant" and \
|
| 858 |
-
st.session_state.chat_history[-1]["content"].startswith("*(KolaChatBot recherche web"):
|
| 859 |
-
# Remove the temporary search message
|
| 860 |
-
st.session_state.chat_history.pop()
|
| 861 |
-
# Append the actual response
|
| 862 |
-
st.session_state.chat_history.append(assistant_response_entry)
|
| 863 |
-
else:
|
| 864 |
-
# Just append the response if no search message was pending
|
| 865 |
-
st.session_state.chat_history.append(assistant_response_entry)
|
| 866 |
-
|
| 867 |
-
# No need for another st.rerun() here, the UI is already updated by the first rerun.
|
| 868 |
-
|
| 869 |
-
|
| 870 |
-
# --- Clear reset flag at the very end of the script if it was set ---
|
| 871 |
-
# This ensures the flag is only True for one full rerun cycle after a button click
|
| 872 |
-
if st.session_state.get('_reset_triggered', False):
|
| 873 |
-
st.session_state._reset_triggered = False
|
|
|
|
| 1 |
import os
|
| 2 |
import streamlit as st
|
| 3 |
+
import json
|
| 4 |
+
from datetime import datetime
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
import google.generativeai as genai
|
| 6 |
+
from duckduckgo_search import DDGS
|
| 7 |
+
from dotenv import load_dotenv
|
|
|
|
|
|
|
| 8 |
|
| 9 |
# -----------------------------------------------------------------------------
|
| 10 |
+
# Configuration de l'environnement et des constantes
|
| 11 |
# -----------------------------------------------------------------------------
|
| 12 |
load_dotenv()
|
|
|
|
|
|
|
| 13 |
|
| 14 |
+
# Configurez vos clés API dans un fichier .env ou dans les secrets de Streamlit Cloud
|
| 15 |
+
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
| 16 |
+
|
| 17 |
+
# Configuration de la bibliothèque Google
|
| 18 |
+
if GOOGLE_API_KEY:
|
| 19 |
+
try:
|
| 20 |
+
genai.configure(api_key=GOOGLE_API_KEY)
|
| 21 |
+
except Exception as e:
|
| 22 |
+
# Cette erreur est critique, donc on l'affiche de manière proéminente.
|
| 23 |
+
st.error(f"Erreur de configuration de l'API Google : {e}")
|
| 24 |
+
else:
|
| 25 |
+
# Avertissement si la clé n'est pas trouvée au démarrage.
|
| 26 |
+
st.warning("La clé API Google (GOOGLE_API_KEY) n'est pas configurée. L'application ne pourra pas fonctionner.")
|
| 27 |
+
|
| 28 |
|
| 29 |
# -----------------------------------------------------------------------------
|
| 30 |
+
# Définition des modèles disponibles
|
| 31 |
# -----------------------------------------------------------------------------
|
| 32 |
+
# Remplacé par les modèles Gemini les plus récents et pertinents.
|
| 33 |
+
# 'latest' pointe toujours vers la version la plus récente du modèle.
|
| 34 |
AVAILABLE_MODELS = [
|
|
|
|
| 35 |
{
|
| 36 |
"id": "gemini-1.5-flash-latest",
|
| 37 |
+
"name": "Gemini 1.5 Flash (Rapide et efficace)",
|
| 38 |
"provider": "google",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
},
|
| 40 |
+
{
|
| 41 |
+
"id": "gemini-1.5-pro-latest",
|
| 42 |
+
"name": "Gemini 1.5 Pro (Le plus performant)",
|
| 43 |
"provider": "google",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
]
|
| 46 |
|
| 47 |
+
DEFAULT_MODEL_ID = "gemini-1.5-flash-latest"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
# -----------------------------------------------------------------------------
|
| 50 |
+
# Initialisation du Session State
|
|
|
|
| 51 |
# -----------------------------------------------------------------------------
|
| 52 |
+
def initialize_session_state():
|
| 53 |
+
"""Initialise toutes les variables nécessaires dans le session state pour éviter les erreurs."""
|
| 54 |
+
|
| 55 |
+
# Messages par défaut
|
| 56 |
+
DEFAULT_SYSTEM_MESSAGE = "Vous êtes KolaChatBot, un assistant IA serviable, créatif et honnête. Répondez en français."
|
| 57 |
+
DEFAULT_STARTER_MESSAGE = "Bonjour ! Je suis KolaChatBot. Comment puis-je vous aider aujourd'hui ? 🤖"
|
| 58 |
+
|
| 59 |
+
# Initialisation de base
|
| 60 |
+
if "selected_model_id" not in st.session_state:
|
| 61 |
+
st.session_state.selected_model_id = DEFAULT_MODEL_ID
|
| 62 |
+
if "system_message" not in st.session_state:
|
| 63 |
+
st.session_state.system_message = DEFAULT_SYSTEM_MESSAGE
|
| 64 |
+
if "starter_message" not in st.session_state:
|
| 65 |
+
st.session_state.starter_message = DEFAULT_STARTER_MESSAGE
|
| 66 |
+
if "chat_history" not in st.session_state:
|
| 67 |
+
st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
+
# Paramètres de génération
|
| 70 |
+
if "max_response_length" not in st.session_state:
|
| 71 |
+
st.session_state.max_response_length = 1024
|
| 72 |
+
if "temperature" not in st.session_state:
|
| 73 |
+
st.session_state.temperature = 0.7
|
| 74 |
+
if "top_p" not in st.session_state:
|
| 75 |
+
st.session_state.top_p = 0.95
|
| 76 |
+
|
| 77 |
+
# Fonctionnalité de recherche web
|
| 78 |
+
if "enable_web_search" not in st.session_state:
|
| 79 |
+
st.session_state.enable_web_search = False
|
| 80 |
+
|
| 81 |
+
# Stockage temporaire pour les résultats de recherche (pour affichage)
|
| 82 |
+
if 'last_search_results' not in st.session_state:
|
| 83 |
+
st.session_state.last_search_results = None
|
| 84 |
+
|
| 85 |
+
# Appel de la fonction d'initialisation au début du script
|
| 86 |
+
initialize_session_state()
|
| 87 |
|
| 88 |
# -----------------------------------------------------------------------------
|
| 89 |
+
# Fonctions d'export de la conversation
|
| 90 |
# -----------------------------------------------------------------------------
|
| 91 |
def format_history_to_txt(chat_history: list[dict]) -> str:
|
| 92 |
+
lines = [f"KolaChatBot Conversation - Exporté le {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"]
|
| 93 |
for message in chat_history:
|
|
|
|
| 94 |
role = "Utilisateur" if message["role"] == "user" else "KolaChatBot"
|
| 95 |
+
lines.append(f"--- {role} ---\n{message['content']}\n\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
return "".join(lines)
|
| 97 |
|
| 98 |
def format_history_to_json(chat_history: list[dict]) -> str:
|
| 99 |
+
export_data = {
|
| 100 |
+
"export_date": datetime.now().isoformat(),
|
| 101 |
+
"conversation": chat_history
|
| 102 |
+
}
|
| 103 |
+
return json.dumps(export_data, indent=2, ensure_ascii=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
def format_history_to_md(chat_history: list[dict]) -> str:
|
| 106 |
lines = [f"# KolaChatBot Conversation\n*Exporté le {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n\n"]
|
| 107 |
for message in chat_history:
|
| 108 |
+
avatar = "👤" if message["role"] == "user" else "🤖"
|
| 109 |
+
role_label = "Utilisateur" if message["role"] == "user" else "KolaChatBot"
|
| 110 |
+
lines.append(f"### {avatar} {role_label}\n\n{message['content']}\n\n---\n\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
return "".join(lines)
|
| 112 |
|
| 113 |
# -----------------------------------------------------------------------------
|
| 114 |
+
# Fonctions principales (Recherche Web et Appel API)
|
| 115 |
# -----------------------------------------------------------------------------
|
| 116 |
+
def perform_web_search(query: str, num_results: int = 5) -> tuple[str, list]:
|
| 117 |
+
"""Effectue une recherche web avec DuckDuckGo et retourne le contexte et les sources."""
|
| 118 |
+
st.session_state.last_search_results = None # Réinitialiser à chaque recherche
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
try:
|
| 120 |
+
with DDGS() as ddgs:
|
| 121 |
+
results = list(ddgs.text(keywords=query, region='fr-fr', max_results=num_results))
|
| 122 |
+
if not results:
|
| 123 |
+
return "Aucun résultat de recherche trouvé.", []
|
| 124 |
+
|
| 125 |
+
formatted_context = ""
|
| 126 |
+
source_details = []
|
| 127 |
+
for i, res in enumerate(results):
|
| 128 |
+
formatted_context += f"[Source {i+1}]\nTitre: {res.get('title', 'N/A')}\nExtrait: {res.get('body', 'N/A')}\nURL: {res.get('href', 'N/A')}\n\n"
|
| 129 |
+
source_details.append(res)
|
| 130 |
+
|
| 131 |
+
st.session_state.last_search_results = source_details # Sauvegarder pour affichage ultérieur
|
| 132 |
+
return formatted_context, source_details
|
| 133 |
except Exception as e:
|
| 134 |
+
return f"Erreur lors de la recherche web: {e}", []
|
| 135 |
|
| 136 |
+
def get_gemini_response_stream(model_id: str, system_prompt: str, chat_history_for_api: list[dict], params: dict):
|
| 137 |
+
"""Appelle l'API Google Gemini et retourne un générateur (stream) pour la réponse."""
|
| 138 |
+
if not GOOGLE_API_KEY:
|
| 139 |
+
yield "Erreur: La clé API Google n'est pas configurée. Veuillez l'ajouter pour continuer."
|
| 140 |
+
return
|
|
|
|
|
|
|
| 141 |
|
| 142 |
+
try:
|
| 143 |
+
model = genai.GenerativeModel(
|
| 144 |
+
model_id,
|
| 145 |
+
system_instruction=system_prompt
|
| 146 |
+
)
|
| 147 |
|
| 148 |
+
gemini_history = []
|
| 149 |
for msg in chat_history_for_api:
|
| 150 |
+
role = 'user' if msg['role'] == 'user' else 'model'
|
| 151 |
+
gemini_history.append({"role": role, "parts": [msg['content']]})
|
| 152 |
+
|
| 153 |
+
current_prompt = gemini_history.pop()['parts'][0]
|
| 154 |
|
| 155 |
generation_config = genai.types.GenerationConfig(
|
| 156 |
+
max_output_tokens=params.get("max_new_tokens"),
|
| 157 |
+
temperature=params.get("temperature"),
|
| 158 |
+
top_p=params.get("top_p"),
|
| 159 |
)
|
| 160 |
+
|
| 161 |
+
response_stream = model.generate_content(
|
| 162 |
+
current_prompt,
|
|
|
|
| 163 |
generation_config=generation_config,
|
| 164 |
+
history=gemini_history,
|
| 165 |
+
stream=True
|
| 166 |
)
|
| 167 |
|
| 168 |
+
for chunk in response_stream:
|
| 169 |
+
if chunk.parts:
|
| 170 |
+
yield chunk.text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
|
| 172 |
except Exception as e:
|
| 173 |
+
yield f"Erreur lors de l'appel à l'API Google: {e}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
# -----------------------------------------------------------------------------
|
| 176 |
+
# Configuration de la page Streamlit
|
| 177 |
# -----------------------------------------------------------------------------
|
| 178 |
+
st.set_page_config(page_title="KolaChatBot IA", page_icon="🤖", layout="wide")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
|
| 180 |
+
st.title("🤖 KolaChatBot IA")
|
| 181 |
+
selected_model_info = next((m for m in AVAILABLE_MODELS if m['id'] == st.session_state.selected_model_id), None)
|
| 182 |
+
st.markdown(f"*Modèle actuel : **{selected_model_info['name']}***")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
|
| 184 |
# -----------------------------------------------------------------------------
# Sidebar: model selection, generation parameters, personalization, export
# -----------------------------------------------------------------------------
with st.sidebar:
    st.header("🛠️ Configuration")

    st.subheader("🧠 Sélection du Modèle")
    # Map model id -> display name for the selectbox's format_func.
    model_options = {model['id']: model['name'] for model in AVAILABLE_MODELS}

    def on_model_change():
        """Reset the conversation when the user switches models.

        Bound to the selectbox below via ``on_change``; the new model id is
        already stored in ``st.session_state.selected_model_id`` by the widget.
        """
        st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
        st.toast("Modèle changé. Conversation réinitialisée.")

    st.selectbox(
        "Choisir le modèle :",
        options=list(model_options.keys()),
        format_func=lambda x: model_options[x],
        key="selected_model_id",
        on_change=on_model_change,
        help="Changer de modèle démarre une nouvelle conversation."
    )

    # Without a Google API key no model can be called; the chat input below
    # is also disabled in that case.
    if not GOOGLE_API_KEY:
        st.error("❌ Clé API Google manquante.")

    st.subheader("⚙️ Paramètres de Génération")
    with st.expander("Ajuster les paramètres", expanded=False):
        # Values persist in session_state under the given keys; defaults are
        # expected to be initialized elsewhere before this renders.
        st.slider("Max Tokens", 128, 8192, key="max_response_length", step=128)
        st.slider("Température", 0.0, 2.0, key="temperature", step=0.05)
        st.slider("Top-P", 0.0, 1.0, key="top_p", step=0.05)

    st.subheader("👤 Personnalisation")
    st.text_area("Message Système / Personnalité", height=100, key="system_message")
    st.text_area("Message de bienvenue", height=100, key="starter_message")

    st.subheader("🌐 Recherche Web (RAG)")
    st.checkbox("Activer la recherche web", key="enable_web_search")

    st.subheader("🔄 Gestion")
    col1, col2 = st.columns(2)
    if col1.button("♻️ Nouvelle Conv.", use_container_width=True, help="Démarre une nouvelle conversation."):
        st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
        st.toast("Nouvelle conversation démarrée.")
        st.rerun()
    if col2.button("🗑️ Effacer", type="primary", use_container_width=True, help="Efface l'historique de la conversation."):
        st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message, "type": "text"}]
        st.toast("Conversation effacée.")
        st.rerun()

    st.subheader("📥 Exporter")
    # Only offer exports once the user has sent something (history > starter).
    if len(st.session_state.chat_history) > 1:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Explicit mime types so browsers receive the correct Content-Type
        # instead of the download_button default.
        st.download_button("TXT", format_history_to_txt(st.session_state.chat_history), f"kolachat_{timestamp}.txt", mime="text/plain")
        st.download_button("JSON", format_history_to_json(st.session_state.chat_history), f"kolachat_{timestamp}.json", mime="application/json")
        st.download_button("Markdown", format_history_to_md(st.session_state.chat_history), f"kolachat_{timestamp}.md", mime="text/markdown")
    else:
        st.caption("La conversation est vide.")

    st.divider()
    st.markdown("""
    **Auteur :** Sidoine K. YEBADOKPO
    *Expert en Analyse de Données*
    📧 syebadokpo@gmail.com
    📞 +229 96 91 13 46
    """)
| 251 |
# -----------------------------------------------------------------------------
# Main chat interface
# -----------------------------------------------------------------------------
# Render the stored conversation on every rerun.
for message in st.session_state.chat_history:
    avatar = "👤" if message["role"] == "user" else "🤖"
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])
        # Assistant messages produced with web search carry a "sources" list.
        if message.get("sources"):
            with st.expander("Sources web consultées", expanded=False):
                for i, source in enumerate(message["sources"]):
                    st.markdown(f"**{i+1}. {source.get('title', 'Titre inconnu')}**\n"
                                f"[*Source*]({source.get('href', '#')})\n"
                                f"> {source.get('body', 'Aucun extrait disponible.')}\n---")

# Handle a new user message (input is disabled when the API key is missing).
if prompt := st.chat_input("Envoyer un message...", disabled=not GOOGLE_API_KEY):
    # Persist and display the user's message.
    st.session_state.chat_history.append({"role": "user", "content": prompt, "type": "text"})
    with st.chat_message("user", avatar="👤"):
        st.markdown(prompt)

    # Produce and display the assistant's reply.
    with st.chat_message("assistant", avatar="🤖"):
        # Copy each message dict individually: a plain list.copy() is shallow
        # and shares the dicts, so the RAG rewrite below would clobber the
        # user's stored message inside st.session_state.chat_history.
        history_for_api = [dict(m) for m in st.session_state.chat_history]

        # Web search (RAG): augment the last message with retrieved context.
        if st.session_state.enable_web_search:
            with st.spinner("KolaChatBot recherche sur le web..."):
                search_context, sources = perform_web_search(prompt)

            if sources:
                rag_prompt = (
                    "En te basant STRICTEMENT sur les informations suivantes issues d'une recherche web, réponds à la question de l'utilisateur. "
                    "Cite tes sources en utilisant le format [Source X] après chaque phrase concernée.\n\n"
                    f"--- CONTEXTE DE LA RECHERCHE ---\n{search_context}\n--- FIN DU CONTEXTE ---\n\n"
                    f"Question de l'utilisateur : {prompt}"
                )
                # Only the API-bound copy is rewritten; session history keeps
                # the user's original wording.
                history_for_api[-1]['content'] = rag_prompt
            else:
                st.toast("La recherche web n'a pas fourni de résultats.")

        # Generation parameters forwarded to the model call.
        params = {
            "max_new_tokens": st.session_state.max_response_length,
            "temperature": st.session_state.temperature,
            "top_p": st.session_state.top_p,
        }

        # Stream the reply token-by-token; write_stream returns the full text.
        response_content = st.write_stream(get_gemini_response_stream(
            st.session_state.selected_model_id,
            st.session_state.system_message,
            history_for_api,
            params
        ))

        # Persist the complete assistant reply.
        assistant_message = {"role": "assistant", "content": response_content, "type": "text"}
        # NOTE(review): assumes perform_web_search stashes its hits in
        # st.session_state.last_search_results — confirm in its definition.
        if st.session_state.get('last_search_results'):
            assistant_message["sources"] = st.session_state.last_search_results
            st.session_state.last_search_results = None  # consume once

        st.session_state.chat_history.append(assistant_message)
        # Rerun only when sources were attached, so the expander renders
        # through the history loop above.
        if "sources" in assistant_message:
            st.rerun()