import streamlit as st
from ctransformers import AutoModelForCausalLM
# Streamlit page configuration
st.set_page_config(page_title="Math Assistant", page_icon="🔢", layout="wide")
@st.cache_resource
def load_model():
    """Load the GGUF model (cached by Streamlit via st.cache_resource)."""
    model_name = "analist/deepseek-math-gguf"  # replace with your own model repo
    model = AutoModelForCausalLM.from_pretrained(model_name, model_file="model.gguf")
    return model
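# Note: if ctransformers was installed with CUDA support, part of the model can
# be offloaded to the GPU by passing e.g. gpu_layers=50 to from_pretrained
# above (the exact layer count is an assumption; tune it to your hardware).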
def generate_response(prompt, model):
    """Generate a response from the prompt.

    ctransformers GGUF models are called directly on the prompt string and
    return the generated continuation, so no tokenizer or torch is needed.
    """
    response = model(
        prompt,
        max_new_tokens=1200,
        temperature=0.7,
        top_p=0.95,
    )
    # Keep only the text after the response marker, in case the model echoes it
    return response.split("### Response:")[-1].strip()
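# Optional: ctransformers can also stream tokens (stream=True yields text
# chunks as they are generated), which pairs naturally with st.write_stream
# for incremental display. A minimal sketch under that assumption, using the
# same sampling parameters as generate_response above:
def stream_response(prompt, model):
    """Yield the response chunk by chunk instead of waiting for the full text."""
    for chunk in model(prompt, max_new_tokens=1200, temperature=0.7,
                       top_p=0.95, stream=True):
        yield chunk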
def format_prompt(question):
    """Format the prompt the same way as during training."""
    return f"""Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.
Before answering, think carefully about the question and create a step-by-step chain of thought to ensure a logical and accurate response.
Your goal is to teach maths to a beginner, so make it friendly and accessible. Break down your chain of thought so that he/she can understand.
### Instruction:
You are a maths expert with advanced knowledge in pedagogy, arithmetic, geometry, analysis, and calculus.
Please answer the following question.
### Question:
{question}
### Response:"""
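# Example: format_prompt("What is the derivative of x**2?") returns the full
# instruction-formatted prompt with the question slotted into the template.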
def main():
    # Application title
    st.title("🔢 Math Assistant")
    st.markdown("---")

    # Load the model
    with st.spinner("Loading the model..."):
        model = load_model()
    # Initialize the message history in session state
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display the message history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # User input area
    if question := st.chat_input("Ask your math question..."):
        # Display the user's question
        with st.chat_message("user"):
            st.markdown(question)
        st.session_state.messages.append({"role": "user", "content": question})

        # Generate and display the response
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                prompt = format_prompt(question)
                response = generate_response(prompt, model)
                # Strip the reasoning tags emitted by the model
                response = response.replace("<think>", "").replace("</think>", "")
                st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
    # Button to clear the history
    if st.sidebar.button("Clear history"):
        st.session_state.messages = []
        st.rerun()
    # Information in the sidebar
    with st.sidebar:
        st.markdown("### About")
        st.markdown("""
        This assistant uses a DeepSeek model specifically trained to:
        - Explain mathematical concepts
        - Solve problems step by step
        - Provide clear, beginner-friendly explanations
        """)
if __name__ == "__main__":
    main()
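# To run locally (assumption: this file is saved as app.py and both streamlit
# and ctransformers are installed, e.g. via `pip install streamlit ctransformers`):
#
#     streamlit run app.py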