import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import random
import pandas as pd
# --- CONFIGURATION ---
st.set_page_config(page_title="FINISHA-F-SCRATCH Local Arena", layout="wide")
st.title("⚔️ SLM FROM SCRATCH Model Arena (Local Loading): choose the most unique SLM!")
# List of Raana-ia models (make sure they all fit in memory)
MODELS_LIST = [
    "Finisha-f-scratch/SMCLEM",
    "Finisha-f-scratch/InutileGheya",
    "Finisha-f-scratch/Gheya-111m",
    "Finisha-f-scratch/Gheya-Nacid-instruct-v1",
    "Finisha-f-scratch/Mini-mistral-v1",
    "Finisha-f-scratch/Charlotte-amity",
    "Finisha-f-scratch/Charlotte-amity-v2",
    "Finisha-f-scratch/Tiny-charlotte",
    "Finisha-f-scratch/mini-gamia",
    "Finisha-f-scratch/SoraNova",
    "Finisha-f-scratch/Expedia-LLM",
    "Clem27-assistants/Learnia-Empathic-Tchat",
    "Finisha-F-scratch/Learnia-tchat-v1",
    "Finisha-F-scratch/Neko-charlotte",
    "Finisha-F-scratch/Sala",
    "Finisha-F-scratch/Charlotte-gheya",
    "Finisha-F-scratch/microBook",
    "Finisha-F-scratch/Chichalia-v1",
    "Finisha-f-scratch/Claire",
    "Finisha-F-scratch/Rosa-4M",
    "Finisha-F-scratch/Nelya",
    "Finisha-F-scratch/Nelya-neko",
    "Finisha-F-scratch/Dona-KITY-10m",
    "Finisha-F-scratch/KLA-SLM-CODING",
    "Finisha-F-scratch/Tiny-DonaKitty",
    "Finisha-F-scratch/Serena",
    "Finisha-F-scratch/Perso-SLM",
    "Finisha-F-scratch/Tiny-Rosa",
    "Finisha-F-scratch/Iris-La-guepe",
    "Finisha-F-scratch/Ilyana-lamina-Nacid",
    "Finisha-F-scratch/Ilyana-pretrain",
    "Finisha-F-scratch/Gheya-63M",
    "Finisha-F-scratch/Copina",
    "Finisha-F-scratch/Ayako-CHINESS",
    "Finisha-F-scratch/Tiny-lamina-English",
    "Finisha-F-scratch/Gheya-Nacid",
    "Finisha-F-scratch/Learnia-business",
    "Finisha-F-scratch/Lam-pest",
    "Finisha-F-scratch/ReeCi",
    "Finisha-F-scratch/melta-english",
    "Finisha-F-scratch/Maya-152M-Flowers",
    "Finisha-F-scratch/Lam-4-zero-F",
    "Finisha-F-scratch/Learnia",
    "Finisha-F-scratch/Qsana-coder-base",
    "Finisha-F-scratch/Coliria",
    "Finisha-F-scratch/Charlotte-2b",
    "Finisha-F-scratch/Natalia-pretrain",
    "Finisha-F-scratch/LilyStory",
    "Finisha-F-scratch/Nephaella"
]
# --- MODEL LOADING (CACHED) ---
# Keep at most two pipelines in the cache so memory does not grow with every duel.
@st.cache_resource(max_entries=2)
def load_model_pipeline(model_id):
    """Load the model and tokenizer into memory."""
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # device_map="auto" puts the model on the GPU when one is available
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto"
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
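# Hedged sketch: if GPU memory still grows across duels, clear the cached
# pipelines and release CUDA memory manually. clear_model_cache is an
# illustrative helper, not part of the original app; .clear() is the
# standard Streamlit method on an @st.cache_resource-decorated function.
def clear_model_cache():
    load_model_pipeline.clear()  # drop all cached pipelines
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return freed blocks to the CUDA driver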
# --- VOTING AND SCORING LOGIC ---
if 'scores' not in st.session_state:
    st.session_state.scores = {m: 1200 for m in MODELS_LIST}
if "model_a" not in st.session_state:
    st.session_state.model_a = ""
    st.session_state.model_b = ""
    st.session_state.resp_a = ""
    st.session_state.resp_b = ""
    st.session_state.voted = False
| # --- INTERFACE UTILISATEUR --- | |
| user_prompt = st.text_area("Entrez votre question :", placeholder="Écris un poème sur l'intelligence artificielle.") | |
| if st.button("Lancer le duel"): | |
| if user_prompt: | |
| # Sélection aléatoire | |
| sampled = random.sample(MODELS_LIST, 2) | |
| st.session_state.model_a, st.session_state.model_b = sampled | |
| with st.spinner(f"Chargement et génération en cours..."): | |
| # Génération Modèle A | |
| pipe_a = load_model_pipeline(st.session_state.model_a) | |
| out_a = pipe_a(user_prompt, max_new_tokens=150, do_sample=True, temperature=0.7) | |
| st.session_state.resp_a = out_a[0]['generated_text'].replace(user_prompt, "") | |
| # Génération Modèle B | |
| pipe_b = load_model_pipeline(st.session_state.model_b) | |
| out_b = pipe_b(user_prompt, max_new_tokens=150, do_sample=True, temperature=0.7) | |
| st.session_state.resp_b = out_b[0]['generated_text'].replace(user_prompt, "") | |
| st.session_state.voted = False | |
| else: | |
| st.error("Le prompt est vide !") | |
# Display the results
if st.session_state.resp_a:
    col1, col2 = st.columns(2)
    with col1:
        st.info(f"**Response A:**\n\n{st.session_state.resp_a}")
    with col2:
        st.info(f"**Response B:**\n\n{st.session_state.resp_b}")
    if not st.session_state.voted:
        c1, c2, c3 = st.columns(3)
        if c1.button("A is better"):
            st.session_state.scores[st.session_state.model_a] += 20
            st.session_state.voted = True
        if c2.button("Tie"):
            st.session_state.voted = True
        if c3.button("B is better"):
            st.session_state.scores[st.session_state.model_b] += 20
            st.session_state.voted = True
    if st.session_state.voted:
        st.success(f"Result: A was **{st.session_state.model_a}** | B was **{st.session_state.model_b}**")
        if st.button("New duel"):
            # Reset the full duel state, not just response A
            st.session_state.resp_a = ""
            st.session_state.resp_b = ""
            st.session_state.voted = False
            st.rerun()
# Leaderboard (note: the flat +20 per win is a simplification, not a true Elo update)
st.divider()
st.subheader("📊 Leaderboard")
st.table(pd.DataFrame(st.session_state.scores.items(), columns=["Model", "ELO"]).sort_values("ELO", ascending=False))
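# Hedged sketch of a rating-aware Elo update that could replace the flat
# +20 in the vote handlers above. elo_update and K_FACTOR are illustrative
# names not present in the original app; the formula is the standard Elo
# expected-score update.
K_FACTOR = 32

def elo_update(rating_a, rating_b, score_a, k=K_FACTOR):
    """Return updated (a, b) ratings; score_a is 1.0 for an A win, 0.5 for a tie, 0.0 for a loss."""
    expected_a = 1 / (1 + 10 ** ((rating_b - rating_a) / 400))
    new_a = rating_a + k * (score_a - expected_a)
    new_b = rating_b + k * ((1 - score_a) - (1 - expected_a))
    return new_a, new_b

# Example wiring into the "A is better" handler:
#   a, b = st.session_state.model_a, st.session_state.model_b
#   st.session_state.scores[a], st.session_state.scores[b] = elo_update(
#       st.session_state.scores[a], st.session_state.scores[b], 1.0
#   )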