import streamlit as st
import requests
import tempfile
import uuid
import os
from dotenv import load_dotenv
from groq import Groq
from langsmith import Client as LangSmithClient
import time
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
# Load credentials from .env into the process environment.
load_dotenv()
# HF_TOKEN already embeds the "Bearer " auth scheme so callers use it verbatim.
# NOTE(review): becomes the literal "Bearer None" when HF_TOKEN_SECRET is
# unset -- verify deployment configuration.
HF_TOKEN = f"Bearer {os.getenv('HF_TOKEN_SECRET')}"
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
# Module-level API clients shared by every Streamlit rerun of this script.
groq_client = Groq(api_key=GROQ_API_KEY)
langsmith_client = LangSmithClient(api_key=LANGSMITH_API_KEY)
# Global page configuration -- must run before any other st.* call.
st.set_page_config(
    page_title="🧠 NLP Magique",  # was mojibake "đź§ " (UTF-8 emoji decoded as cp1252); restored
    page_icon="✨",
    layout="wide",
    initial_sidebar_state="collapsed"
)
# The two markdown blocks below are empty: the CSS/HTML they once carried
# appears to have been stripped during extraction. Kept as no-ops.
st.markdown("""
""", unsafe_allow_html=True)
st.markdown("""
""", unsafe_allow_html=True)
def generate_text(prompt):
    """Generate a continuation of *prompt* via the Hugging Face gpt2 inference API.

    Returns the generated text on success; on an HTTP error or a network
    failure, surfaces the problem through st.error and returns a French
    fallback string.
    """
    url = "https://api-inference.huggingface.co/models/gpt2"
    headers = {"Authorization": HF_TOKEN}
    payload = {"inputs": prompt}  # redundant f-string wrapper removed; prompt is already a str
    try:
        # A timeout keeps the Streamlit script from hanging forever if the
        # inference endpoint stalls (the original call had no timeout).
        response = requests.post(url, headers=headers, json=payload, timeout=60)
    except requests.RequestException as exc:
        st.error(f"Erreur: {exc}")
        return "Erreur lors de la génération."
    if response.ok:
        return response.json()[0]["generated_text"]
    st.error(f"Erreur: {response.status_code} - {response.text}")
    return "Erreur lors de la génération."
def summarize_text(text):
    """Summarize *text* via the Hugging Face bart-large-cnn inference API.

    Returns the summary on success; on an HTTP error or a network failure,
    surfaces the problem through st.error and returns a French fallback string.
    """
    url = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    headers = {"Authorization": HF_TOKEN}
    payload = {"inputs": text}
    try:
        # Timeout added so a stalled endpoint cannot block the app forever.
        response = requests.post(url, headers=headers, json=payload, timeout=60)
    except requests.RequestException as exc:
        st.error(f"Erreur: {exc}")
        return "Erreur lors du résumé."
    if response.ok:
        return response.json()[0]["summary_text"]
    st.error(f"Erreur: {response.status_code} - {response.text}")
    return "Erreur lors du résumé."
def transcribe_audio(path):
    """Transcribe the audio file at *path* via the HF whisper-large-v2 API.

    Streams the raw bytes as application/octet-stream. Returns the transcript
    on success; on an HTTP error or a network failure, surfaces the problem
    through st.error and returns a French fallback string.
    """
    url = "https://api-inference.huggingface.co/models/openai/whisper-large-v2"
    headers = {
        "Authorization": HF_TOKEN,
        "Content-Type": "application/octet-stream"
    }
    try:
        with open(path, "rb") as f:
            # Longer timeout than the text endpoints: the upload plus the
            # transcription itself can be slow for audio.
            response = requests.post(url, headers=headers, data=f, timeout=120)
    except requests.RequestException as exc:
        st.error(f"Erreur: {exc}")
        return "Erreur lors de la transcription."
    if response.ok:
        return response.json()["text"]
    st.error(f"Erreur: {response.status_code} - {response.text}")
    return "Erreur lors de la transcription."
def chat_with_grok(user_input, conversation_id):
    """Send *user_input* to the Groq chat API (llama-3.3-70b-versatile) and return the reply.

    A LangSmith run is opened for tracing; on any failure the run (if one was
    actually created) is marked as errored, the error is shown via st.error,
    and a French fallback message is returned.
    """
    # Initialised up front so the except-branch can test it directly instead
    # of the fragile `'run' in locals()` inspection the original used.
    run = None
    try:
        run = langsmith_client.create_run(
            name="Grok_Chat",
            run_type="chain",
            inputs={"user_input": user_input, "conversation_id": conversation_id}
        )
        response = groq_client.chat.completions.create(
            messages=[
                {"role": "system",
                 "content": "Vous êtes Grok, un assistant IA créé par xAI. Répondez en français avec précision et clarté."},
                {"role": "user", "content": user_input}
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.7,
            max_tokens=1000
        )
        response_text = response.choices[0].message.content
        # NOTE(review): recent langsmith clients return None from create_run,
        # which is why the update is guarded -- confirm against the installed
        # langsmith version whether run.update() has this signature.
        if run:
            run.update({"outputs": response_text, "end_time": time.time()})
        return response_text
    except Exception as e:
        if run:
            run.update({"outputs": {"error": str(e)}, "end_time": time.time(), "status": "error"})
        st.error(f"Erreur: {str(e)}")
        return "Une erreur s'est produite. Réessayez."
# Navbar -- the HTML/CSS markup originally embedded in these st.markdown
# calls appears to have been stripped during extraction; the strings are kept
# as minimal placeholders rather than re-inventing the lost markup.
st.markdown('', unsafe_allow_html=True)
st.markdown(' NLP Magique ', unsafe_allow_html=True)
tabs = [
    {"name": "Résumé", "icon": "fas fa-compress"},
    {"name": "Génération", "icon": "fas fa-pen-fancy"},
    {"name": "Transcription", "icon": "fas fa-microphone"},
    {"name": "Chatbot", "icon": "fas fa-comment-dots"},
    {"name": "LangSmith Monitoring", "icon": "fas fa-chart-line"}
]
# Default to the first tab on the very first rerun.
selected_tab = st.session_state.get('selected_tab', tabs[0]["name"])
for tab in tabs:
    # Fixes vs. original: st.button has no `unsafe_allow_html` parameter
    # (passing it raises TypeError), so it was dropped; the key f-string now
    # uses single quotes inside double quotes, which is valid on all Python
    # versions (nested same-type quotes require 3.12+); the unused `active`
    # local (left over from the stripped HTML) was removed.
    if st.button(f' {tab["name"]}', key=f"nav_{tab['name']}", help=tab["name"]):
        st.session_state.selected_tab = tab["name"]
        st.rerun()
st.markdown('', unsafe_allow_html=True)
# Initialize per-user session state (persists across Streamlit reruns).
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []  # list of {"role": ..., "content": ...} dicts
if 'conversation_id' not in st.session_state:
    st.session_state.conversation_id = str(uuid.uuid4())  # stable id passed to LangSmith tracing
if 'langsmith_runs' not in st.session_state:
    st.session_state.langsmith_runs = []  # local mirror of run metadata, consumed by the monitoring tab
# Page title banner (its surrounding HTML appears to have been stripped).
st.markdown('✨ NLP Magique ', unsafe_allow_html=True)
if selected_tab == "Génération":
    # Text-generation tab: prompt in, gpt2 continuation out.
    with st.container():
        st.markdown("", unsafe_allow_html=True)
        st.subheader("Génération de texte")
        prompt = st.text_area("Entrez votre idée :", "La médecine moderne", height=150)
        # NOTE(review): this slider value is never used -- generate_text()
        # takes no temperature parameter. Wire it through or remove the widget.
        temp = st.slider("Créativité", 0.1, 1.0, 0.7, step=0.1)
        if st.button("Générer"):
            with st.spinner("Génération..."):
                output = generate_text(prompt)
            # The result-card markup was stripped; "Copier" is the copy-button
            # label that survived from the original template.
            st.markdown(f'{output}Copier', unsafe_allow_html=True)
        st.markdown("", unsafe_allow_html=True)
elif selected_tab == "Résumé":
with st.container():
st.markdown("", unsafe_allow_html=True)
st.subheader("Résumé de texte")
texte = st.text_area("Collez votre texte :", height=200)
if st.button("Résumer"):
with st.spinner("Résumé..."):
summary = summarize_text(texte)
st.markdown(
f'
{summary}Copier
',
unsafe_allow_html=True)
st.markdown("
", unsafe_allow_html=True)
elif selected_tab == "Transcription":
with st.container():
st.markdown("", unsafe_allow_html=True)
st.subheader("Transcription audio")
audio_file = st.file_uploader("Chargez un audio (max 30s)", type=["wav", "mp3", "m4a"])
if audio_file:
with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
tmp_file.write(audio_file.read())
audio_path = tmp_file.name
st.audio(audio_path)
if st.button("Transcrire"):
with st.spinner("Transcription..."):
transcript = transcribe_audio(audio_path)
st.markdown(
f'
{transcript}Copier
',
unsafe_allow_html=True)
st.markdown("
", unsafe_allow_html=True)
elif selected_tab == "Chatbot":
with st.container():
st.markdown('', unsafe_allow_html=True)
st.markdown('', unsafe_allow_html=True)
st.markdown('
', unsafe_allow_html=True)
for message in st.session_state.chat_history:
if message['role'] == 'user':
st.markdown(f'
{message["content"]}
',
unsafe_allow_html=True)
else:
st.markdown(f'
{message["content"]}
', unsafe_allow_html=True)
st.markdown('
', unsafe_allow_html=True)
st.markdown('
', unsafe_allow_html=True)
user_input = st.text_input("Votre message :", key="chat_input", placeholder="Tapez ici...")
col1, col2 = st.columns([3, 1])
with col1:
if st.button("Envoyer", key="chat_send"):
if user_input:
with st.spinner("Grok répond..."):
st.session_state.chat_history.append({"role": "user", "content": user_input})
response = chat_with_grok(user_input, st.session_state.conversation_id)
st.session_state.chat_history.append({"role": "assistant", "content": response})
st.session_state.langsmith_runs.append({
"input": user_input,
"output": response,
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
"input_length": len(user_input)
})
st.rerun()
with col2:
if st.button("Effacer", key="chat_clear"):
st.session_state.chat_history = []
st.session_state.conversation_id = str(uuid.uuid4())
st.session_state.langsmith_runs = []
st.rerun()
st.markdown('
', unsafe_allow_html=True)
st.markdown('
', unsafe_allow_html=True)
elif selected_tab == "LangSmith Monitoring":
with st.container():
st.markdown("", unsafe_allow_html=True)
st.subheader("LangSmith Monitoring")
if st.session_state.langsmith_runs:
df = pd.DataFrame(st.session_state.langsmith_runs)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['hour'] = df['timestamp'].dt.strftime('%Y-%m-%d %H:00')
# Chart 1: Interactions per Hour
st.markdown("
Interactions par heure ", unsafe_allow_html=True)
chart_type = st.selectbox("Type de graphique", ["Bar", "Line", "Area"], key="chart_type_interactions")
interaction_counts = df.groupby('hour').size().reset_index(name='count')
if chart_type == "Bar":
fig = px.bar(
interaction_counts,
x='hour',
y='count',
color_discrete_sequence=['#4f46e5'],
title="Nombre d'interactions par heure",
labels={'hour': 'Heure', 'count': 'Nombre d\'interactions'}
)
elif chart_type == "Line":
fig = px.line(
interaction_counts,
x='hour',
y='count',
color_discrete_sequence=['#4f46e5'],
title="Nombre d'interactions par heure",
labels={'hour': 'Heure', 'count': 'Nombre d\'interactions'},
markers=True
)
else: # Area
fig = px.area(
interaction_counts,
x='hour',
y='count',
color_discrete_sequence=['#4f46e5'],
title="Nombre d'interactions par heure",
labels={'hour': 'Heure', 'count': 'Nombre d\'interactions'}
)
fig.update_layout(
plot_bgcolor='rgba(0,0,0,0)',
paper_bgcolor='rgba(0,0,0,0)',
font=dict(family="Inter, sans-serif", size=12, color="#1f2937"),
xaxis_tickangle=45,
showlegend=False,
margin=dict(l=40, r=40, t=80, b=40)
)
st.plotly_chart(fig, use_container_width=True)
# Chart 2: Input Length per Interaction
st.markdown("
Longueur des entrées par interaction ", unsafe_allow_html=True)
input_length_chart_type = st.selectbox("Type de graphique", ["Bar", "Line", "Area"],
key="chart_type_input_length")
input_lengths = df.groupby('hour')['input_length'].mean().reset_index(name='avg_length')
if input_length_chart_type == "Bar":
fig2 = px.bar(
input_lengths,
x='hour',
y='avg_length',
color_discrete_sequence=['#7c3aed'],
title="Longueur moyenne des entrées par heure",
labels={'hour': 'Heure', 'avg_length': 'Longueur moyenne (caractères)'}
)
elif input_length_chart_type == "Line":
fig2 = px.line(
input_lengths,
x='hour',
y='avg_length',
color_discrete_sequence=['#7c3aed'],
title="Longueur moyenne des entrées par heure",
labels={'hour': 'Heure', 'avg_length': 'Longueur moyenne (caractères)'},
markers=True
)
else: # Area
fig2 = px.area(
input_lengths,
x='hour',
y='avg_length',
color_discrete_sequence=['#7c3aed'],
title="Longueur moyenne des entrées par heure",
labels={'hour': 'Heure', 'avg_length': 'Longueur moyenne (caractères)'}
)
fig2.update_layout(
plot_bgcolor='rgba(0,0,0,0)',
paper_bgcolor='rgba(0,0,0,0)',
font=dict(family="Inter, sans-serif", size=12, color="#1f2937"),
xaxis_tickangle=45,
showlegend=False,
margin=dict(l=40, r=40, t=80, b=40)
)
st.plotly_chart(fig2, use_container_width=True)
# Table: Run Details
st.markdown("
Détails des interactions ", unsafe_allow_html=True)
st.markdown('
', unsafe_allow_html=True)
st.markdown('Timestamp Input Output Longueur entrée ',
unsafe_allow_html=True)
for run in st.session_state.langsmith_runs:
st.markdown(
f'{run["timestamp"]} {run["input"]} {run["output"]} {run["input_length"]} ',
unsafe_allow_html=True
)
st.markdown('
', unsafe_allow_html=True)
else:
st.write("Aucune donnée de monitoring disponible.")
st.markdown("
", unsafe_allow_html=True)
st.markdown("""
""", unsafe_allow_html=True)