1joker/joker / app.py
download
raw
5.6 kB
#!/usr/bin/env python3
"""
Joker Assistant — free-tier build for HuggingFace Spaces.
- LLM : Groq — llama-3.3-70b-versatile (free)
- STT : Groq — whisper-large-v3-turbo (free)
- TTS : Microsoft Edge-TTS — fr-FR-HenriNeural (100% free)
- AVATAR : D-ID API (optional)
"""
import os, re, uuid, time, base64, tempfile, asyncio, threading, requests
from urllib.parse import quote_plus
from pathlib import Path
from flask import Flask, jsonify, request, send_file, send_from_directory
from flask_cors import CORS
from dotenv import load_dotenv
from groq import Groq

# Pull credentials/flags from a local .env file (if present) and the environment.
load_dotenv()
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
DID_API_KEY = os.environ.get("DID_API_KEY", "")
DID_IMAGE_ID = os.environ.get("DID_IMAGE_ID", "")
USE_DID = os.environ.get("USE_DID", "false").lower() == "true"

if not GROQ_API_KEY:
    # Fall back to a placeholder so the app can still boot; Groq calls will fail
    # until a real key is set in the Space's environment variables.
    print("⚠️ GROQ_API_KEY manquante - définir dans Variables d'environnement HF")
    GROQ_API_KEY = "demo_key"

groq_client = Groq(api_key=GROQ_API_KEY)

# Serve static assets (index.html, …) straight from the repository root.
app = Flask(__name__, static_folder=".", static_url_path="")
CORS(app)

# Generated MP3s live in a temp dir and are served back via /audio/<uuid>.
AUDIO_DIR = Path(tempfile.gettempdir()) / "joker_audio"
AUDIO_DIR.mkdir(exist_ok=True)

MAX_MSG = 1000                 # max accepted chat-message length (characters)
MAX_AUDIO = 10 * 1024 * 1024   # max accepted upload size for /transcribe (10 MB)
# Strict UUIDv4 pattern — validates the <aid> path parameter of /audio/<aid>.
UUID_RE = re.compile(r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$')

# In-memory chat history shared by every client (single-user demo; not persisted).
conversation_history = []

# System prompt giving the LLM its Joker persona (kept in French on purpose —
# it is sent verbatim to the model, so do not translate).
JOKER_PROMPT = """Tu es le Joker — l'archétype du chaos, de l'intelligence tordue et du rire sombre.
Tu parles en français, avec une voix dramatique, des tournures théâtrales, et tu glisses parfois un rire "Ha ha ha !" ou "Hehehehe...".
Tu es brillant, imprévisible, philosophique mais dangereux. Tu poses des questions qui font réfléchir.
Tu ne refuses jamais de répondre — tu transformes tout en jeu.
Garde tes réponses entre 2 et 5 phrases maximum — impactantes, mémorables.
N'utilise JAMAIS d'astérisques ou de formatage markdown — parle comme si tu étais en scène."""
# Edge-TTS
async def _edge_tts_async(text: str, output_path: Path):
    """Synthesize *text* to an MP3 file at *output_path* via Microsoft Edge-TTS."""
    # Imported lazily so the app can start even if the package is missing.
    import edge_tts

    tts = edge_tts.Communicate(text, voice="fr-FR-HenriNeural", rate="-5%", pitch="-15Hz")
    await tts.save(str(output_path))
def tts_edge(text: str) -> Path | None:
    """Run Edge-TTS synchronously; return the generated MP3 path, or None on failure."""
    try:
        target = AUDIO_DIR / f"{uuid.uuid4()}.mp3"
        asyncio.run(_edge_tts_async(text, target))
        # Treat a missing or zero-byte file as a failed synthesis.
        if target.exists() and target.stat().st_size > 0:
            return target
    except Exception as e:
        app.logger.warning(f"Edge-TTS: {e}")
    return None
# Routes principales
@app.route("/")
def index():
    """Serve the single-page UI from the repository root."""
    return send_from_directory(".", "index.html")
@app.route("/status")
def status():
    """Report a health/configuration summary for the front-end."""
    info = {
        "ok": True,
        "model": "llama-3.3-70b-versatile (Groq)",
        "tts": "edge-tts (gratuit)",
        "did_avatar": bool(DID_API_KEY),
        "space": "HuggingFace",
    }
    return jsonify(info)
@app.route("/chat", methods=["POST"])
def chat():
    """Handle one chat turn: validate input, query the LLM, then synthesize TTS.

    Expects JSON ``{"message": str}``. Returns ``{"text": str, "audio_url": str|None}``
    (``audio_url`` is None when TTS fails — audio is best-effort).
    """
    data = request.get_json(silent=True)
    if not data:
        return jsonify({"error": "JSON invalide"}), 400
    msg = data.get("message", "")
    # BUG FIX: reject non-string payloads up front — the original called
    # .strip() on whatever arrived and crashed with AttributeError (raw 500).
    if not isinstance(msg, str):
        return jsonify({"error": "Message invalide"}), 400
    msg = msg.strip()
    if not msg or len(msg) > MAX_MSG:
        return jsonify({"error": "Message invalide"}), 400
    conversation_history.append({"role": "user", "content": msg})
    # Cap the shared history so a long-running Space doesn't grow memory without
    # bound; only the last 20 messages are ever sent to the model anyway.
    del conversation_history[:-40]
    try:
        # LLM call (Groq): system persona + recent history.
        llm_resp = groq_client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "system", "content": JOKER_PROMPT},
                      *conversation_history[-20:]],
            max_tokens=300, temperature=0.92,
        )
        reply = llm_resp.choices[0].message.content.strip()
        conversation_history.append({"role": "assistant", "content": reply})
        # TTS is best-effort: a failure just yields audio_url = None.
        audio_url = None
        audio_path = tts_edge(reply)
        if audio_path:
            audio_url = f"/audio/{audio_path.stem}"
        return jsonify({
            "text": reply,
            "audio_url": audio_url
        })
    except Exception as e:
        app.logger.error(f"Chat error: {e}")
        return jsonify({"error": "Erreur serveur"}), 500
@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Transcribe an uploaded audio file to French text via Groq Whisper.

    Expects a multipart upload under the ``audio`` field (≤ MAX_AUDIO bytes).
    Returns ``{"text": str}`` on success, 422 when nothing was recognized.
    """
    if "audio" not in request.files:
        return jsonify({"error": "Fichier audio manquant"}), 400
    f = request.files["audio"]
    # Measure the upload by seeking to the end — avoids reading it into memory.
    f.seek(0, 2)
    size = f.tell()
    f.seek(0)
    if size > MAX_AUDIO or size == 0:
        return jsonify({"error": "Fichier audio invalide"}), 400
    tmp = AUDIO_DIR / f"rec_{uuid.uuid4()}.webm"
    try:
        f.save(str(tmp))
        with open(tmp, "rb") as af:
            result = groq_client.audio.transcriptions.create(
                model="whisper-large-v3-turbo", file=af, language="fr"
            )
        text = result.text.strip()
        if not text:
            return jsonify({"error": "Rien compris"}), 422
        return jsonify({"text": text})
    except Exception as e:
        app.logger.error(f"Transcribe: {e}")
        return jsonify({"error": "Erreur transcription"}), 500
    finally:
        # BUG FIX: the original only unlinked on the success path, leaking one
        # temp .webm per failed Groq call in AUDIO_DIR.
        tmp.unlink(missing_ok=True)
@app.route("/audio/<aid>")
def serve_audio(aid):
    """Stream a previously generated MP3 identified by its UUIDv4 stem."""
    # Validate before touching the filesystem — also blocks path traversal.
    if not UUID_RE.match(aid):
        return jsonify({"error": "ID invalide"}), 400
    mp3 = AUDIO_DIR / f"{aid}.mp3"
    if not mp3.exists():
        return jsonify({"error": "Non trouvé"}), 404
    return send_file(str(mp3), mimetype="audio/mpeg")
@app.route("/clear", methods=["POST"])
def clear():
    """Reset the shared conversation history."""
    del conversation_history[:]
    return jsonify({"ok": True})
# HuggingFace Spaces entry point — port 7860 is the HF Spaces convention.
if __name__ == "__main__":
    # FIX: dropped a pointless f-prefix (the string had no placeholders; ruff F541).
    print("🃏 Joker Assistant démarré sur http://0.0.0.0:7860")
    app.run(host="0.0.0.0", port=7860, debug=False)

Xet Storage Details

Size:
5.6 kB
·
Xet hash:
62c2b398001e31f721fad42270c3beced60c7cfc93af58c50cfb63b324eb9738

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.