Update app.py
app.py CHANGED
@@ -14,13 +14,13 @@ from huggingface_hub import InferenceClient
 # --- AUTHENTICATION ---
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-# --- CONFIGURATION ---
-#
-# These models are the most stable and avoid the 'ovhcloud' or 'novita' errors.
+# --- CONFIGURATION: 2025 STABLE MODELS ---
+# These models are currently the most reliable on the Hugging Face Free Inference API.
 LLM_MODELS = {
-    "Llama 3.
-    "Qwen 2.5 7B (
-    "
+    "Llama 3.2 3B (Fast & Smart)": "meta-llama/Llama-3.2-3B-Instruct",
+    "Qwen 2.5 7B (Excellent Accuracy)": "Qwen/Qwen2.5-7B-Instruct",
+    "Gemma 2 9B (Google's Best)": "google/gemma-2-9b-it",
+    "Llama 3.3 70B (Powerhouse - Busy)": "meta-llama/Llama-3.3-70B-Instruct"
 }
 
 LANGUAGES = {
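The mapping above feeds get_llm_response() lower in the file: the dropdown shows the keys, and the repo IDs on the right go straight into InferenceClient. A minimal sketch for smoke-testing one of these IDs outside the Space (assumes huggingface_hub is installed and HF_TOKEN is exported; the prompt is invented for illustration):

import os
from huggingface_hub import InferenceClient

# Standalone check that a configured model answers (hypothetical test, not part of app.py).
client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct", token=os.getenv("HF_TOKEN"))
out = client.chat_completion(
    [{"role": "user", "content": "Say hello in French."}],  # illustrative prompt
    max_tokens=20,
)
print(out.choices[0].message.content)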
@@ -31,15 +31,13 @@ LANGUAGES = {
     "Chinese (Mandarin)": {"code": "zh-CN", "ipa": "cmn", "voice": "zh-CN-XiaoxiaoNeural"}
 }
 
-# Load ASR model (Whisper Tiny)
+# Load ASR model (Whisper Tiny) - remains the same for CPU efficiency
 print("Loading Whisper ASR...")
 asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)
 
 # --- FUNCTIONS ---
 
 def get_llm_response(model_id, system_prompt, user_prompt):
-    # We use provider="hf-inference" to FORCE Hugging Face to use its own servers
-    # and avoid the "ovhcloud" or "novita" errors.
     client = InferenceClient(model=model_id, token=HF_TOKEN)
     try:
         response = ""
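The comment change documents why whisper-tiny stays: device=-1 pins the pipeline to CPU, which is what a free Space provides. A quick sketch of calling it the same way analyze_speech() does further down, with "sample.wav" standing in for a real recording:

from transformers import pipeline

# Same ASR setup as app.py: Whisper Tiny on CPU (device=-1).
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)

# "sample.wav" is a placeholder path; the pipeline returns a dict with a "text" key.
print(asr_pipe("sample.wav")["text"].strip())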
@@ -48,27 +46,26 @@ def get_llm_response(model_id, system_prompt, user_prompt):
             {"role": "user", "content": user_prompt}
         ]
 
-        # We
-
+        # We allow the router to find the best provider automatically for better stability
+        output = client.chat_completion(
             messages,
             max_tokens=500,
-            stream=
-
-
-            token = message.choices[0].delta.content
-            if token:
-                response += token
-
-        return response if response else "Error: No response from model."
+            stream=False # Non-streaming is often more stable for curriculum tasks
+        )
+        return output.choices[0].message.content
 
     except Exception as e:
-
-
+        error_str = str(e)
+        if "410" in error_str:
+            return "⚠️ This model version was recently retired by the provider. Please try the 'Llama 3.2' or 'Qwen' option."
+        if "503" in error_str:
+            return "⏳ The model is currently 'waking up' or busy. Please wait 30 seconds and try again."
+        return f"System Note: {error_str}"
 
 def generate_curriculum(model_name, language, topic):
     model_id = LLM_MODELS[model_name]
-    system_prompt = f"You are PANINI LLM,
-    user_prompt = f"
+    system_prompt = f"You are PANINI LLM, a structured language teacher for {language}. Create a short lesson."
+    user_prompt = f"Topic: {topic}. Provide 5 words/phrases with English translations and one tip for a beginner."
    return get_llm_response(model_id, system_prompt, user_prompt)
 
 async def play_target_audio(text, lang_name):
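Most of the removed lines are truncated in this view, but the surviving fragments (message.choices[0].delta.content, response += token) show the old code consumed a token stream. A rough reconstruction of that pattern, hedged because the exact deleted lines are not recoverable, and reusing the client and messages defined in get_llm_response above:

# Rough reconstruction of the replaced streaming variant (not the verbatim old code).
response = ""
for message in client.chat_completion(messages, max_tokens=500, stream=True):
    token = message.choices[0].delta.content
    if token:
        response += token

The commit swaps this for a single stream=False call that returns the complete message object, so a provider dropping the stream mid-lesson can no longer leave a half-built response.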
@@ -81,61 +78,64 @@ async def play_target_audio(text, lang_name):
 
 def analyze_speech(model_name, lang_name, target_text, audio_path):
     if not audio_path or not target_text:
-        return "
+        return "Incomplete data.", "", "Provide text and recording."
 
+    # 1. Transcription
     asr_res = asr_pipe(audio_path)["text"].strip()
-    ipa_code = LANGUAGES[lang_name]["ipa"]
 
+    # 2. Phonetic Data (Linguistic layer)
+    ipa_code = LANGUAGES[lang_name]["ipa"]
     try:
         target_ipa = phonemize(target_text, language=ipa_code, backend='espeak', strip=True)
         user_ipa = phonemize(asr_res, language=ipa_code, backend='espeak', strip=True)
     except:
-        target_ipa = "IPA
-        user_ipa = "IPA
+        target_ipa = "IPA Unavailable"
+        user_ipa = "IPA Unavailable"
 
+    # 3. LLM Analysis
     model_id = LLM_MODELS[model_name]
-    system_prompt = "You are
+    system_prompt = "You are an expert Speech-Language Pathologist. Focus on anatomical advice."
     user_prompt = (
-        f"Target
-        f"Student
-        f"
+        f"Target: '{target_text}' (IPA: /{target_ipa}/). "
+        f"Student: '{asr_res}' (IPA: /{user_ipa}/). "
+        f"Identify the primary error and give one tip on tongue or lip placement."
     )
 
     feedback = get_llm_response(model_id, system_prompt, user_prompt)
     return asr_res, f"/{user_ipa}/", feedback
 
 # --- UI ---
-with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width:
-    gr.HTML("<h1 style='text-align: center;'>🎙️ PANINI LLM</h1>")
-    gr.HTML("<p style='text-align: center;'>
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo"), css=".gradio-container {max-width: 950px !important}") as demo:
+    gr.HTML("<h1 style='text-align: center; color: #312e81;'>🎙️ PANINI LLM</h1>")
+    gr.HTML("<p style='text-align: center; margin-top: -10px;'>Intelligent Language Pedagogy & Phonetic Analysis</p>")
 
-    with gr.Tab("1
+    with gr.Tab("Step 1: Curriculum"):
         with gr.Row():
-            llm_choice = gr.Dropdown(list(LLM_MODELS.keys()), label="Select
-            lang_choice = gr.Dropdown(list(LANGUAGES.keys()), label="
+            llm_choice = gr.Dropdown(list(LLM_MODELS.keys()), label="Select AI Teacher", value="Qwen 2.5 7B (Excellent Accuracy)")
+            lang_choice = gr.Dropdown(list(LANGUAGES.keys()), label="Language", value="English (US)")
 
-        topic_input = gr.Textbox(label="
-        btn_gen = gr.Button("Generate Lesson
-        curr_output = gr.Markdown("
+        topic_input = gr.Textbox(label="Enter Topic", placeholder="e.g. At the grocery store, Job Interview, Hobbies")
+        btn_gen = gr.Button("Generate Lesson", variant="primary")
+        curr_output = gr.Markdown("---")
 
-    with gr.Tab("2
+    with gr.Tab("Step 2: Pronunciation"):
         with gr.Row():
-            target_word = gr.Textbox(label="Phrase
-            btn_tts = gr.Button("
+            target_word = gr.Textbox(label="Practice this Phrase", placeholder="Copy a word from Step 1 or type your own")
+            btn_tts = gr.Button("Hear Native AI", scale=0)
 
-        audio_ref = gr.Audio(label="
+        audio_ref = gr.Audio(label="Model Audio", type="filepath")
 
         with gr.Row():
-            audio_user = gr.Audio(label="Your
+            audio_user = gr.Audio(label="Record Your Version", sources=["microphone"], type="filepath")
             btn_analyze = gr.Button("Analyze Accent", variant="primary")
 
         with gr.Row():
-            out_transcript = gr.Textbox(label="AI
-            out_ipa = gr.Textbox(label="Your
+            out_transcript = gr.Textbox(label="Transcription (What the AI heard)")
+            out_ipa = gr.Textbox(label="Your IPA (Phonetics)")
 
-        out_feedback = gr.Markdown()
+        out_feedback = gr.Markdown("---")
 
-    #
+    # Event Wiring
     btn_gen.click(generate_curriculum, inputs=[llm_choice, lang_choice, topic_input], outputs=curr_output)
     btn_tts.click(fn=lambda t, l: asyncio.run(play_target_audio(t, l)), inputs=[target_word, lang_choice], outputs=audio_ref)
     btn_analyze.click(analyze_speech, inputs=[llm_choice, lang_choice, target_word, audio_user], outputs=[out_transcript, out_ipa, out_feedback])
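analyze_speech() relies on the phonemizer package to put the target phrase and the Whisper transcript into IPA before the LLM compares them. The call shape in isolation (requires the espeak-ng backend on the system; the sample strings are invented):

from phonemizer import phonemize

# Same arguments as analyze_speech(): espeak backend, stripped output.
target_ipa = phonemize("world", language="en-us", backend="espeak", strip=True)
user_ipa = phonemize("word", language="en-us", backend="espeak", strip=True)
print(f"/{target_ipa}/ vs /{user_ipa}/")  # both strings end up in the LLM prompt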
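play_target_audio() appears only as context here, so its body is not in the diff; the voice names in LANGUAGES (e.g. zh-CN-XiaoxiaoNeural) and the asyncio.run wrapper in the wiring point at edge-tts. A plausible sketch of such a helper, explicitly an assumption rather than the Space's actual code:

import edge_tts

# Hypothetical reconstruction of play_target_audio - the real body is unchanged and not shown.
async def play_target_audio(text, lang_name):
    voice = LANGUAGES[lang_name]["voice"]
    out_path = "target_audio.mp3"  # invented file name
    await edge_tts.Communicate(text, voice).save(out_path)
    return out_path  # a filepath suits gr.Audio(type="filepath")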