Upload app.py
app.py
CHANGED
@@ -8,6 +8,7 @@ import librosa
 import numpy as np
 import soundfile as sf
 import whisper
+import pandas as pd
 
 # --- 0. INITIAL SETUP ---
 try:
@@ -16,136 +17,168 @@ try:
 except TypeError:
     api_key_found = False
 
-print("
-whisper_model = whisper.load_model("base", device="cpu")
-print("
+print("Loading Whisper model...")
+whisper_model = whisper.load_model("base", device="cpu")
+print("Whisper model loaded.")
 
-
-# --- KEY CHANGE: The new super-prompt ---
+# --- EXPERT EXAMINER PROMPT ---
 SYSTEM_PROMPT = """
-
-
-**Your analysis process is a rigorous 4-step examination:**
-
-**Step 1: Phonetic Transcription (IPA)**
-For each word in the **reference sentence**, obtain its standard IPA (International Phonetic Alphabet) transcription under the General American English model.
+You are an expert English language examiner specializing in phonetics and accent reduction for ESL learners. Your task is to provide a detailed, diagnostic assessment of a student's spoken English based on a reference sentence and detailed word-level audio analysis.
 
-**
-
+**Input You Will Receive:**
+You will be given a JSON object containing:
+1. `reference_transcript`: The correct sentence the student was supposed to say.
+2. `spoken_words`: A list of words detected by Whisper, each with:
+    - `word`: The word as transcribed by Whisper.
+    - `start`: The start time of the word in seconds.
+    - `end`: The end time of the word in seconds.
+    - `energy`: A numeric value (RMS) indicating the pronunciation's energy/loudness.
 
-**
-
+**Your Analysis and Output:**
+Your entire response MUST be in English. You must return a single, valid JSON object with the following structure. Do not include any text outside of this JSON object.
 
-**
-After the technical analysis, write a short, friendly, human reply related to the topic of the sentence. End with an open question that invites the user to keep talking. (e.g., "Excellent! I see we're talking about colors. What's your favorite color and why?")
-
-**Mandatory Output Format:**
-Your reply MUST be a valid JSON object, with no additional text, explanations, or comments outside the JSON. The structure must be the following:
+**JSON Output Structure:**
 {
-    "
+    "overall_score_100": integer,
+    "cefr_level": "string (A1, A2, B1, B2, C1, or C2)",
+    "holistic_feedback": {
+        "strengths": "string (A paragraph in English summarizing the student's strong points in pronunciation, rhythm, and clarity.)",
+        "areas_for_improvement": "string (A paragraph in English detailing the main patterns of error and what to focus on.)"
+    },
+    "word_by_word_analysis": [
         {
-            "
-            "
-            "
-
-
-            "estado": "string (Must be 'Correcto' or 'A Mejorar')",
-            "feedback": "string (Only if estado is 'A Mejorar', give a concise technical tip, e.g., 'The tip of the tongue should vibrate near the palate without touching it.')"
-        }
-        ]
+            "reference_word": "string (The word from the correct sentence)",
+            "spoken_word": "string (The word Whisper transcribed, or 'OMITTED')",
+            "word_score_100": integer,
+            "correct_ipa": "string (The correct IPA transcription)",
+            "feedback": "string (Specific phonetic feedback for this word. If correct, simply state 'Excellent pronunciation.')"
        }
-        ]
-    "calificacion_general_100": integer,
-    "respuesta_conversacional": "string (Your short, friendly reply with an open question.)"
+    ]
 }
 """
 
-# ---
-def
+# --- 1. DETAILED FEATURE EXTRACTION (WHISPER + LIBROSA) ---
+def extract_word_level_features(audio_path):
+    """
+    This function uses Whisper to get word timestamps and Librosa to get
+    features for each word's audio segment.
+    """
     try:
         y, sr = librosa.load(audio_path, sr=16000)
-
-        if duration < 0.2: return {}
+
         result = whisper_model.transcribe(audio_path, word_timestamps=True, fp16=False)
-
-
-        print(f"Error extracting metrics: {e}")
-        return {}
+        if not result["segments"] or not result["segments"][0]["words"]:
+            return []
 
+        word_segments = result["segments"][0]["words"]
+
+        features_list = []
+        for segment in word_segments:
+            start_sample = int(segment['start'] * sr)
+            end_sample = int(segment['end'] * sr)
+            word_audio = y[start_sample:end_sample]
+
+            # Calculate Root Mean Square (RMS) energy for the word
+            rms_energy = np.mean(librosa.feature.rms(y=word_audio))
+
+            features_list.append({
+                "word": segment['word'].strip(),
+                "start": round(segment['start'], 2),
+                "end": round(segment['end'], 2),
+                "energy": round(rms_energy, 4)
+            })
+        return features_list
+    except Exception as e:
+        print(f"Error during feature extraction: {e}")
+        return []
 
-# ---
-def
-    if not api_key_found: raise gr.Error("
+# --- 2. MAIN EVALUATION FUNCTION ---
+def run_evaluation(audio_input, reference_transcript):
+    if not api_key_found: raise gr.Error("OpenAI API key not found.")
     if audio_input is None or not reference_transcript:
-        return "
+        return 0, "N/A", "Please provide both an audio file and the reference text.", None
 
     sr, y = audio_input
     temp_audio_path = "temp_audio.wav"
     sf.write(temp_audio_path, y, sr)
 
-
-
-
-
-    **Reference sentence:** "{reference_transcript}"
-    **Whisper transcription:** "{audio_metrics.get('text', '(Silence detected)')}"
-    """
+    # Step 1: Extract detailed features using Whisper and Librosa
+    word_features = extract_word_level_features(temp_audio_path)
+    if not word_features:
+        return 0, "N/A", "Could not process the audio. Please try recording again.", None
 
-
+    # Step 2: Construct the detailed prompt for the OpenAI API
+    prompt_data = {
+        "reference_transcript": reference_transcript,
+        "spoken_words": word_features
+    }
+
+    print("Sending detailed data to GPT-4o for analysis...")
     response = client.chat.completions.create(
         model="gpt-4o",
         response_format={"type": "json_object"},
         messages=[
             {"role": "system", "content": SYSTEM_PROMPT},
-            {"role": "user", "content":
+            {"role": "user", "content": json.dumps(prompt_data)}
         ]
     )
 
+    # Step 3: Process the API response and format it for display
     try:
         result = json.loads(response.choices[0].message.content)
 
-        #
-
-
-
-
-
+        # Format the detailed report for Gradio
+        holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n"
+        holistic_feedback_md += f"### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
+
+        # Create a pandas DataFrame for better display
+        word_analysis_df = pd.DataFrame(result['word_by_word_analysis'])
+
+        return (
+            result.get("overall_score_100", 0),
+            result.get("cefr_level", "N/A"),
+            holistic_feedback_md,
+            gr.DataFrame(value=word_analysis_df, headers=["Reference Word", "Spoken Word", "Score", "Correct IPA", "Feedback"], interactive=False)
+        )
 
     except (json.JSONDecodeError, KeyError) as e:
-        print(f"Error
-        error_msg = "
-        return
+        print(f"Error processing API response: {e}")
+        error_msg = "The API response was not in the expected format. Please try again."
+        return 0, "Error", error_msg, None
 
 
-# ---
+# --- 3. GRADIO INTERFACE ---
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("#
-    gr.Markdown("
+    gr.Markdown("# 🇬🇧 Expert Pronunciation Assessment")
+    gr.Markdown("Record yourself saying the reference sentence. Our AI examiner will provide a detailed diagnostic report on your performance.")
 
     frase_ejemplo = "The rainbow is a division of white light into many beautiful colors."
 
     with gr.Row():
         with gr.Column(scale=1):
-            audio_in = gr.Audio(sources=["microphone"], type="numpy", label="1.
-            text_in = gr.Textbox(lines=3, label="2.
-            submit_btn = gr.Button("
+            audio_in = gr.Audio(sources=["microphone"], type="numpy", label="1. Record Your Voice")
+            text_in = gr.Textbox(lines=3, label="2. Reference Sentence", value=frase_ejemplo)
+            submit_btn = gr.Button("Get Assessment", variant="primary")
 
         with gr.Column(scale=2):
-            gr.Markdown("###
-
-
+            gr.Markdown("### Assessment Summary")
+            with gr.Row():
+                score_out = gr.Number(label="Overall Score (0-100)", interactive=False)
+                level_out = gr.Textbox(label="Estimated CEFR Level", interactive=False)
+
+            holistic_feedback_out = gr.Markdown(label="Examiner's Feedback")
 
-
-
+    gr.Markdown("--- \n ### Detailed Word-by-Word Analysis")
+    word_analysis_out = gr.DataFrame(headers=["Reference Word", "Spoken Word", "Score", "Correct IPA", "Feedback"], label="Phonetic Breakdown")
 
     submit_btn.click(
-        fn=
+        fn=run_evaluation,
        inputs=[audio_in, text_in],
-        outputs=[
+        outputs=[score_out, level_out, holistic_feedback_out, word_analysis_out]
    )
 
 if __name__ == "__main__":
     if not api_key_found:
-        print("\nFATAL:
+        print("\nFATAL: OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
     else:
         demo.launch(debug=True)
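
For reference, the user message that run_evaluation sends to GPT-4o is simply json.dumps(prompt_data). A minimal sketch of that payload for the sample sentence; every timing and energy value below is invented purely for illustration, and real values come from Whisper and librosa:

# Illustrative only: the shape of the payload run_evaluation serializes.
example_prompt_data = {
    "reference_transcript": "The rainbow is a division of white light into many beautiful colors.",
    "spoken_words": [
        {"word": "The", "start": 0.12, "end": 0.24, "energy": 0.0312},
        {"word": "rainbow", "start": 0.26, "end": 0.71, "energy": 0.0489},
        {"word": "is", "start": 0.74, "end": 0.85, "energy": 0.0217},
        # ... one entry per word detected in the recording
    ]
}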
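
On the output side, the system prompt pins the model to a fixed JSON schema. A response for a mostly correct attempt might look like the sketch below; only the keys are prescribed by the prompt, and all content here is invented:

# Illustrative response shaped like the schema SYSTEM_PROMPT demands.
example_result = {
    "overall_score_100": 82,
    "cefr_level": "B2",
    "holistic_feedback": {
        "strengths": "Clear vowels and a steady rhythm across the sentence.",
        "areas_for_improvement": "Final consonant clusters are weakened, for example in 'colors'."
    },
    "word_by_word_analysis": [
        {
            "reference_word": "rainbow",
            "spoken_word": "rainbow",
            "word_score_100": 95,
            "correct_ipa": "/ˈreɪnboʊ/",
            "feedback": "Excellent pronunciation."
        }
        # ... one entry per word in the reference sentence
    ]
}

Because run_evaluation indexes result['holistic_feedback'] and result['word_by_word_analysis'] directly, any deviation from this schema lands in the except (json.JSONDecodeError, KeyError) branch and surfaces as the generic error message.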
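
One caveat worth flagging in extract_word_level_features: np.mean(librosa.feature.rms(...)) returns a NumPy float32 scalar, and round() preserves that type, so json.dumps(prompt_data) can later fail with "Object of type float32 is not JSON serializable". Casting to a built-in float first sidesteps this, as the small self-contained sketch shows:

import json
import numpy as np

rms_energy = np.float32(0.0489)  # stand-in for np.mean(librosa.feature.rms(y=word_audio))
# json.dumps({"energy": round(rms_energy, 4)})  # would raise TypeError
print(json.dumps({"energy": round(float(rms_energy), 4)}))  # {"energy": 0.0489}

Separately, result["segments"][0]["words"] reads only the first Whisper segment; a longer recording can span several segments, so iterating over all of them, e.g. [w for seg in result["segments"] for w in seg["words"]], is the safer pattern.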
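
Finally, a quick way to smoke-test the extraction step outside the Gradio UI. This assumes a short local recording named sample.wav, which is not part of the repo; note that importing app runs the module-level whisper.load_model call, so the import itself takes a moment:

# Hypothetical local check; sample.wav is any short speech recording you provide.
from app import extract_word_level_features

features = extract_word_level_features("sample.wav")
for f in features:
    print(f"{f['word']:>12}  {f['start']:.2f}s-{f['end']:.2f}s  energy={f['energy']}")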