mramirez2001 commited on
Commit
396f257
verified
1 Parent(s): 5e0b8e4

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +152 -126
app.py CHANGED
@@ -17,150 +17,176 @@ try:
17
  except TypeError:
18
  api_key_found = False
19
 
20
- print("Loading Whisper model...")
21
- whisper_model = whisper.load_model("base", device="cpu")
 
22
  print("Whisper model loaded.")
23
 
24
- # --- NUEVO: Lista de Trabalenguas ---
25
- TONGUE_TWISTERS = [
26
- "Peter Piper picked a peck of pickled peppers.",
27
- "She sells seashells by the seashore.",
28
- "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
29
- "Betty Botter bought some butter but she said the butter’s bitter.",
30
- "A proper copper coffee pot."
31
- ]
32
-
33
- # --- PROMPT DEL EXAMINADOR EXPERTO (Sin cambios) ---
34
- SYSTEM_PROMPT = """
35
- You are an expert English language examiner specializing in phonetics and accent reduction for ESL learners. Your task is to provide a detailed, diagnostic assessment of a student's spoken English based on a reference sentence and detailed word-level audio analysis.
36
-
37
- **Input You Will Receive:**
38
- You will be given a JSON object containing:
39
- 1. `reference_transcript`: The correct sentence the student was supposed to say.
40
- 2. `spoken_words`: A list of words detected by Whisper, each with:
41
- - `word`: The word as transcribed by Whisper.
42
- - `start`: The start time of the word in seconds.
43
- - `end`: The end time of the word in seconds.
44
- - `energy`: A numeric value (RMS) indicating the pronunciation's energy/loudness.
45
-
46
- **Your Analysis and Output:**
47
- Your entire response MUST be in English. You must return a single, valid JSON object with the following structure. Do not include any text outside of this JSON object.
48
-
49
- **JSON Output Structure:**
50
  {
51
- "overall_score_100": integer,
52
- "cefr_level": "string (A1, A2, B1, B2, C1, or C2)",
53
- "holistic_feedback": {
54
- "strengths": "string (A paragraph in English summarizing the student's strong points in pronunciation, rhythm, and clarity.)",
55
- "areas_for_improvement": "string (A paragraph in English detailing the main patterns of error and what to focus on.)"
 
 
56
  },
57
- "word_by_word_analysis": [
58
- {
59
- "reference_word": "string (The word from the correct sentence)",
60
- "spoken_word": "string (The word Whisper transcribed, or 'OMITTED')",
61
- "word_score_100": integer,
62
- "correct_ipa": "string (The correct IPA transcription)",
63
- "feedback": "string (Specific phonetic feedback for this word. If correct, simply state 'Excellent pronunciation.')"
64
- }
65
- ]
66
  }
67
  """
68
 
69
- # --- 1. EXTRACCIÓN DE CARACTERÍSTICAS (Sin cambios) ---
70
- def extract_word_level_features(audio_path):
71
- try:
72
- y, sr = librosa.load(audio_path, sr=16000)
73
- result = whisper_model.transcribe(audio_path, word_timestamps=True, fp16=False)
74
- if not result["segments"] or 'words' not in result["segments"][0]:
75
- return []
76
- word_segments = result["segments"][0]["words"]
77
- features_list = []
78
- for segment in word_segments:
79
- start_sample = int(segment['start'] * sr)
80
- end_sample = int(segment['end'] * sr)
81
- word_audio = y[start_sample:end_sample]
82
- rms_energy = np.mean(librosa.feature.rms(y=word_audio)) if len(word_audio) > 0 else 0
83
- features_list.append({"word": segment['word'].strip(), "start": round(segment['start'], 2), "end": round(segment['end'], 2), "energy": round(float(rms_energy), 4)})
84
- return features_list
85
- except Exception as e:
86
- print(f"Error during feature extraction: {e}")
87
- return []
88
-
89
- # --- 2. FUNCIÓN PRINCIPAL DE EVALUACIÓN (Sin cambios) ---
90
- def run_evaluation(audio_input, reference_transcript):
91
- if not api_key_found: raise gr.Error("OpenAI API key not found.")
92
- if audio_input is None or not reference_transcript:
93
- return 0, "N/A", "Please provide both an audio file and the reference text.", None
94
 
 
 
 
 
 
 
95
  sr, y = audio_input
96
  temp_audio_path = "temp_audio.wav"
97
  sf.write(temp_audio_path, y, sr)
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- word_features = extract_word_level_features(temp_audio_path)
100
- if not word_features:
101
- return 0, "N/A", "Could not process the audio. Please try recording again.", None
102
-
103
- prompt_data = {"reference_transcript": reference_transcript, "spoken_words": word_features}
104
 
105
- print("Sending detailed data to GPT-4o for analysis...")
106
- response = client.chat.completions.create(
107
- model="gpt-4o", response_format={"type": "json_object"},
108
- messages=[{"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": json.dumps(prompt_data)}]
109
- )
110
-
111
- try:
112
- result = json.loads(response.choices[0].message.content)
113
- holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n"
114
- holistic_feedback_md += f"### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
115
- word_analysis_df = pd.DataFrame(result['word_by_word_analysis'])
 
 
 
 
 
 
 
 
 
 
 
116
 
117
- return (result.get("overall_score_100", 0), result.get("cefr_level", "N/A"), holistic_feedback_md,
118
- gr.DataFrame(value=word_analysis_df, headers=["Reference Word", "Spoken Word", "Score", "Correct IPA", "Feedback"], interactive=False))
119
- except (json.JSONDecodeError, KeyError) as e:
120
- print(f"Error processing API response: {e}")
121
- error_msg = "The API response was not in the expected format. Please try again."
122
- return 0, "Error", error_msg, None
123
-
124
- # --- 3. INTERFAZ DE GRADIO (Con las nuevas adecuaciones) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
126
- gr.Markdown("# 🇬🇧 Expert Pronunciation Assessment")
127
- gr.Markdown("Choose a tongue twister or write your own sentence. Record yourself, and our AI examiner will provide a detailed diagnostic report.")
128
-
129
- # --- NUEVO: Selector de Trabalenguas ---
130
- tongue_twister_selector = gr.Dropdown(
131
- choices=TONGUE_TWISTERS,
132
- label="Or Choose a Tongue Twister to Practice",
133
- info="Selecting one will automatically fill the reference sentence below."
134
- )
135
 
136
- with gr.Row():
137
- with gr.Column(scale=1):
138
- audio_in = gr.Audio(sources=["microphone"], type="numpy", label="1. Record Your Voice")
139
- text_in = gr.Textbox(lines=3, label="2. Reference Sentence", value=TONGUE_TWISTERS[0])
140
- submit_btn = gr.Button("Get Assessment", variant="primary")
141
-
142
- with gr.Column(scale=2):
143
- gr.Markdown("### Assessment Summary")
144
  with gr.Row():
145
- score_out = gr.Number(label="Overall Score (0-100)", interactive=False)
146
- level_out = gr.Textbox(label="Estimated CEFR Level", interactive=False)
 
 
 
 
 
 
 
 
 
 
 
147
 
148
- holistic_feedback_out = gr.Markdown(label="Examiner's Feedback")
149
-
150
- gr.Markdown("--- \n ### Detailed Word-by-Word Analysis")
151
- word_analysis_out = gr.DataFrame(headers=["Reference Word", "Spoken Word", "Score", "Correct IPA", "Feedback"], label="Phonetic Breakdown", wrap=True)
152
-
153
- # --- NUEVO: Lógica de Interacción ---
154
- # Cuando el dropdown cambia, actualiza el campo de texto.
155
- def update_text(choice):
156
- return gr.Textbox(value=choice)
157
- tongue_twister_selector.change(fn=update_text, inputs=tongue_twister_selector, outputs=text_in)
158
-
159
- submit_btn.click(
160
- fn=run_evaluation,
161
- inputs=[audio_in, text_in],
162
- outputs=[score_out, level_out, holistic_feedback_out, word_analysis_out]
163
- )
164
 
165
  if __name__ == "__main__":
166
  if not api_key_found:
 
17
  except TypeError:
18
  api_key_found = False
19
 
20
+ print("Loading Whisper for transcription...")
21
+ # Usamos el modelo 'base' que es un buen compromiso entre velocidad y precisión
22
+ whisper_model = whisper.load_model("base", device="cpu")
23
  print("Whisper model loaded.")
24
 
25
+
26
+ # --- 1. DEFINICIÓN DE PROMPTS PARA LA IA ---
27
+
28
+ # Prompt para mantener la conversación
29
+ CONVERSATION_SYSTEM_PROMPT = """
30
+ You are a friendly and encouraging English language tutor named Alex.
31
+ A student will speak to you. Your task is to keep a natural, simple conversation going.
32
+ 1. Briefly analyze the user's previous response to estimate their CEFR level (A1, A2, B1, etc.).
33
+ 2. Formulate a simple, open-ended follow-up question that is appropriate for THAT estimated level.
34
+ 3. Your entire response must be a single, short paragraph in natural, conversational English. DO NOT use JSON.
35
+ """
36
+
37
+ # Prompt para la evaluación final
38
+ FINAL_EVALUATION_SYSTEM_PROMPT = """
39
+ You are an expert English language examiner providing a final report. Analyze the entire conversation history provided.
40
+
41
+ Your task is to return a single, valid JSON object with the following structure. Do not include any text outside this JSON object.
42
+
43
+ JSON Output Structure:
 
 
 
 
 
 
 
44
  {
45
+ "cefr_level": "string (e.g., A2, B1)",
46
+ "feedback_en": {
47
+ "strengths": "string (A paragraph summarizing the student's strong points in pronunciation, vocabulary, and fluency.)",
48
+ "areas_for_improvement": "string (A paragraph detailing the main patterns of error and what to focus on.)",
49
+ "word_by_word_feedback": [
50
+ {"word": "string", "feedback": "string (Specific phonetic or usage feedback.)"}
51
+ ]
52
  },
53
+ "feedback_es": {
54
+ "fortalezas": "string (Un párrafo resumiendo los puntos fuertes del estudiante en pronunciación, vocabulario y fluidez.)",
55
+ "areas_a_mejorar": "string (Un párrafo detallando los patrones de error principales y en qué enfocarse.)",
56
+ "feedback_por_palabra": [
57
+ {"palabra": "string", "feedback": "string (Retroalimentación fonética o de uso específica.)"}
58
+ ]
59
+ }
 
 
60
  }
61
  """
62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
+ # --- 2. FUNCIONES LÓGICAS ---
65
+
66
+ def transcribe_audio(audio_input):
67
+ """Transcribe el audio usando la API de Whisper de OpenAI."""
68
+ if audio_input is None:
69
+ return ""
70
  sr, y = audio_input
71
  temp_audio_path = "temp_audio.wav"
72
  sf.write(temp_audio_path, y, sr)
73
+ with open(temp_audio_path, "rb") as audio_file:
74
+ transcript = client.audio.transcriptions.create(
75
+ model="whisper-1",
76
+ file=audio_file
77
+ ).text
78
+ return transcript
79
+
80
+ def chat_interaction(audio_input, history_state):
81
+ """
82
+ Gestiona una vuelta de la conversación.
83
+ """
84
+ if not api_key_found: raise gr.Error("OpenAI API key not found.")
85
+ if audio_input is None: return history_state, history_state, "", ""
86
 
87
+ # 1. Transcribir el audio del usuario
88
+ user_text = transcribe_audio(audio_input)
89
+
90
+ # 2. Actualizar el historial con el mensaje del usuario
91
+ history_state.append({"role": "user", "content": user_text})
92
 
93
+ # Formatear para el chatbot de Gradio
94
+ chat_display = []
95
+ for i, msg in enumerate(history_state):
96
+ if msg['role'] == 'user':
97
+ chat_display.append((msg['content'], None))
98
+ elif msg['role'] == 'assistant':
99
+ if chat_display and chat_display[-1][1] is None:
100
+ chat_display[-1] = (chat_display[-1][0], msg['content'])
101
+
102
+ # 3. Decidir si continuar la conversación o dar el reporte final
103
+ if len(history_state) < 9: # 1 system + 4 pares de user/assistant
104
+ # --- Continuar conversación ---
105
+ messages_to_send = [{"role": "system", "content": CONVERSATION_SYSTEM_PROMPT}] + history_state
106
+
107
+ response = client.chat.completions.create(
108
+ model="gpt-4o",
109
+ messages=messages_to_send,
110
+ temperature=0.7
111
+ )
112
+ ai_response = response.choices[0].message.content
113
+ history_state.append({"role": "assistant", "content": ai_response})
114
+ chat_display[-1] = (chat_display[-1][0], ai_response)
115
 
116
+ return chat_display, history_state, gr.Markdown(visible=False), gr.Markdown(visible=False)
117
+
118
+ else:
119
+ # --- Generar evaluación final ---
120
+ print("Generating final evaluation...")
121
+ messages_to_send = [{"role": "system", "content": FINAL_EVALUATION_SYSTEM_PROMPT}] + history_state
122
+
123
+ response = client.chat.completions.create(
124
+ model="gpt-4o",
125
+ response_format={"type": "json_object"},
126
+ messages=messages_to_send
127
+ )
128
+
129
+ try:
130
+ result = json.loads(response.choices[0].message.content)
131
+
132
+ # Formatear el feedback en Inglés
133
+ fb_en = result.get('feedback_en', {})
134
+ md_en = f"## Final Report (CEFR Level: {result.get('cefr_level', 'N/A')})\n"
135
+ md_en += f"### Strengths\n{fb_en.get('strengths', '')}\n"
136
+ md_en += f"### Areas for Improvement\n{fb_en.get('areas_for_improvement', '')}\n"
137
+ md_en += "### Word-by-Word Feedback\n"
138
+ for item in fb_en.get('word_by_word_feedback', []):
139
+ md_en += f"- **{item['word']}**: {item['feedback']}\n"
140
+
141
+ # Formatear el feedback en Español
142
+ fb_es = result.get('feedback_es', {})
143
+ md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n"
144
+ md_es += f"### Fortalezas\n{fb_es.get('fortalezas', '')}\n"
145
+ md_es += f"### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n"
146
+ md_es += "### Retroalimentación por Palabra\n"
147
+ for item in fb_es.get('feedback_por_palabra', []):
148
+ md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
149
+
150
+ # Mensaje final para el chat
151
+ final_message = "Thank you for the conversation! Here is your final report."
152
+ chat_display[-1] = (chat_display[-1][0], final_message)
153
+
154
+ return chat_display, history_state, gr.Markdown(value=md_en, visible=True), gr.Markdown(value=md_es, visible=True)
155
+
156
+ except (json.JSONDecodeError, KeyError) as e:
157
+ print(f"Error parsing final report: {e}")
158
+ return chat_display, history_state, gr.Markdown(value="Error generating report.", visible=True), gr.Markdown(visible=False)
159
+
160
+ # --- 3. INTERFAZ DE GRADIO ---
161
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
162
+ gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
 
 
 
 
 
 
 
 
163
 
164
+ with gr.Tabs():
165
+ with gr.TabItem("Práctica Conversacional (Chat AI)"):
 
 
 
 
 
 
166
  with gr.Row():
167
+ with gr.Column(scale=2):
168
+ chatbot = gr.Chatbot(
169
+ value=[(None, "Hi there! I'm Alex. How are you doing today?")],
170
+ label="Conversation with your AI Tutor"
171
+ )
172
+ audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
173
+ with gr.Column(scale=1):
174
+ gr.Markdown("### Final Report")
175
+ feedback_en_out = gr.Markdown(label="English Feedback", visible=False)
176
+ feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
177
+
178
+ # Estado para guardar el historial de la conversación
179
+ history = gr.State([])
180
 
181
+ audio_in_chat.stop_recording(
182
+ fn=chat_interaction,
183
+ inputs=[audio_in_chat, history],
184
+ outputs=[chatbot, history, feedback_en_out, feedback_es_out]
185
+ )
186
+
187
+ with gr.TabItem("Evaluación por Frase"):
188
+ gr.Markdown("This is a placeholder for the original sentence evaluation tool.")
189
+ # Aquí pegarías la interfaz de la herramienta anterior si quisieras combinar ambas.
 
 
 
 
 
 
 
190
 
191
  if __name__ == "__main__":
192
  if not api_key_found: