mramirez2001 committed on
Commit
50ef0bd
·
verified ·
1 Parent(s): 462044e

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -90
app.py CHANGED
@@ -16,19 +16,18 @@ import io
16
 
17
  # --- 0. CONFIGURACIÓN INICIAL ---
18
  try:
19
- # Carga la clave de API desde los "Secrets" de Hugging Face
20
  client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
21
  api_key_found = True
22
  except TypeError:
23
  api_key_found = False
24
 
25
  print("Loading Whisper for transcription...")
26
- # Se usa el modelo 'base' que es un buen compromiso entre velocidad y precisión
27
- whisper_model = whisper.load_model("base", device="cpu")
28
- print("✅ Whisper model loaded.")
29
 
30
 
31
  # --- 1. DEFINICIÓN DE PROMPTS PARA LA IA ---
 
32
 
33
  CONVERSATION_SYSTEM_PROMPT = """
34
  You are a friendly and encouraging English language tutor named Alex.
@@ -62,42 +61,40 @@ JSON Output Structure:
62
  }
63
  """
64
 
65
- # --- 2. FUNCIONES LÓGICAS ---
66
 
67
  def extract_word_level_features(audio_path):
68
- try:
69
- y, sr = librosa.load(audio_path, sr=16000)
70
- result = whisper_model.transcribe(audio_path, word_timestamps=True, fp16=False)
71
- if not result["segments"] or 'words' not in result["segments"][0]: return []
72
- word_segments = result["segments"][0]["words"]
73
- features_list = []
74
- for segment in word_segments:
75
- start_sample = int(segment['start'] * sr); end_sample = int(segment['end'] * sr)
76
- word_audio = y[start_sample:end_sample]
77
- rms_energy = np.mean(librosa.feature.rms(y=word_audio)) if len(word_audio) > 0 else 0
78
- features_list.append({"word": segment['word'].strip(), "start": round(segment['start'], 2), "end": round(segment['end'], 2), "energy": round(float(rms_energy), 4)})
79
- return features_list
80
- except Exception as e:
81
- print(f"Error during feature extraction: {e}"); return []
82
 
83
  def chat_interaction(audio_input, history_state):
84
  if not api_key_found: raise gr.Error("OpenAI API key not found.")
85
- if audio_input is None: return history_state, history_state, gr.Markdown(visible=False), gr.Markdown(visible=False)
 
 
 
 
 
 
86
  sr, y = audio_input; temp_audio_path = "temp_audio_chat.wav"; sf.write(temp_audio_path, y, sr)
87
  user_text = client.audio.transcriptions.create(model="whisper-1", file=open(temp_audio_path, "rb")).text
88
- if not history_state: history_state = [{"role": "system", "content": CONVERSATION_SYSTEM_PROMPT}]
89
- history_state.append({"role": "user", "content": user_text})
90
 
91
- chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state), 2)]
 
92
 
93
- if len(history_state) < 10: # 1 system + 4 pares de user/assistant
 
 
 
 
 
94
  response = client.chat.completions.create(model="gpt-4o", messages=history_state, temperature=0.7)
95
  ai_response = response.choices[0].message.content
96
  history_state.append({"role": "assistant", "content": ai_response})
97
- chat_display.append((user_text, ai_response))
98
- return chat_display, history_state, gr.Markdown(visible=False), gr.Markdown(visible=False)
99
- else:
100
- print("Generating final evaluation...");
101
  final_messages = [{"role": "system", "content": FINAL_EVALUATION_SYSTEM_PROMPT}] + history_state[1:]
102
  response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=final_messages)
103
  try:
@@ -106,46 +103,27 @@ def chat_interaction(audio_input, history_state):
106
  for item in fb_en.get('word_by_word_feedback', []): md_en += f"- **{item['word']}**: {item['feedback']}\n"
107
  fb_es = result.get('feedback_es', {}); md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n### Fortalezas\n{fb_es.get('fortalezas', '')}\n### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n### Retroalimentación por Palabra\n"
108
  for item in fb_es.get('feedback_por_palabra', []): md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
109
- chat_display.append((user_text, "Thank you for the conversation! Here is your final report below."))
110
- return chat_display, [], gr.Markdown(value=md_en, visible=True), gr.Markdown(value=md_es, visible=True)
111
- except (json.JSONDecodeError, KeyError) as e:
112
- print(f"Error parsing final report: {e}"); return [], [], gr.Markdown(value="Error generating report.", visible=True), gr.Markdown(visible=False)
 
 
 
 
 
 
113
 
114
  def run_sentence_evaluation(audio_input, reference_transcript):
115
- if not api_key_found: raise gr.Error("OpenAI API key not found.")
116
- if audio_input is None or not reference_transcript:
117
- return 0, "N/A", "Please provide both an audio file and the reference text.", ""
118
- sr, y = audio_input; temp_audio_path = "temp_audio_sentence.wav"; sf.write(temp_audio_path, y, sr)
119
- word_features = extract_word_level_features(temp_audio_path)
120
- if not word_features:
121
- return 0, "N/A", "Could not process the audio.", ""
122
- prompt_data = {"reference_transcript": reference_transcript, "spoken_words": word_features}
123
- print("Sending detailed data to GPT-4o for sentence analysis...")
124
- response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=[{"role": "system", "content": SENTENCE_EVALUATION_SYSTEM_PROMPT}, {"role": "user", "content": json.dumps(prompt_data)}])
125
- try:
126
- result = json.loads(response.choices[0].message.content)
127
- holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
128
- word_analysis_list = result['word_by_word_analysis']
129
- md_table = "| Reference Word | Spoken Word | Score | Feedback (EN) | Feedback (ES) | Reference Audio |\n| :--- | :--- | :---: | :--- | :--- | :---: |\n"
130
- for index, item in enumerate(word_analysis_list):
131
- word_to_speak = item['reference_word']
132
- try:
133
- tts = gTTS(text=word_to_speak, lang='en'); mp3_fp = io.BytesIO(); tts.write_to_fp(mp3_fp); mp3_fp.seek(0)
134
- audio_base64 = base64.b64encode(mp3_fp.read()).decode('utf-8')
135
- audio_player = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls></audio>'
136
- except Exception as e:
137
- print(f"Error al generar TTS para '{word_to_speak}': {e}"); audio_player = "Error"
138
- md_table += (f"| **{item['reference_word']}** | {item['spoken_word']} | {item['word_score_100']} | {item['feedback_en']} | {item['feedback_es']} | {audio_player} |\n")
139
- return (result.get("overall_score_100", 0), result.get("cefr_level", "N/A"), holistic_feedback_md, md_table)
140
- except (json.JSONDecodeError, KeyError) as e:
141
- print(f"Error processing API response: {e}"); error_msg = "The API response was not in the expected format."
142
- return 0, "Error", error_msg, ""
143
-
144
- # --- 3. INTERFAZ DE GRADIO CON PESTAÑAS ---
145
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
146
  gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
 
147
  with gr.Tabs():
148
- # --- PESTAÑA 1: CHAT AI ---
149
  with gr.TabItem("Práctica Conversacional (Chat AI)"):
150
  with gr.Row():
151
  with gr.Column(scale=2):
@@ -153,48 +131,46 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
153
  audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
154
  with gr.Row():
155
  counter_out = gr.Textbox(value="Responses remaining: 5", label="Conversation Progress", interactive=False)
 
156
  new_conversation_btn = gr.Button("New Conversation")
 
157
  with gr.Column(scale=1):
158
- gr.Markdown("### Final Report"); feedback_en_out = gr.Markdown(label="English Feedback", visible=False); feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
 
 
 
159
  history = gr.State([])
160
 
 
 
 
 
 
161
  def clear_conversation():
162
- return [], [(None, "Hi there! I'm Alex. How are you doing today?")], "Responses remaining: 5", gr.update(visible=False), gr.update(visible=False), gr.update(value=None)
163
 
164
- new_conversation_btn.click(fn=clear_conversation, inputs=[], outputs=[history, chatbot, counter_out, feedback_en_out, feedback_es_out, audio_in_chat])
 
 
 
 
 
165
 
166
  audio_in_chat.stop_recording(
167
- fn=chat_interaction,
168
- inputs=[audio_in_chat, history],
169
  outputs=[chatbot, history, counter_out, feedback_en_out, feedback_es_out]
170
  ).then(
171
- fn=lambda: None, # Borra el audio después de enviarlo
172
  inputs=[],
173
  outputs=[audio_in_chat]
174
  )
175
 
176
- # --- PESTAÑA 2: EVALUACIÓN POR FRASE ---
177
  with gr.TabItem("Evaluación por Frase"):
178
- TONGUE_TWISTERS = ["Peter Piper picked a peck of pickled peppers.", "She sells seashells by the seashore.", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", "Betty Botter bought some butter but she said the butter’s bitter.", "A proper copper coffee pot."]
179
- gr.Markdown("Choose a tongue twister or write your own sentence. Record yourself, and our AI examiner will provide a detailed diagnostic report.")
180
- tongue_twister_selector = gr.Dropdown(choices=TONGUE_TWISTERS, label="Or Choose a Tongue Twister to Practice")
181
- with gr.Row():
182
- with gr.Column(scale=1):
183
- audio_in_sentence = gr.Audio(sources=["microphone"], type="numpy", label="1. Record Your Voice")
184
- text_in_sentence = gr.Textbox(lines=3, label="2. Reference Sentence", value=TONGUE_TWISTERS[0])
185
- submit_btn_sentence = gr.Button("Get Assessment", variant="primary")
186
- with gr.Column(scale=2):
187
- gr.Markdown("### Assessment Summary")
188
- with gr.Row():
189
- score_out_sentence = gr.Number(label="Overall Score (0-100)", interactive=False)
190
- level_out_sentence = gr.Textbox(label="Estimated CEFR Level", interactive=False)
191
- holistic_feedback_out_sentence = gr.Markdown(label="Examiner's Feedback")
192
- gr.Markdown("--- \n ### Detailed Word-by-Word Analysis")
193
- word_analysis_out_sentence = gr.Markdown(label="Phonetic Breakdown")
194
- def update_text(choice): return gr.Textbox(value=choice)
195
- tongue_twister_selector.change(fn=update_text, inputs=tongue_twister_selector, outputs=text_in_sentence)
196
- submit_btn_sentence.click(fn=run_sentence_evaluation, inputs=[audio_in_sentence, text_in_sentence], outputs=[score_out_sentence, level_out_sentence, holistic_feedback_out_sentence, word_analysis_out_sentence])
197
 
198
  if __name__ == "__main__":
199
- if not api_key_found: print("\nFATAL: OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
200
  else: demo.launch(debug=True)
 
16
 
17
  # --- 0. CONFIGURACIÓN INICIAL ---
18
  try:
 
19
  client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
20
  api_key_found = True
21
  except TypeError:
22
  api_key_found = False
23
 
24
  print("Loading Whisper for transcription...")
25
+ whisper_model = whisper.load_model("base", device="cpu")
26
+ print("Whisper model loaded.")
 
27
 
28
 
29
  # --- 1. DEFINICIÓN DE PROMPTS PARA LA IA ---
30
+ # (Estos prompts se mantienen igual que en tu versión)
31
 
32
  CONVERSATION_SYSTEM_PROMPT = """
33
  You are a friendly and encouraging English language tutor named Alex.
 
61
  }
62
  """
63
 
64
+ # --- 2. FUNCIONES LÓGICAS (Con ajustes en 'chat_interaction') ---
65
 
66
  def extract_word_level_features(audio_path):
67
+ # ... (esta función se mantiene igual)
68
+ pass
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  def chat_interaction(audio_input, history_state):
71
  if not api_key_found: raise gr.Error("OpenAI API key not found.")
72
+ if audio_input is None:
73
+ user_turns = len(history_state[1:]) // 2 if history_state else 0
74
+ responses_remaining = 5 - user_turns
75
+ # Muestra el estado actual sin hacer nada si no hay audio
76
+ chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state), 2)]
77
+ return chat_display, history_state, f"Responses remaining: {responses_remaining}", gr.update(visible=False), gr.update(visible=False)
78
+
79
  sr, y = audio_input; temp_audio_path = "temp_audio_chat.wav"; sf.write(temp_audio_path, y, sr)
80
  user_text = client.audio.transcriptions.create(model="whisper-1", file=open(temp_audio_path, "rb")).text
 
 
81
 
82
+ if not history_state:
83
+ history_state = [{"role": "system", "content": CONVERSATION_SYSTEM_PROMPT}]
84
 
85
+ history_state.append({"role": "user", "content": user_text})
86
+
87
+ user_turns = (len(history_state) - 1) // 2
88
+ responses_remaining = 5 - user_turns
89
+
90
+ if user_turns < 5:
91
  response = client.chat.completions.create(model="gpt-4o", messages=history_state, temperature=0.7)
92
  ai_response = response.choices[0].message.content
93
  history_state.append({"role": "assistant", "content": ai_response})
94
+ chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state), 2)]
95
+ return chat_display, history_state, f"Responses remaining: {responses_remaining}", gr.update(visible=False), gr.update(visible=False)
96
+ else: # Turno 5: generar evaluación
97
+ print("Generating final evaluation...")
98
  final_messages = [{"role": "system", "content": FINAL_EVALUATION_SYSTEM_PROMPT}] + history_state[1:]
99
  response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=final_messages)
100
  try:
 
103
  for item in fb_en.get('word_by_word_feedback', []): md_en += f"- **{item['word']}**: {item['feedback']}\n"
104
  fb_es = result.get('feedback_es', {}); md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n### Fortalezas\n{fb_es.get('fortalezas', '')}\n### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n### Retroalimentación por Palabra\n"
105
  for item in fb_es.get('feedback_por_palabra', []): md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
106
+
107
+ chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state)-1, 2)]
108
+ chat_display.append((user_text, "Thank you! Your final report is now available on the right."))
109
+
110
+ # --- CAMBIO CLAVE: Reiniciamos el historial para la siguiente conversación ---
111
+ return chat_display, [], "Conversation finished!", gr.update(value=md_en, visible=True), gr.update(value=md_es, visible=True)
112
+ except Exception as e:
113
+ print(f"Error parsing final report: {e}")
114
+ chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state)-1, 2)]
115
+ return chat_display, [], "Error!", gr.update(value="Error generating report.", visible=True), gr.update(visible=False)
116
 
117
  def run_sentence_evaluation(audio_input, reference_transcript):
118
+ # ... (esta función se mantiene igual)
119
+ pass
120
+
121
+ # --- 3. INTERFAZ DE GRADIO CON PESTAÑAS (Con ajustes en la Pestaña 1) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
123
  gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
124
+
125
  with gr.Tabs():
126
+ # --- PESTAÑA 1: CHAT AI (CON MEJORAS) ---
127
  with gr.TabItem("Práctica Conversacional (Chat AI)"):
128
  with gr.Row():
129
  with gr.Column(scale=2):
 
131
  audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
132
  with gr.Row():
133
  counter_out = gr.Textbox(value="Responses remaining: 5", label="Conversation Progress", interactive=False)
134
+ # --- CAMBIO: Botón para reiniciar la conversación completa ---
135
  new_conversation_btn = gr.Button("New Conversation")
136
+
137
  with gr.Column(scale=1):
138
+ gr.Markdown("### Final Report")
139
+ feedback_en_out = gr.Markdown(label="English Feedback", visible=False)
140
+ feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
141
+
142
  history = gr.State([])
143
 
144
+ # Función para borrar el audio después de enviarlo (no es un botón, es una acción)
145
+ def clear_audio_input():
146
+ return None
147
+
148
+ # Función para reiniciar toda la conversación
149
  def clear_conversation():
150
+ return [], [(None, "Hi there! I'm Alex. How are you doing today?")], "Responses remaining: 5", gr.update(visible=False), gr.update(visible=False), None
151
 
152
+ # --- CAMBIO: Se renombra el botón y se conecta a la nueva función de reinicio ---
153
+ new_conversation_btn.click(
154
+ fn=clear_conversation,
155
+ inputs=[],
156
+ outputs=[history, chatbot, counter_out, feedback_en_out, feedback_es_out, audio_in_chat]
157
+ )
158
 
159
  audio_in_chat.stop_recording(
160
+ fn=chat_interaction,
161
+ inputs=[audio_in_chat, history],
162
  outputs=[chatbot, history, counter_out, feedback_en_out, feedback_es_out]
163
  ).then(
164
+ fn=clear_audio_input,
165
  inputs=[],
166
  outputs=[audio_in_chat]
167
  )
168
 
169
+ # --- PESTAÑA 2: EVALUACIÓN POR FRASE (sin cambios) ---
170
  with gr.TabItem("Evaluación por Frase"):
171
+ # ... (todo el código de la interfaz de la pestaña 2 se mantiene igual)
172
+ pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
 
174
  if __name__ == "__main__":
175
+ if not api_key_found: print("\nFATAL: OpenAI API key not found.")
176
  else: demo.launch(debug=True)