mramirez2001 committed on
Commit
a9d2d80
verified
1 Parent(s): 52b74b5

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -66
app.py CHANGED
@@ -10,7 +10,7 @@ import soundfile as sf
10
  import whisper
11
  import pandas as pd
12
  from gtts import gTTS
13
- import re # Necesario para limpiar nombres de archivo
14
 
15
  # --- 0. CONFIGURACIÓN INICIAL ---
16
  try:
@@ -20,15 +20,14 @@ except TypeError:
20
  api_key_found = False
21
 
22
  print("Loading Whisper for transcription...")
23
- # Usamos el modelo 'base' que es un buen compromiso entre velocidad y precisión
24
- whisper_model = whisper.load_model("base", device="cpu")
25
  print("Whisper model loaded.")
26
 
27
 
28
  # --- 1. DEFINICIÓN DE PROMPTS PARA LA IA ---
29
 
30
  CONVERSATION_SYSTEM_PROMPT = """
31
- You are a friendly and encouraging English language tutor named Alex.
32
  A student will speak to you. Your task is to keep a natural, simple conversation going.
33
  1. Briefly analyze the user's previous response to estimate their CEFR level (A1, A2, B1, etc.).
34
  2. Formulate a simple, open-ended follow-up question that is appropriate for THAT estimated level.
@@ -108,85 +107,44 @@ def chat_interaction(audio_input, history_state):
108
  except (json.JSONDecodeError, KeyError) as e:
109
  print(f"Error parsing final report: {e}"); return chat_display, history_state, gr.Markdown(value="Error generating report.", visible=True), gr.Markdown(visible=False)
110
 
111
- # --- CAMBIO: La función de evaluación ahora devuelve Markdown ---
112
  def run_sentence_evaluation(audio_input, reference_transcript):
113
  if not api_key_found: raise gr.Error("OpenAI API key not found.")
114
  if audio_input is None or not reference_transcript:
115
  return 0, "N/A", "Please provide both an audio file and the reference text.", ""
116
-
117
- sr, y = audio_input
118
- temp_audio_path = "temp_audio_sentence.wav"
119
- sf.write(temp_audio_path, y, sr)
120
-
121
  word_features = extract_word_level_features(temp_audio_path)
122
  if not word_features:
123
- return 0, "N/A", "Could not process the audio.", None
124
-
125
  prompt_data = {"reference_transcript": reference_transcript, "spoken_words": word_features}
126
-
127
  print("Sending detailed data to GPT-4o for sentence analysis...")
128
- response = client.chat.completions.create(
129
- model="gpt-4o",
130
- response_format={"type": "json_object"},
131
- messages=[{"role": "system", "content": SENTENCE_EVALUATION_SYSTEM_PROMPT}, {"role": "user", "content": json.dumps(prompt_data)}]
132
- )
133
-
134
  try:
135
  result = json.loads(response.choices[0].message.content)
136
  holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
137
  word_analysis_list = result['word_by_word_analysis']
138
-
139
- # --- NUEVO: Construir una tabla en Markdown ---
140
- md_table = "| Reference Word | Spoken Word | Score | Feedback (EN) | Feedback (ES) | Reference Audio |\n"
141
- md_table += "| :--- | :--- | :---: | :--- | :--- | :---: |\n"
142
-
143
- os.makedirs("reference_audio", exist_ok=True)
144
-
145
  for index, item in enumerate(word_analysis_list):
146
- word_to_speak = item['reference_word']
147
- safe_filename = re.sub(r'\W+', '', word_to_speak.lower())
148
- audio_path = f"reference_audio/{index}_{safe_filename}.mp3"
149
-
150
  try:
151
- tts = gTTS(text=word_to_speak, lang='en')
152
- tts.save(audio_path)
153
- # Embed the audio using an HTML5 tag within the Markdown table
154
- audio_player = f'<audio src="file/{audio_path}" controls></audio>'
155
  except Exception as e:
156
- print(f"Error al generar TTS para '{word_to_speak}': {e}")
157
- audio_player = "Error"
158
-
159
- # Add a row to the Markdown table
160
- md_table += (f"| **{item['reference_word']}** "
161
- f"| {item['spoken_word']} "
162
- f"| {item['word_score_100']} "
163
- f"| {item['feedback_en']} "
164
- f"| {item['feedback_es']} "
165
- f"| {audio_player} |\n")
166
-
167
- return (
168
- result.get("overall_score_100", 0),
169
- result.get("cefr_level", "N/A"),
170
- holistic_feedback_md,
171
- md_table # Return the Markdown string instead of a DataFrame
172
- )
173
  except (json.JSONDecodeError, KeyError) as e:
174
- print(f"Error processing API response: {e}")
175
- error_msg = "The API response was not in the expected format."
176
  return 0, "Error", error_msg, ""
177
 
178
-
179
-
180
- # --- 3. INTERFAZ DE GRADIO CON PESTAÑAS (Con salida Markdown) ---
181
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
182
  gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
183
-
184
  with gr.Tabs():
185
- # --- PESTAÑA 1: CHAT AI (sin cambios) ---
186
  with gr.TabItem("Práctica Conversacional (Chat AI)"):
187
  with gr.Row():
188
  with gr.Column(scale=2):
189
- chatbot = gr.Chatbot(value=[(None, "Hi there! I'm Alex. How are you doing today?")], label="Conversation with your AI Tutor")
190
  audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
191
  with gr.Column(scale=1):
192
  gr.Markdown("### Final Report"); feedback_en_out = gr.Markdown(label="English Feedback", visible=False); feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
@@ -205,7 +163,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
205
  submit_btn_sentence = gr.Button("Get Assessment", variant="primary")
206
  with gr.Column(scale=2):
207
  gr.Markdown("### Assessment Summary")
208
- with gr.Row():
209
  score_out_sentence = gr.Number(label="Overall Score (0-100)", interactive=False)
210
  level_out_sentence = gr.Textbox(label="Estimated CEFR Level", interactive=False)
211
  holistic_feedback_out_sentence = gr.Markdown(label="Examiner's Feedback")
@@ -213,11 +171,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
213
  word_analysis_out_sentence = gr.Markdown(label="Phonetic Breakdown")
214
  def update_text(choice): return gr.Textbox(value=choice)
215
  tongue_twister_selector.change(fn=update_text, inputs=tongue_twister_selector, outputs=text_in_sentence)
216
- submit_btn_sentence.click(
217
- fn=run_sentence_evaluation,
218
- inputs=[audio_in_sentence, text_in_sentence],
219
- outputs=[score_out_sentence, level_out_sentence, holistic_feedback_out_sentence, word_analysis_out_sentence]
220
- )
221
  if __name__ == "__main__":
222
- if not api_key_found: print("\nFATAL: OpenAI API key not found.")
223
  else: demo.launch(debug=True)
 
10
  import whisper
11
  import pandas as pd
12
  from gtts import gTTS
13
+ import re
14
 
15
  # --- 0. CONFIGURACIÓN INICIAL ---
16
  try:
 
20
  api_key_found = False
21
 
22
  print("Loading Whisper for transcription...")
23
+ whisper_model = whisper.load_model("base", device="cpu")
 
24
  print("Whisper model loaded.")
25
 
26
 
27
  # --- 1. DEFINICIÓN DE PROMPTS PARA LA IA ---
28
 
29
  CONVERSATION_SYSTEM_PROMPT = """
30
+ You are a friendly and encouraging English language tutor named Alex.
31
  A student will speak to you. Your task is to keep a natural, simple conversation going.
32
  1. Briefly analyze the user's previous response to estimate their CEFR level (A1, A2, B1, etc.).
33
  2. Formulate a simple, open-ended follow-up question that is appropriate for THAT estimated level.
 
107
  except (json.JSONDecodeError, KeyError) as e:
108
  print(f"Error parsing final report: {e}"); return chat_display, history_state, gr.Markdown(value="Error generating report.", visible=True), gr.Markdown(visible=False)
109
 
 
110
  def run_sentence_evaluation(audio_input, reference_transcript):
111
  if not api_key_found: raise gr.Error("OpenAI API key not found.")
112
  if audio_input is None or not reference_transcript:
113
  return 0, "N/A", "Please provide both an audio file and the reference text.", ""
114
+ sr, y = audio_input; temp_audio_path = "temp_audio_sentence.wav"; sf.write(temp_audio_path, y, sr)
 
 
 
 
115
  word_features = extract_word_level_features(temp_audio_path)
116
  if not word_features:
117
+ return 0, "N/A", "Could not process the audio.", ""
 
118
  prompt_data = {"reference_transcript": reference_transcript, "spoken_words": word_features}
 
119
  print("Sending detailed data to GPT-4o for sentence analysis...")
120
+ response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=[{"role": "system", "content": SENTENCE_EVALUATION_SYSTEM_PROMPT}, {"role": "user", "content": json.dumps(prompt_data)}])
 
 
 
 
 
121
  try:
122
  result = json.loads(response.choices[0].message.content)
123
  holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
124
  word_analysis_list = result['word_by_word_analysis']
125
+ md_table = "| Reference Word | Spoken Word | Score | Feedback (EN) | Feedback (ES) | Reference Audio |\n| :--- | :--- | :---: | :--- | :--- | :---: |\n"
126
+ os.makedirs("reference_audio", exist_ok=True)
 
 
 
 
 
127
  for index, item in enumerate(word_analysis_list):
128
+ word_to_speak = item['reference_word']; safe_filename = re.sub(r'\W+', '', word_to_speak.lower()); audio_path = f"reference_audio/{index}_{safe_filename}.mp3"
 
 
 
129
  try:
130
+ tts = gTTS(text=word_to_speak, lang='en'); tts.save(audio_path); audio_player = f'<audio src="file/{audio_path}" controls></audio>'
 
 
 
131
  except Exception as e:
132
+ print(f"Error al generar TTS para '{word_to_speak}': {e}"); audio_player = "Error"
133
+ md_table += (f"| **{item['reference_word']}** | {item['spoken_word']} | {item['word_score_100']} | {item['feedback_en']} | {item['feedback_es']} | {audio_player} |\n")
134
+ return (result.get("overall_score_100", 0), result.get("cefr_level", "N/A"), holistic_feedback_md, md_table)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  except (json.JSONDecodeError, KeyError) as e:
136
+ print(f"Error processing API response: {e}"); error_msg = "The API response was not in the expected format."
 
137
  return 0, "Error", error_msg, ""
138
 
139
+ # --- 3. INTERFAZ DE GRADIO CON PESTAÑAS ---
 
 
140
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
141
  gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
 
142
  with gr.Tabs():
143
+ # --- PESTAÑA 1: CHAT AI ---
144
  with gr.TabItem("Práctica Conversacional (Chat AI)"):
145
  with gr.Row():
146
  with gr.Column(scale=2):
147
+ chatbot = gr.Chatbot(value=[(None, "Hi there! I'm Alex. How are you doing today?")], label="Conversation with your AI Tutor", height=500)
148
  audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
149
  with gr.Column(scale=1):
150
  gr.Markdown("### Final Report"); feedback_en_out = gr.Markdown(label="English Feedback", visible=False); feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
 
163
  submit_btn_sentence = gr.Button("Get Assessment", variant="primary")
164
  with gr.Column(scale=2):
165
  gr.Markdown("### Assessment Summary")
166
+ with gr.Row():
167
  score_out_sentence = gr.Number(label="Overall Score (0-100)", interactive=False)
168
  level_out_sentence = gr.Textbox(label="Estimated CEFR Level", interactive=False)
169
  holistic_feedback_out_sentence = gr.Markdown(label="Examiner's Feedback")
 
171
  word_analysis_out_sentence = gr.Markdown(label="Phonetic Breakdown")
172
  def update_text(choice): return gr.Textbox(value=choice)
173
  tongue_twister_selector.change(fn=update_text, inputs=tongue_twister_selector, outputs=text_in_sentence)
174
+ submit_btn_sentence.click(fn=run_sentence_evaluation, inputs=[audio_in_sentence, text_in_sentence], outputs=[score_out_sentence, level_out_sentence, holistic_feedback_out_sentence, word_analysis_out_sentence])
175
+
 
 
 
176
  if __name__ == "__main__":
177
+ if not api_key_found: print("\nFATAL: OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
178
  else: demo.launch(debug=True)