Upload app.py
app.py CHANGED
@@ -22,12 +22,11 @@ except TypeError:
     api_key_found = False

 print("Loading Whisper for transcription...")
-whisper_model = whisper.load_model("base", device="cpu")
+whisper_model = whisper.load_model("base", device="cpu")
 print("Whisper model loaded.")


 # --- 1. PROMPT DEFINITIONS FOR THE AI ---
-# (These prompts are unchanged from your version)

 CONVERSATION_SYSTEM_PROMPT = """
 You are a friendly and encouraging English language tutor named Alex.
@@ -61,20 +60,27 @@ JSON Output Structure:
 }
 """

-# --- 2. LOGIC FUNCTIONS
+# --- 2. LOGIC FUNCTIONS ---

 def extract_word_level_features(audio_path):
-
-
+    try:
+        y, sr = librosa.load(audio_path, sr=16000)
+        result = whisper_model.transcribe(audio_path, word_timestamps=True, fp16=False)
+        if not result["segments"] or 'words' not in result["segments"][0]: return []
+        word_segments = result["segments"][0]["words"]
+        features_list = []
+        for segment in word_segments:
+            start_sample = int(segment['start'] * sr); end_sample = int(segment['end'] * sr)
+            word_audio = y[start_sample:end_sample]
+            rms_energy = np.mean(librosa.feature.rms(y=word_audio)) if len(word_audio) > 0 else 0
+            features_list.append({"word": segment['word'].strip(), "start": round(segment['start'], 2), "end": round(segment['end'], 2), "energy": round(float(rms_energy), 4)})
+        return features_list
+    except Exception as e:
+        print(f"Error during feature extraction: {e}"); return []

 def chat_interaction(audio_input, history_state):
     if not api_key_found: raise gr.Error("OpenAI API key not found.")
-    if audio_input is None:
-        user_turns = len(history_state[1:]) // 2 if history_state else 0
-        responses_remaining = 5 - user_turns
-        # Show the current state without doing anything if there is no audio
-        chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state), 2)]
-        return chat_display, history_state, f"Responses remaining: {responses_remaining}", gr.update(visible=False), gr.update(visible=False)
+    if audio_input is None: return history_state, history_state, gr.Markdown(visible=False), gr.Markdown(visible=False)

     sr, y = audio_input; temp_audio_path = "temp_audio_chat.wav"; sf.write(temp_audio_path, y, sr)
     user_text = client.audio.transcriptions.create(model="whisper-1", file=open(temp_audio_path, "rb")).text
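Side note on the new helper (not part of the commit): the per-word "energy" is just the mean RMS of the waveform slice between a word's start and end timestamps. A minimal self-contained sketch of that step, with invented timestamps standing in for the output of whisper_model.transcribe(..., word_timestamps=True):

import librosa
import numpy as np

# librosa can fetch a small example speech clip (needs network on first run);
# any 16 kHz mono recording works the same way.
y, sr = librosa.load(librosa.example("libri1"), sr=16000)

# Invented timestamps playing the role of Whisper's word segments.
words = [{"word": " he", "start": 0.50, "end": 0.70},
         {"word": " was", "start": 0.72, "end": 0.95}]

for seg in words:
    word_audio = y[int(seg["start"] * sr):int(seg["end"] * sr)]
    rms = np.mean(librosa.feature.rms(y=word_audio)) if len(word_audio) > 0 else 0
    print(seg["word"].strip(), round(float(rms), 4))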
@@ -84,17 +90,16 @@ def chat_interaction(audio_input, history_state):

     history_state.append({"role": "user", "content": user_text})

-
-
-
-    if user_turns < 5:
+    chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state) - 1, 2)]
+
+    if len(history_state) < 10:
         response = client.chat.completions.create(model="gpt-4o", messages=history_state, temperature=0.7)
         ai_response = response.choices[0].message.content
         history_state.append({"role": "assistant", "content": ai_response})
-        chat_display
-        return chat_display, history_state,
-    else:
-        print("Generating final evaluation...")
+        chat_display.append((user_text, ai_response))
+        return chat_display, history_state, gr.Markdown(visible=False), gr.Markdown(visible=False)
+    else:
+        print("Generating final evaluation...")
         final_messages = [{"role": "system", "content": FINAL_EVALUATION_SYSTEM_PROMPT}] + history_state[1:]
         response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=final_messages)
         try:
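Why `len(history_state) < 10` caps the chat at five user turns: the list holds one system message plus alternating user/assistant entries, so right after the k-th user message its length is 2k, and the guard first fails at k = 5. A small self-contained sketch of that bookkeeping (message contents are placeholders):

history = [{"role": "system", "content": "(system prompt)"}]
for turn in range(1, 7):
    history.append({"role": "user", "content": f"user message {turn}"})
    if len(history) < 10:
        history.append({"role": "assistant", "content": f"reply {turn}"})
        print(f"turn {turn}: len(history)={len(history)} -> normal reply")
    else:
        print(f"turn {turn}: len(history)={len(history)} -> final evaluation")
        break
# turns 1-4 get normal replies; turn 5 (len == 10) triggers the final report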
@@ -103,68 +108,55 @@ def chat_interaction(audio_input, history_state):
             for item in fb_en.get('word_by_word_feedback', []): md_en += f"- **{item['word']}**: {item['feedback']}\n"
             fb_es = result.get('feedback_es', {}); md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n### Fortalezas\n{fb_es.get('fortalezas', '')}\n### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n### Retroalimentación por Palabra\n"
             for item in fb_es.get('feedback_por_palabra', []): md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
-
-            chat_display
-
-
-            # --- KEY CHANGE: we reset the history for the next conversation ---
-            return chat_display, [], "Conversation finished!", gr.update(value=md_en, visible=True), gr.update(value=md_es, visible=True)
-        except Exception as e:
-            print(f"Error parsing final report: {e}")
-            chat_display = [(history_state[i]['content'], history_state[i+1]['content']) for i in range(1, len(history_state)-1, 2)]
-            return chat_display, [], "Error!", gr.update(value="Error generating report.", visible=True), gr.update(visible=False)
+            chat_display.append((user_text, "Thank you for the conversation! Here is your final report below."))
+            return chat_display, [], gr.Markdown(value=md_en, visible=True), gr.Markdown(value=md_es, visible=True)
+        except (json.JSONDecodeError, KeyError) as e:
+            print(f"Error parsing final report: {e}"); return [], [], gr.Markdown(value="Error generating report.", visible=True), gr.Markdown(visible=False)

 def run_sentence_evaluation(audio_input, reference_transcript):
-
-
-
-
+    if not api_key_found: raise gr.Error("OpenAI API key not found.")
+    if audio_input is None or not reference_transcript:
+        return 0, "N/A", "Please provide both an audio file and the reference text.", ""
+    sr, y = audio_input; temp_audio_path = "temp_audio_sentence.wav"; sf.write(temp_audio_path, y, sr)
+    word_features = extract_word_level_features(temp_audio_path)
+    if not word_features:
+        return 0, "N/A", "Could not process the audio.", ""
+    prompt_data = {"reference_transcript": reference_transcript, "spoken_words": word_features}
+    print("Sending detailed data to GPT-4o for sentence analysis...")
+    response = client.chat.completions.create(model="gpt-4o", response_format={"type": "json_object"}, messages=[{"role": "system", "content": SENTENCE_EVALUATION_SYSTEM_PROMPT}, {"role": "user", "content": json.dumps(prompt_data)}])
+    try:
+        result = json.loads(response.choices[0].message.content)
+        holistic_feedback_md = f"### Strengths\n{result['holistic_feedback']['strengths']}\n\n### Areas for Improvement\n{result['holistic_feedback']['areas_for_improvement']}"
+        word_analysis_list = result['word_by_word_analysis']
+        md_table = "| Reference Word | Spoken Word | Score | Feedback (EN) | Feedback (ES) | Reference Audio |\n| :--- | :--- | :---: | :--- | :--- | :---: |\n"
+        for index, item in enumerate(word_analysis_list):
+            word_to_speak = item['reference_word']
+            try:
+                tts = gTTS(text=word_to_speak, lang='en'); mp3_fp = io.BytesIO(); tts.write_to_fp(mp3_fp); mp3_fp.seek(0)
+                audio_base64 = base64.b64encode(mp3_fp.read()).decode('utf-8')
+                audio_player = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls></audio>'
+            except Exception as e:
+                print(f"Error generating TTS for '{word_to_speak}': {e}"); audio_player = "Error"
+            md_table += (f"| **{item['reference_word']}** | {item['spoken_word']} | {item['word_score_100']} | {item['feedback_en']} | {item['feedback_es']} | {audio_player} |\n")
+        return (result.get("overall_score_100", 0), result.get("cefr_level", "N/A"), holistic_feedback_md, md_table)
+    except (json.JSONDecodeError, KeyError) as e:
+        print(f"Error processing API response: {e}"); error_msg = "The API response was not in the expected format."
+        return 0, "Error", error_msg, ""
+
+# --- 3. GRADIO INTERFACE WITH TABS ---
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("# 🇬🇧 AI English Speaking Practice & Assessment")
-
     with gr.Tabs():
-        # --- TAB 1: CHAT AI
+        # --- TAB 1: CHAT AI ---
         with gr.TabItem("Práctica Conversacional (Chat AI)"):
             with gr.Row():
                 with gr.Column(scale=2):
                     chatbot = gr.Chatbot(value=[(None, "Hi there! I'm Alex. How are you doing today?")], label="Conversation with your AI Tutor", height=500)
                     audio_in_chat = gr.Audio(sources=["microphone"], type="numpy", label="Record your response")
-                    with gr.Row():
-                        counter_out = gr.Textbox(value="Responses remaining: 5", label="Conversation Progress", interactive=False)
-                        # --- CHANGE: button to restart the whole conversation ---
-                        new_conversation_btn = gr.Button("New Conversation")
-
                 with gr.Column(scale=1):
-                    gr.Markdown("### Final Report")
-                    feedback_en_out = gr.Markdown(label="English Feedback", visible=False)
-                    feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
-
+                    gr.Markdown("### Final Report"); feedback_en_out = gr.Markdown(label="English Feedback", visible=False); feedback_es_out = gr.Markdown(label="Retroalimentación en Español", visible=False)
             history = gr.State([])
-
-            # Function to clear the audio input after it is sent (not a button, an action)
-            def clear_audio_input():
-                return None
-
-            # Function to reset the whole conversation
-            def clear_conversation():
-                return [], [(None, "Hi there! I'm Alex. How are you doing today?")], "Responses remaining: 5", gr.update(visible=False), gr.update(visible=False), None
-
-            # --- CHANGE: the button is renamed and wired to the new reset function ---
-            new_conversation_btn.click(
-                fn=clear_conversation,
-                inputs=[],
-                outputs=[history, chatbot, counter_out, feedback_en_out, feedback_es_out, audio_in_chat]
-            )
-
-            audio_in_chat.stop_recording(
-                fn=chat_interaction,
-                inputs=[audio_in_chat, history],
-                outputs=[chatbot, history, counter_out, feedback_en_out, feedback_es_out]
-            ).then(
-                fn=clear_audio_input,
-                inputs=[],
-                outputs=[audio_in_chat]
-            )
+            audio_in_chat.stop_recording(fn=chat_interaction, inputs=[audio_in_chat, history], outputs=[chatbot, history, feedback_en_out, feedback_es_out])

         # --- TAB 2: SENTENCE EVALUATION ---
         with gr.TabItem("Evaluación por Frase"):
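A minimal standalone sketch (not part of the commit) of the reference-audio trick used in run_sentence_evaluation: synthesize one word with gTTS, keep the MP3 entirely in memory, and embed it as a base64 data URI inside an HTML audio tag. It assumes the gTTS package and network access, since gTTS calls Google's TTS endpoint:

import base64
import io

from gtts import gTTS

word = "comfortable"
tts = gTTS(text=word, lang="en")     # synthesize a single word
mp3_fp = io.BytesIO()
tts.write_to_fp(mp3_fp)              # MP3 bytes in memory, no temp file
mp3_fp.seek(0)
audio_base64 = base64.b64encode(mp3_fp.read()).decode("utf-8")
audio_player = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls></audio>'
print(audio_player[:64] + "...")     # this string drops straight into a Markdown table cell

The app places this string in a Markdown table cell; whether it renders as a playable control depends on the output component allowing raw HTML.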
@@ -188,7 +180,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             tongue_twister_selector.change(fn=update_text, inputs=tongue_twister_selector, outputs=text_in_sentence)
             submit_btn_sentence.click(fn=run_sentence_evaluation, inputs=[audio_in_sentence, text_in_sentence], outputs=[score_out_sentence, level_out_sentence, holistic_feedback_out_sentence, word_analysis_out_sentence])

-
 if __name__ == "__main__":
-    if not api_key_found: print("\nFATAL: OpenAI API key not found.")
+    if not api_key_found: print("\nFATAL: OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
     else: demo.launch(debug=True)
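The commit leaves the api_key_found startup check in place; one plausible setup for it (assumed, not shown in this diff) reads the key from the environment, which is what the expanded FATAL message now points at:

import os

from openai import OpenAI

# Assumed wiring: api_key_found mirrors whether OPENAI_API_KEY is set;
# OpenAI() itself reads that variable when no api_key argument is given.
api_key_found = bool(os.environ.get("OPENAI_API_KEY"))
client = OpenAI() if api_key_found else None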