Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
CHANGED
|
@@ -93,16 +93,31 @@ def chat_interaction(audio_input, history_state):
|
|
| 93 |
if not history_state:
|
| 94 |
history_state = [{"role": "system", "content": CONVERSATION_SYSTEM_PROMPT}]
|
| 95 |
|
| 96 |
-
history_state.append({"role": "user", "content": user_text})
|
| 97 |
|
| 98 |
user_turns = (len(history_state) - 1) // 2
|
| 99 |
responses_remaining = 5 - user_turns
|
| 100 |
|
| 101 |
if user_turns < 5:
|
| 102 |
response = client.chat.completions.create(model="gpt-4o", messages=history_state, temperature=0.7)
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
return chat_display, history_state, f"Responses remaining: {responses_remaining}", gr.update(visible=False), gr.update(visible=False)
|
| 107 |
else: # Turno 5: generar evaluación
|
| 108 |
print("Generating final evaluation...")
|
|
@@ -115,15 +130,14 @@ def chat_interaction(audio_input, history_state):
|
|
| 115 |
fb_es = result.get('feedback_es', {}); md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n### Fortalezas\n{fb_es.get('fortalezas', '')}\n### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n### Retroalimentación por Palabra\n"
|
| 116 |
for item in fb_es.get('feedback_por_palabra', []): md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
|
| 117 |
|
| 118 |
-
chat_display = [(
|
| 119 |
-
chat_display
|
| 120 |
|
| 121 |
# --- CAMBIO CLAVE: Reiniciamos el historial para la siguiente conversación ---
|
| 122 |
return chat_display, [], "Conversation finished!", gr.update(value=md_en, visible=True), gr.update(value=md_es, visible=True)
|
| 123 |
except Exception as e:
|
| 124 |
print(f"Error parsing final report: {e}")
|
| 125 |
-
|
| 126 |
-
return chat_display, [], "Error!", gr.update(value="Error generating report.", visible=True), gr.update(visible=False)
|
| 127 |
|
| 128 |
def run_sentence_evaluation(audio_input, reference_transcript):
|
| 129 |
if not api_key_found: raise gr.Error("OpenAI API key not found.")
|
|
@@ -202,7 +216,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 202 |
inputs=[],
|
| 203 |
outputs=[audio_in_chat]
|
| 204 |
)
|
| 205 |
-
|
| 206 |
# --- PESTAÑA 2: EVALUACIÓN POR FRASE ---
|
| 207 |
with gr.TabItem("Evaluación por Frase"):
|
| 208 |
TONGUE_TWISTERS = ["Peter Piper picked a peck of pickled peppers.", "She sells seashells by the seashore.", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", "Betty Botter bought some butter but she said the butter's bitter.", "A proper copper coffee pot."]
|
|
|
|
| 93 |
if not history_state:
|
| 94 |
history_state = [{"role": "system", "content": CONVERSATION_SYSTEM_PROMPT}]
|
| 95 |
|
| 96 |
+
history_state.append({"role": "user", "content": user_text, "display_content": user_text})
|
| 97 |
|
| 98 |
user_turns = (len(history_state) - 1) // 2
|
| 99 |
responses_remaining = 5 - user_turns
|
| 100 |
|
| 101 |
if user_turns < 5:
|
| 102 |
response = client.chat.completions.create(model="gpt-4o", messages=history_state, temperature=0.7)
|
| 103 |
+
ai_response_text = response.choices[0].message.content
|
| 104 |
+
|
| 105 |
+
try:
|
| 106 |
+
tts = gTTS(text=ai_response_text, lang='en')
|
| 107 |
+
mp3_fp = io.BytesIO()
|
| 108 |
+
tts.write_to_fp(mp3_fp)
|
| 109 |
+
mp3_fp.seek(0)
|
| 110 |
+
audio_base64 = base64.b64encode(mp3_fp.read()).decode('utf-8')
|
| 111 |
+
audio_player = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'
|
| 112 |
+
ai_display_content = f"{ai_response_text}<br>{audio_player}"
|
| 113 |
+
except Exception as e:
|
| 114 |
+
print(f"Error al generar TTS para la respuesta del chat: {e}")
|
| 115 |
+
ai_display_content = ai_response_text
|
| 116 |
+
|
| 117 |
+
history_state.append({"role": "assistant", "content": ai_response_text, "display_content": ai_display_content})
|
| 118 |
+
|
| 119 |
+
chat_display = [(msg['display_content'], None) if msg['role']=='user' else (None, msg['display_content']) for msg in history_state[1:]]
|
| 120 |
+
|
| 121 |
return chat_display, history_state, f"Responses remaining: {responses_remaining}", gr.update(visible=False), gr.update(visible=False)
|
| 122 |
else: # Turno 5: generar evaluación
|
| 123 |
print("Generating final evaluation...")
|
|
|
|
| 130 |
fb_es = result.get('feedback_es', {}); md_es = f"## Reporte Final (Nivel MCERL: {result.get('cefr_level', 'N/A')})\n### Fortalezas\n{fb_es.get('fortalezas', '')}\n### Áreas a Mejorar\n{fb_es.get('areas_a_mejorar', '')}\n### Retroalimentación por Palabra\n"
|
| 131 |
for item in fb_es.get('feedback_por_palabra', []): md_es += f"- **{item['palabra']}**: {item['feedback']}\n"
|
| 132 |
|
| 133 |
+
chat_display = [(msg['display_content'], None) if msg['role']=='user' else (None, msg['display_content']) for msg in history_state[1:]]
|
| 134 |
+
chat_display[-1] = (chat_display[-1][0], "Thank you! Your final report is now available on the right.")
|
| 135 |
|
| 136 |
# --- CAMBIO CLAVE: Reiniciamos el historial para la siguiente conversación ---
|
| 137 |
return chat_display, [], "Conversation finished!", gr.update(value=md_en, visible=True), gr.update(value=md_es, visible=True)
|
| 138 |
except Exception as e:
|
| 139 |
print(f"Error parsing final report: {e}")
|
| 140 |
+
return history_state[1:], [], "Error!", gr.update(value="Error generating report.", visible=True), gr.update(visible=False)
|
|
|
|
| 141 |
|
| 142 |
def run_sentence_evaluation(audio_input, reference_transcript):
|
| 143 |
if not api_key_found: raise gr.Error("OpenAI API key not found.")
|
|
|
|
| 216 |
inputs=[],
|
| 217 |
outputs=[audio_in_chat]
|
| 218 |
)
|
| 219 |
+
|
| 220 |
# --- PESTAÑA 2: EVALUACIÓN POR FRASE ---
|
| 221 |
with gr.TabItem("Evaluación por Frase"):
|
| 222 |
TONGUE_TWISTERS = ["Peter Piper picked a peck of pickled peppers.", "She sells seashells by the seashore.", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", "Betty Botter bought some butter but she said the butter's bitter.", "A proper copper coffee pot."]
|