Upload 8 files
Browse files
app.py
CHANGED
|
@@ -214,13 +214,13 @@ def process_assessment_audio(audio, assessment_type, item_index):
|
|
| 214 |
global current_item_index, assessment_results
|
| 215 |
|
| 216 |
if audio is None:
|
| 217 |
-
return None, f"No audio detected. Please try again.",
|
| 218 |
|
| 219 |
# Convert speech to text
|
| 220 |
transcript = speech_to_text(audio)
|
| 221 |
|
| 222 |
if not transcript:
|
| 223 |
-
return None, "I couldn't understand the speech. Please try again.",
|
| 224 |
|
| 225 |
# Process based on assessment type
|
| 226 |
if assessment_type == "articulation":
|
|
@@ -328,6 +328,39 @@ def init_language_assessment():
|
|
| 328 |
|
| 329 |
return audio_response, message, None, 0
|
| 330 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 331 |
def process_conversation_audio(audio):
|
| 332 |
"""Process recorded audio for conversation mode"""
|
| 333 |
if audio is None:
|
|
@@ -433,7 +466,7 @@ button.secondary {
|
|
| 433 |
|
| 434 |
# Create Gradio interface with tabs for different modes
|
| 435 |
with gr.Blocks(title="CASL 2 - Speech Therapy Assessment", css=custom_css) as demo:
|
| 436 |
-
# Current state variables
|
| 437 |
current_item_idx = gr.State(0)
|
| 438 |
|
| 439 |
# App header
|
|
@@ -623,11 +656,17 @@ with gr.Blocks(title="CASL 2 - Speech Therapy Assessment", css=custom_css) as de
|
|
| 623 |
outputs=[art_audio_output, art_result_display, current_item_idx, art_image]
|
| 624 |
)
|
| 625 |
|
| 626 |
-
#
|
| 627 |
-
|
| 628 |
-
fn=
|
| 629 |
-
inputs=[current_item_idx],
|
| 630 |
-
outputs=[art_item_indicator]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 631 |
)
|
| 632 |
|
| 633 |
# Connect components - Language Assessment
|
|
@@ -642,11 +681,17 @@ with gr.Blocks(title="CASL 2 - Speech Therapy Assessment", css=custom_css) as de
|
|
| 642 |
outputs=[lang_audio_output, lang_result_display, current_item_idx, gr.Image(visible=False)]
|
| 643 |
)
|
| 644 |
|
| 645 |
-
#
|
| 646 |
-
|
| 647 |
-
fn=
|
| 648 |
-
inputs=[current_item_idx],
|
| 649 |
-
outputs=[lang_item_indicator]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 650 |
)
|
| 651 |
|
| 652 |
# Launch the app
|
|
|
|
| 214 |
global current_item_index, assessment_results
|
| 215 |
|
| 216 |
if audio is None:
|
| 217 |
+
return None, f"No audio detected. Please try again.", item_index, None
|
| 218 |
|
| 219 |
# Convert speech to text
|
| 220 |
transcript = speech_to_text(audio)
|
| 221 |
|
| 222 |
if not transcript:
|
| 223 |
+
return None, "I couldn't understand the speech. Please try again.", item_index, None
|
| 224 |
|
| 225 |
# Process based on assessment type
|
| 226 |
if assessment_type == "articulation":
|
|
|
|
| 328 |
|
| 329 |
return audio_response, message, None, 0
|
| 330 |
|
| 331 |
+
def update_art_item_indicator(idx):
    """Render the articulation progress label as "position/total" (1-based)."""
    total = len(articulation_exercises['words'])
    return f"{idx + 1}/{total}"
|
| 334 |
+
|
| 335 |
+
def update_lang_item_indicator(idx):
    """Render the language-task progress label as "position/total" (1-based)."""
    total = len(language_exercises['tasks'])
    return f"{idx + 1}/{total}"
|
| 338 |
+
|
| 339 |
+
def navigate_articulation(direction, current_idx):
    """Step to the previous or next articulation word.

    ``direction`` is "prev" to move back (clamped at 0); any other value
    advances (clamped at the last word). Returns a 4-tuple for the UI:
    (indicator text, prompt message, image URL, new index).
    """
    if direction == "prev":
        new_idx = max(0, current_idx - 1)
    else:  # anything other than "prev" means advance
        new_idx = min(len(articulation_exercises["words"]) - 1, current_idx + 1)

    entry = articulation_exercises["words"][new_idx]
    return (
        update_art_item_indicator(new_idx),
        f"Current word: {entry['word']}",
        entry["imageUrl"],
        new_idx,
    )
|
| 351 |
+
|
| 352 |
+
def navigate_language(direction, current_idx):
    """Step to the previous or next language task.

    ``direction`` is "prev" to move back (clamped at 0); any other value
    advances (clamped at the last task). Returns a 3-tuple for the UI:
    (indicator text, prompt message, new index).
    """
    if direction == "prev":
        new_idx = max(0, current_idx - 1)
    else:  # anything other than "prev" means advance
        new_idx = min(len(language_exercises["tasks"]) - 1, current_idx + 1)

    task = language_exercises["tasks"][new_idx]
    return (
        update_lang_item_indicator(new_idx),
        f"Current task: {task['prompt']}",
        new_idx,
    )
|
| 363 |
+
|
| 364 |
def process_conversation_audio(audio):
|
| 365 |
"""Process recorded audio for conversation mode"""
|
| 366 |
if audio is None:
|
|
|
|
| 466 |
|
| 467 |
# Create Gradio interface with tabs for different modes
|
| 468 |
with gr.Blocks(title="CASL 2 - Speech Therapy Assessment", css=custom_css) as demo:
|
| 469 |
+
# Current state variables (in Gradio 3.50.0, State doesn't have a change event)
|
| 470 |
current_item_idx = gr.State(0)
|
| 471 |
|
| 472 |
# App header
|
|
|
|
| 656 |
outputs=[art_audio_output, art_result_display, current_item_idx, art_image]
|
| 657 |
)
|
| 658 |
|
| 659 |
+
# Fixed navigation for Gradio 3.50.0
|
| 660 |
+
art_next_button.click(
|
| 661 |
+
fn=navigate_articulation,
|
| 662 |
+
inputs=[gr.Textbox(value="next", visible=False), current_item_idx],
|
| 663 |
+
outputs=[art_item_indicator, art_current_display, art_image, current_item_idx]
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
art_prev_button.click(
|
| 667 |
+
fn=navigate_articulation,
|
| 668 |
+
inputs=[gr.Textbox(value="prev", visible=False), current_item_idx],
|
| 669 |
+
outputs=[art_item_indicator, art_current_display, art_image, current_item_idx]
|
| 670 |
)
|
| 671 |
|
| 672 |
# Connect components - Language Assessment
|
|
|
|
| 681 |
outputs=[lang_audio_output, lang_result_display, current_item_idx, gr.Image(visible=False)]
|
| 682 |
)
|
| 683 |
|
| 684 |
+
# Fixed navigation for language assessment
|
| 685 |
+
lang_next_button.click(
|
| 686 |
+
fn=navigate_language,
|
| 687 |
+
inputs=[gr.Textbox(value="next", visible=False), current_item_idx],
|
| 688 |
+
outputs=[lang_item_indicator, lang_current_display, current_item_idx]
|
| 689 |
+
)
|
| 690 |
+
|
| 691 |
+
lang_prev_button.click(
|
| 692 |
+
fn=navigate_language,
|
| 693 |
+
inputs=[gr.Textbox(value="prev", visible=False), current_item_idx],
|
| 694 |
+
outputs=[lang_item_indicator, lang_current_display, current_item_idx]
|
| 695 |
)
|
| 696 |
|
| 697 |
# Launch the app
|