# app.py — Voice Reconstruction Demo (Gradio)
# Provenance: HuggingFace Space page header — "Update app.py", commit 3727aa9 (verified).
import gradio as gr
import os
# ---------- Helpers ----------
def file_if_exists(p):
    """Return *p* if it names an existing path, else None.

    Gradio media components treat None as "no content", so missing demo
    assets render empty instead of raising an error.
    """
    if os.path.exists(p):
        return p
    return None
# ---------- Case Data ----------
# Asset filenames are relative to the app's working directory; missing
# files degrade gracefully (components receive None).

# Case 1 (ALS): four images, one predicted audio clip, one score.
case1_images = ["1.png", "2.png", "3.png", "4.png"]
case1_audio = ["1.wav"]
case1_score = 0.940
case1_text = (
    "A 57-year-old male presents with progressive weakness in his right hand over the past 6 months. "
    "Over the past 2 months, he has noticed slurred speech and difficulty projecting his voice with occasional choking on liquids. "
    # Fix: trailing space added so concatenation doesn't yield "dysarthria.Diagnosis".
    "Exam: fasciculations, hyperreflexia, mild dysarthria. "
    "Diagnosis: ALS with early bulbar involvement."
)

# Case 2 (Congenital mutism): four images, three predicted audio clips, one score.
case2_images = ["5.png", "6.png", "7.png", "8.png"]
case2_audio = ["1.wav", "2.wav", "3.wav"]
case2_score = 0.750
case2_text = (
    "A 23-year-old male was born with congenital mutism and has never spoken. He now seeks to restore his voice using "
    "rt-MRI of the vocal tract. Normal cognition and hearing. Physical exam shows no structural abnormalities. "
    "Imaging guides personalized speech restoration."
)
# ---------- Step Handlers ----------
def case1_next(step):
    """Advance Case 1 by one step.

    Returns a 5-tuple matching the button's outputs:
    (step, image_path, case_text, audio_path, score_string).
    """
    step += 1
    n_images = len(case1_images)
    if step < n_images:
        # Steps 1 .. n_images-1: reveal the next image (image 0 is shown on load).
        return step, file_if_exists(case1_images[step]), case1_text, None, ""
    if step == n_images:
        # One audio step after the images: present the predicted audio clip.
        return step, None, case1_text, file_if_exists(case1_audio[0]), ""
    # Final step (and any extra clicks): display the score.
    return step, None, case1_text, None, f"{case1_score:.3f}"
def case2_next(step):
    """Advance Case 2 by one step.

    Returns a 7-tuple matching the button's outputs:
    (step, image_path, case_text, audio1, audio2, audio3, score_string).
    """
    step += 1
    n_images = len(case2_images)
    if step < n_images:
        # Steps 1 .. n_images-1: reveal the next image (image 0 is shown on load).
        return step, file_if_exists(case2_images[step]), case2_text, None, None, None, ""
    if step == n_images:
        # Audio step: surface all three predicted clips at once.
        clips = [file_if_exists(a) for a in case2_audio]
        return step, None, case2_text, clips[0], clips[1], clips[2], ""
    # Final step (and any extra clicks): display the score.
    return step, None, case2_text, None, None, None, f"{case2_score:.3f}"
# ---------- UI ----------
# Two-tab walkthrough: each tab steps through images, then audio, then a score.
# NOTE(review): the original paste had its indentation stripped; the layout
# below (audio/score components placed under the image+text row) is the most
# plausible reconstruction — confirm against the deployed Space.
with gr.Blocks(css="""
#img1, #img2 { max-height: 70vh; }
""") as demo:
    gr.Markdown("# Voice Reconstruction Demo")

    with gr.Tabs():
        # ---------------- Case 1 ----------------
        with gr.Tab("Case 1: ALS Patient"):
            step1 = gr.State(0)
            with gr.Row():
                # Wider image area, smaller case box (lines=6).
                img_out1 = gr.Image(
                    value=file_if_exists(case1_images[0]),
                    type="filepath",
                    interactive=False,
                    elem_id="img1",
                    label="Image",
                    show_download_button=False,
                )
                text_out1 = gr.Textbox(value=case1_text, label="Case Description", lines=6)
            audio_out1 = gr.Audio(label="Predicted Audio", type="filepath")
            score_out1 = gr.Textbox(label="Score", lines=1)
            with gr.Row():
                next_btn1 = gr.Button("Next Step", variant="primary")
                reset_btn1 = gr.Button("Reset")

            def reset_case1():
                """Return Case 1 to its initial state: first image, no audio, no score."""
                return 0, file_if_exists(case1_images[0]), case1_text, None, ""

            outputs1 = [step1, img_out1, text_out1, audio_out1, score_out1]
            reset_btn1.click(fn=reset_case1, inputs=[], outputs=outputs1)
            next_btn1.click(fn=case1_next, inputs=[step1], outputs=outputs1)

        # ---------------- Case 2 ----------------
        with gr.Tab("Case 2: Congenital Mutism"):
            step2 = gr.State(0)
            with gr.Row():
                img_out2 = gr.Image(
                    value=file_if_exists(case2_images[0]),
                    type="filepath",
                    interactive=False,
                    elem_id="img2",
                    label="Image",
                    show_download_button=False,
                )
                text_out2 = gr.Textbox(value=case2_text, label="Case Description", lines=6)
            with gr.Row():
                audio_out2a = gr.Audio(label="Predicted Audio 1", type="filepath")
                audio_out2b = gr.Audio(label="Predicted Audio 2", type="filepath")
                audio_out2c = gr.Audio(label="Predicted Audio 3", type="filepath")
            score_out2 = gr.Textbox(label="Score", lines=1)
            with gr.Row():
                next_btn2 = gr.Button("Next Step", variant="primary")
                reset_btn2 = gr.Button("Reset")

            def reset_case2():
                """Return Case 2 to its initial state: first image, no audio, no score."""
                return 0, file_if_exists(case2_images[0]), case2_text, None, None, None, ""

            outputs2 = [step2, img_out2, text_out2, audio_out2a, audio_out2b, audio_out2c, score_out2]
            reset_btn2.click(fn=reset_case2, inputs=[], outputs=outputs2)
            next_btn2.click(fn=case2_next, inputs=[step2], outputs=outputs2)

demo.launch()