3g3ueiw committed on
Commit
3727aa9
·
verified ·
1 Parent(s): 854cbff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +130 -37
app.py CHANGED
@@ -1,42 +1,135 @@
1
  import gradio as gr
2
  import os
3
 
4
- # Define cases
5
- cases = {
6
- "Case 1": {
7
- "description": "A 55-year-old male with a 2-year history of progressive ALS. He initially presented with weakness in his right hand, gradually developing slurred speech and voice changes. Currently, his speech is slow, nasal, and difficult to understand, reflecting bulbar involvement.",
8
- "images": ["1.png", "2.png", "3.png", "4.png", "5.png"],
9
- "audios": ["1.wav", "2.wav"]
10
- },
11
- "Case 2": {
12
- "description": "A 62-year-old female diagnosed with bulbar-onset ALS. Her earliest symptom was voice hoarseness, followed by progressive dysarthria and difficulty swallowing. She reports frustration with communication as her speech has become nearly unintelligible.",
13
- "images": ["6.png", "7.png", "8.png", "9.png", "10.png"],
14
- "audios": ["3.wav", "4.wav"]
15
- }
16
- }
17
-
18
- def load_case(case_name):
19
- case = cases[case_name]
20
- return case["description"], case["images"], case["audios"]
21
-
22
- with gr.Blocks() as demo:
23
- gr.Markdown("## ALS Case Studies")
24
-
25
- with gr.Row():
26
- case_selector = gr.Dropdown(list(cases.keys()), label="Select Case", value="Case 1")
27
-
28
- with gr.Row():
29
- case_description = gr.Textbox(label="Case Description", interactive=False, lines=3) # smaller box
30
-
31
- with gr.Row():
32
- with gr.Column(scale=2): # wider space for images
33
- image_gallery = gr.Gallery(label="Case Images").style(grid=[2], height="auto")
34
- with gr.Column(scale=1):
35
- audio_gallery = gr.Audio(label="Case Audio", type="filepath")
36
-
37
- case_selector.change(fn=load_case, inputs=case_selector, outputs=[case_description, image_gallery, audio_gallery])
38
-
39
- # Load default case
40
- demo.load(fn=load_case, inputs=case_selector, outputs=[case_description, image_gallery, audio_gallery])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  demo.launch()
 
1
  import gradio as gr
2
  import os
3
 
4
# ---------- Helpers ----------
def file_if_exists(p):
    """Return the path *p* unchanged when it exists on disk, otherwise None.

    Gradio media components accept None to mean "show nothing", so missing
    asset files degrade gracefully instead of raising.
    """
    if os.path.exists(p):
        return p
    return None
8
# ---------- Case Data ----------
# Each case has an ordered image sequence, predicted audio file(s), a
# similarity score shown at the final step, and a narrative description.

# Case 1 (ALS)
case1_images = ["1.png", "2.png", "3.png", "4.png"]
case1_audio = ["1.wav"]
case1_score = 0.940
case1_text = (
    "A 57-year-old male presents with progressive weakness in his right hand over the past 6 months. "
    "Over the past 2 months, he has noticed slurred speech and difficulty projecting his voice with occasional choking on liquids. "
    # FIX: trailing space added — the original concatenation produced
    # "dysarthria.Diagnosis:" with the two sentences run together.
    "Exam: fasciculations, hyperreflexia, mild dysarthria. "
    "Diagnosis: ALS with early bulbar involvement."
)

# Case 2 (Congenital mutism)
case2_images = ["5.png", "6.png", "7.png", "8.png"]
case2_audio = ["1.wav", "2.wav", "3.wav"]
case2_score = 0.750
case2_text = (
    "A 23-year-old male was born with congenital mutism and has never spoken. He now seeks to restore his voice using "
    "rt-MRI of the vocal tract. Normal cognition and hearing. Physical exam shows no structural abnormalities. "
    "Imaging guides personalized speech restoration."
)
30
# ---------- Step Handlers ----------
def case1_next(step):
    """Advance Case 1 by one step and return the new UI state.

    Phases: remaining images (steps 1..len(images)-1), then the single
    predicted audio clip, then the score.

    Returns (step, image, description, audio, score_text).
    """
    step = step + 1
    n_images = len(case1_images)
    if step < n_images:
        # Image phase: page through the remaining case images.
        return step, file_if_exists(case1_images[step]), case1_text, None, ""
    if step == n_images:
        # Audio phase: reveal the predicted audio, clear the image.
        return step, None, case1_text, file_if_exists(case1_audio[0]), ""
    # Score phase: clear all media and show the formatted score.
    return step, None, case1_text, None, f"{case1_score:.3f}"
45
def case2_next(step):
    """Advance Case 2 by one step and return the new UI state.

    Phases: remaining images (steps 1..len(images)-1), then all three
    predicted audio clips at once, then the score.

    Returns (step, image, description, audio1, audio2, audio3, score_text).
    """
    step = step + 1
    n_images = len(case2_images)
    if step < n_images:
        # Image phase: page through the remaining case images.
        return step, file_if_exists(case2_images[step]), case2_text, None, None, None, ""
    if step == n_images:
        # Audio phase: populate all three players simultaneously.
        clips = [file_if_exists(p) for p in case2_audio]
        return step, None, case2_text, clips[0], clips[1], clips[2], ""
    # Score phase: clear all media and show the formatted score.
    return step, None, case2_text, None, None, None, f"{case2_score:.3f}"
61
# ---------- UI ----------
# Two-tab Gradio app. Each tab steps through a case: images, then predicted
# audio, then a similarity score. The CSS caps image height so tall images
# do not push the controls off-screen.
with gr.Blocks(css="""
#img1, #img2 { max-height: 70vh; }
""") as demo:
    gr.Markdown("# Voice Reconstruction Demo")

    with gr.Tabs():
        # ---------------- Case 1 ----------------
        with gr.Tab("Case 1: ALS Patient"):
            # step1 tracks how far this user has clicked through Case 1.
            step1 = gr.State(0)
            with gr.Row():
                # NOTE(review): the original comment here claimed scale=3 /
                # scale=2 columns, but no scale kwargs are set — confirm the
                # intended layout.
                img_out1 = gr.Image(
                    value=file_if_exists(case1_images[0]), type="filepath", interactive=False, elem_id="img1", label="Image",
                    show_download_button=False
                )
                text_out1 = gr.Textbox(value=case1_text, label="Case Description", lines=6)

            audio_out1 = gr.Audio(label="Predicted Audio", type="filepath")
            score_out1 = gr.Textbox(label="Score", lines=1)

            with gr.Row():
                next_btn1 = gr.Button("Next Step", variant="primary")
                reset_btn1 = gr.Button("Reset")

            def reset_case1():
                # Back to step 0: first image, description, no audio, empty score.
                return 0, file_if_exists(case1_images[0]), case1_text, None, ""
            reset_btn1.click(
                fn=reset_case1,
                inputs=[],
                outputs=[step1, img_out1, text_out1, audio_out1, score_out1]
            )

            # Output order must match the tuple returned by case1_next.
            next_btn1.click(
                fn=case1_next,
                inputs=[step1],
                outputs=[step1, img_out1, text_out1, audio_out1, score_out1]
            )

        # ---------------- Case 2 ----------------
        with gr.Tab("Case 2: Congenital Mutism"):
            # step2 tracks how far this user has clicked through Case 2.
            step2 = gr.State(0)
            with gr.Row():
                img_out2 = gr.Image(
                    value=file_if_exists(case2_images[0]), type="filepath", interactive=False, elem_id="img2", label="Image",
                    show_download_button=False
                )
                text_out2 = gr.Textbox(value=case2_text, label="Case Description", lines=6)

            # Case 2 reveals three predicted audio clips side by side.
            with gr.Row():
                audio_out2a = gr.Audio(label="Predicted Audio 1", type="filepath")
                audio_out2b = gr.Audio(label="Predicted Audio 2", type="filepath")
                audio_out2c = gr.Audio(label="Predicted Audio 3", type="filepath")

            score_out2 = gr.Textbox(label="Score", lines=1)

            with gr.Row():
                next_btn2 = gr.Button("Next Step", variant="primary")
                reset_btn2 = gr.Button("Reset")

            def reset_case2():
                # Back to step 0: first image, description, no audio, empty score.
                return 0, file_if_exists(case2_images[0]), case2_text, None, None, None, ""
            reset_btn2.click(
                fn=reset_case2,
                inputs=[],
                outputs=[step2, img_out2, text_out2, audio_out2a, audio_out2b, audio_out2c, score_out2]
            )

            # Output order must match the tuple returned by case2_next.
            next_btn2.click(
                fn=case2_next,
                inputs=[step2],
                outputs=[step2, img_out2, text_out2, audio_out2a, audio_out2b, audio_out2c, score_out2]
            )

demo.launch()