txarst committed on
Commit
0bb4f33
·
1 Parent(s): f34f8d4

indentation

Browse files
Files changed (1) hide show
  1. gradio_app.py +87 -87
gradio_app.py CHANGED
@@ -281,93 +281,93 @@ def create_gradio_interface():
281
  try:
282
  # Create a unified interface that can handle both images and videos
283
  with gr.Blocks(title="πŸ‘οΈ PupilSense πŸ‘οΈπŸ•΅οΈβ€β™‚οΈ") as demo:
284
- gr.Markdown("# πŸ‘οΈ PupilSense - Pupil Diameter Analysis")
285
- gr.Markdown("Upload an image or video to estimate pupil diameter using deep learning models.")
286
-
287
- with gr.Tab("Image Processing"):
288
- with gr.Row():
289
- with gr.Column():
290
- image_input = gr.Image(type="pil", label="Upload Image")
291
- image_pupil_selection = gr.Dropdown(
292
- ["left_pupil", "right_pupil", "both"],
293
- value="both",
294
- label="Pupil Selection"
295
- )
296
- image_model = gr.Dropdown(
297
- ["ResNet18", "ResNet50"],
298
- value="ResNet18",
299
- label="Model"
300
- )
301
- image_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
302
- image_submit = gr.Button("Process Image", variant="primary")
303
-
304
- with gr.Column():
305
- image_output = gr.Image(label="Results")
306
- image_text_output = gr.Textbox(label="Pupil Diameter Results", lines=5)
307
-
308
- image_submit.click(
309
- fn=process_image_simple,
310
- inputs=[image_input, image_pupil_selection, image_model, image_blink_detection],
311
- outputs=[image_output, image_text_output]
312
- )
313
-
314
- with gr.Tab("Video Processing"):
315
- with gr.Row():
316
- with gr.Column():
317
- video_input = gr.Video(label="Upload Video")
318
- video_pupil_selection = gr.Dropdown(
319
- ["left_pupil", "right_pupil", "both"],
320
- value="both",
321
- label="Pupil Selection"
322
- )
323
- video_model = gr.Dropdown(
324
- ["ResNet18", "ResNet50"],
325
- value="ResNet18",
326
- label="Model"
327
- )
328
- video_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
329
- video_submit = gr.Button("Process Video", variant="primary")
330
-
331
- with gr.Column():
332
- video_output = gr.Image(label="Diameter Analysis")
333
- video_text_output = gr.Textbox(label="Summary Statistics", lines=10)
334
-
335
- video_submit.click(
336
- fn=process_video_simple,
337
- inputs=[video_input, video_pupil_selection, video_model, video_blink_detection],
338
- outputs=[video_output, video_text_output]
339
- )
340
-
341
- # Add a unified API endpoint that can handle both images and videos
342
- with gr.Tab("API Testing"):
343
- gr.Markdown("### API Endpoint for External Access")
344
- gr.Markdown("This endpoint can process both images and videos programmatically.")
345
-
346
- with gr.Row():
347
- with gr.Column():
348
- api_media_input = gr.File(label="Upload Image or Video File")
349
- api_pupil_selection = gr.Dropdown(
350
- ["left_pupil", "right_pupil", "both"],
351
- value="both",
352
- label="Pupil Selection"
353
- )
354
- api_model = gr.Dropdown(
355
- ["ResNet18", "ResNet50"],
356
- value="ResNet18",
357
- label="Model"
358
- )
359
- api_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
360
- api_submit = gr.Button("Process Media", variant="primary")
361
-
362
- with gr.Column():
363
- api_output = gr.Image(label="Results")
364
- api_text_output = gr.Textbox(label="Analysis Results", lines=10)
365
-
366
- api_submit.click(
367
- fn=process_media_unified,
368
- inputs=[api_media_input, api_pupil_selection, api_model, api_blink_detection],
369
- outputs=[api_output, api_text_output]
370
- )
371
 
372
  print("βœ… Gradio interface created successfully")
373
  return demo
 
281
  try:
282
  # Create a unified interface that can handle both images and videos
283
  with gr.Blocks(title="πŸ‘οΈ PupilSense πŸ‘οΈπŸ•΅οΈβ€β™‚οΈ") as demo:
284
+ gr.Markdown("# πŸ‘οΈ PupilSense - Pupil Diameter Analysis")
285
+ gr.Markdown("Upload an image or video to estimate pupil diameter using deep learning models.")
286
+
287
+ with gr.Tab("Image Processing"):
288
+ with gr.Row():
289
+ with gr.Column():
290
+ image_input = gr.Image(type="pil", label="Upload Image")
291
+ image_pupil_selection = gr.Dropdown(
292
+ ["left_pupil", "right_pupil", "both"],
293
+ value="both",
294
+ label="Pupil Selection"
295
+ )
296
+ image_model = gr.Dropdown(
297
+ ["ResNet18", "ResNet50"],
298
+ value="ResNet18",
299
+ label="Model"
300
+ )
301
+ image_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
302
+ image_submit = gr.Button("Process Image", variant="primary")
303
+
304
+ with gr.Column():
305
+ image_output = gr.Image(label="Results")
306
+ image_text_output = gr.Textbox(label="Pupil Diameter Results", lines=5)
307
+
308
+ image_submit.click(
309
+ fn=process_image_simple,
310
+ inputs=[image_input, image_pupil_selection, image_model, image_blink_detection],
311
+ outputs=[image_output, image_text_output]
312
+ )
313
+
314
+ with gr.Tab("Video Processing"):
315
+ with gr.Row():
316
+ with gr.Column():
317
+ video_input = gr.Video(label="Upload Video")
318
+ video_pupil_selection = gr.Dropdown(
319
+ ["left_pupil", "right_pupil", "both"],
320
+ value="both",
321
+ label="Pupil Selection"
322
+ )
323
+ video_model = gr.Dropdown(
324
+ ["ResNet18", "ResNet50"],
325
+ value="ResNet18",
326
+ label="Model"
327
+ )
328
+ video_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
329
+ video_submit = gr.Button("Process Video", variant="primary")
330
+
331
+ with gr.Column():
332
+ video_output = gr.Image(label="Diameter Analysis")
333
+ video_text_output = gr.Textbox(label="Summary Statistics", lines=10)
334
+
335
+ video_submit.click(
336
+ fn=process_video_simple,
337
+ inputs=[video_input, video_pupil_selection, video_model, video_blink_detection],
338
+ outputs=[video_output, video_text_output]
339
+ )
340
+
341
+ # Add a unified API endpoint that can handle both images and videos
342
+ with gr.Tab("API Testing"):
343
+ gr.Markdown("### API Endpoint for External Access")
344
+ gr.Markdown("This endpoint can process both images and videos programmatically.")
345
+
346
+ with gr.Row():
347
+ with gr.Column():
348
+ api_media_input = gr.File(label="Upload Image or Video File")
349
+ api_pupil_selection = gr.Dropdown(
350
+ ["left_pupil", "right_pupil", "both"],
351
+ value="both",
352
+ label="Pupil Selection"
353
+ )
354
+ api_model = gr.Dropdown(
355
+ ["ResNet18", "ResNet50"],
356
+ value="ResNet18",
357
+ label="Model"
358
+ )
359
+ api_blink_detection = gr.Checkbox(value=True, label="Detect Blinks")
360
+ api_submit = gr.Button("Process Media", variant="primary")
361
+
362
+ with gr.Column():
363
+ api_output = gr.Image(label="Results")
364
+ api_text_output = gr.Textbox(label="Analysis Results", lines=10)
365
+
366
+ api_submit.click(
367
+ fn=process_media_unified,
368
+ inputs=[api_media_input, api_pupil_selection, api_model, api_blink_detection],
369
+ outputs=[api_output, api_text_output]
370
+ )
371
 
372
  print("βœ… Gradio interface created successfully")
373
  return demo