# NOTE: The lines "Spaces:" / "Build error" below were HuggingFace Spaces page
# chrome captured by the scrape, not part of the application source. The Space
# was showing a build error when this file was extracted.
"""Gradio app exposing five video/image processing tasks as tabbed demos.

Each tab offers a video-processing column (live frame feed + final rendered
video) and an image-processing column. Three of the tasks additionally expose
a model-selection radio that is forwarded to the processing callbacks.
"""

import gradio as gr

from anomaly_gradio import process_anomalies_video, process_anomalies_image
from segmentation_gradio import process_segmentation_video, process_segmentation_image
from detection_gradio import process_detection_video, process_detection_image
from weather_gradio import process_weather_video, process_weather_image
from road_condition_gradio import process_road_condition_video, process_road_condition_image


def _build_task_tab(tab_title, heading, video_fn, image_fn, final_video_label,
                    model_choices=None, model_default=None):
    """Build one task tab (video + image columns) and wire its click events.

    Args:
        tab_title: Label shown on the gr.TabItem.
        heading: "### ..." heading rendered inside the tab.
        video_fn: Callback for video processing; yields/returns
            (live frame, final video, status markdown).
        image_fn: Callback for image processing; returns
            (processed image, status markdown).
        final_video_label: Label for the final processed-video component.
        model_choices: Optional list of model names. When given, a gr.Radio is
            added above the row and passed as a second input to both callbacks.
        model_default: Initial value for the model radio (only used when
            model_choices is given).
    """
    with gr.TabItem(tab_title):
        gr.Markdown(f"### {heading}")
        model_input = None
        if model_choices is not None:
            model_input = gr.Radio(model_choices, label="Choose Model", value=model_default)
        with gr.Row(variant="panel"):
            # Video column (wider).
            with gr.Column(scale=2):
                gr.Markdown("#### Video Processing")
                video_input = gr.Video(label="Upload Video")
                video_status = gr.Markdown("Status: Waiting for video...")
                video_button = gr.Button("Start Video Processing", variant="primary")
                live_output = gr.Image(label="Live Video Feed", interactive=False)
                final_video = gr.Video(label=final_video_label, interactive=False)
            # Image column.
            with gr.Column(scale=1):
                gr.Markdown("#### Image Processing")
                image_input = gr.Image(label="Upload Image", type="numpy")
                image_status = gr.Markdown("Status: Waiting for image...")
                image_button = gr.Button("Start Image Processing", variant="primary")
                image_output = gr.Image(label="Processed Image Output", interactive=False)
        # Click events: the model radio, when present, rides along as a second input.
        video_inputs = video_input if model_input is None else [video_input, model_input]
        image_inputs = image_input if model_input is None else [image_input, model_input]
        video_button.click(fn=video_fn, inputs=video_inputs,
                           outputs=[live_output, final_video, video_status])
        image_button.click(fn=image_fn, inputs=image_inputs,
                           outputs=[image_output, image_status])


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # Video and Image Processing Tasks
        Select a tab and upload either a video or an image to start processing.
        """
    )
    with gr.Tabs():
        # --- ANOMALY DETECTION TAB ---
        _build_task_tab(
            "Anomaly Detection", "Anomaly Detection",
            process_anomalies_video, process_anomalies_image,
            "Final Processed Video",
        )
        # --- SEMANTIC SEGMENTATION TAB ---
        _build_task_tab(
            "Semantic Segmentation", "Semantic Segmentation",
            process_segmentation_video, process_segmentation_image,
            "Final Segmented Video",
            model_choices=["MobileNet", "ResNet34"], model_default="MobileNet",
        )
        # --- OBJECT DETECTION TAB ---
        _build_task_tab(
            "Object Detection", "Object Detection",
            process_detection_video, process_detection_image,
            "Final Detection Video",
            model_choices=["Pre-trained YOLOv8n", "Custom Model"],
            model_default="Pre-trained YOLOv8n",
        )
        # --- WEATHER CLASSIFICATION TAB ---
        _build_task_tab(
            "Weather Classification", "Weather Classification",
            process_weather_video, process_weather_image,
            "Final Weather Video",
        )
        # --- ROAD CONDITION TAB ---
        _build_task_tab(
            "Road Condition", "Road Condition Classification",
            process_road_condition_video, process_road_condition_image,
            "Final Road Condition Video",
            model_choices=["ResNet101", "Xception"], model_default="ResNet101",
        )

if __name__ == "__main__":
    demo.launch()