dsid271 committed on
Commit
d9a911c
·
verified ·
1 Parent(s): 5ed2ac0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from ultralytics import YOLO
import os
import shutil

# 1. Load the fine-tuned YOLO model.
# The original Colab training path is kept as the default, but MODEL_PATH
# lets a deployment (e.g. a Hugging Face Space, where /content does not
# exist) point at a bundled weights file without editing the code.
model_path = os.environ.get("MODEL_PATH", "/content/runs/detect/train/weights/best.pt")
model = YOLO(model_path)

# Directory where annotated inference outputs are written by model.predict().
output_dir = "inference_results"
os.makedirs(output_dir, exist_ok=True)
# 2. Define an inference function
def predict_image_or_video(input_file, conf_threshold):
    """Run YOLO inference on an image or video and return the annotated result path.

    Args:
        input_file: Filesystem path to the uploaded image/video, or None.
        conf_threshold: Minimum detection confidence passed to the model.

    Returns:
        Path to the annotated output file, or None if there is no input or
        no output was produced (None clears the Gradio output component;
        returning an error *string* here would be misread as a file path).
    """
    if input_file is None:
        print("No input file provided.")
        return None

    print(f"Processing: {input_file}")
    print(f"Confidence threshold: {conf_threshold}")

    # Clear previous results. model.predict(..., name="run") creates a
    # SUBDIRECTORY inside output_dir, so plain os.remove() would raise
    # IsADirectoryError on every call after the first — handle files and
    # directories separately.
    for entry in os.listdir(output_dir):
        entry_path = os.path.join(output_dir, entry)
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
        else:
            os.remove(entry_path)

    results = model.predict(source=input_file, conf=conf_threshold, save=True,
                            project=output_dir, name="run", exist_ok=True)

    # Ultralytics saves annotated output under output_dir/<run>/; pick the
    # most recently modified run folder.
    run_folders = [d for d in os.listdir(output_dir)
                   if os.path.isdir(os.path.join(output_dir, d))]
    run_folders.sort(key=lambda d: os.path.getmtime(os.path.join(output_dir, d)),
                     reverse=True)

    if not run_folders:
        print("No detection results saved.")
        return None

    latest_run_path = os.path.join(output_dir, run_folders[0])

    # Find the annotated image or video inside the run folder. Images are
    # saved directly; videos are re-encoded into a new video file.
    detected_files = [f for f in os.listdir(latest_run_path)
                      if f.endswith(('.jpg', '.jpeg', '.png', '.mp4', '.avi', '.mov'))]

    if not detected_files:
        print("No detected image/video found in results.")
        return None

    # Only one file is processed per call, so take the first match.
    result_path = os.path.join(latest_run_path, detected_files[0])
    print(f"Results saved to: {result_path}")
    return result_path
# 3. Create a Gradio interface
# Choose the output component type from the upload's file extension.
def get_output_component(input_file):
    """Return a gr.Video for video uploads, otherwise a gr.Image."""
    is_video = bool(input_file) and input_file.endswith(('.mp4', '.avi', '.mov'))
    label = "Detection Results"
    return gr.Video(label=label) if is_video else gr.Image(label=label)
+
63
+
64
+ # The Gradio interface setup
65
+ # We will use two separate interfaces, one for image and one for video, and use gr.Tab to switch between them.
66
+ # Alternatively, a single interface with conditional logic inside predict_image_or_video can work, but Gradio is a bit tricky with multiple output types based on input.
67
+ # For simplicity and direct instruction fulfillment, let's make two functions and use gr.Tab.
68
+
69
+
def predict_image(image_file, conf_threshold):
    """Image-tab handler: delegate to the shared inference routine."""
    return None if image_file is None else predict_image_or_video(image_file, conf_threshold)
+
def predict_video(video_file, conf_threshold):
    """Video-tab handler: delegate to the shared inference routine."""
    return None if video_file is None else predict_image_or_video(video_file, conf_threshold)
# 4. Build and launch the Gradio interface: one tab per media type so each
# tab can pair the upload with the matching output component.
with gr.Blocks() as demo:
    gr.Markdown("# YOLOv8 Signature Detection")
    gr.Markdown("Upload an image or video to perform signature detection using a fine-tuned YOLOv8n model.")

    with gr.Tab("Image Detection"):
        # type="filepath" so the handler receives a path it can hand to YOLO.
        image_input = gr.Image(type="filepath", label="Upload Image")
        image_conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
        image_output = gr.Image(label="Detection Results")
        image_button = gr.Button("Detect Signature in Image")
        image_button.click(predict_image, inputs=[image_input, image_conf_slider], outputs=image_output)

    with gr.Tab("Video Detection"):
        # FIX: gr.Video has no `type` parameter (it always provides a
        # filepath); passing type="filepath" raises TypeError at startup.
        video_input = gr.Video(label="Upload Video")
        video_conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
        video_output = gr.Video(label="Detection Results")
        video_button = gr.Button("Detect Signature in Video")
        video_button.click(predict_video, inputs=[video_input, video_conf_slider], outputs=video_output)

# Launch the app. share=True creates a public tunnel when run locally or in
# Colab; hosted platforms that do their own serving simply ignore it.
demo.launch(share=True)