am-om committed on
Commit
d7e737c
·
1 Parent(s): fc86e27
Files changed (1) hide show
  1. app.py +0 -127
app.py CHANGED
@@ -1,4 +1,3 @@
1
- <<<<<<< HEAD
2
  import gradio as gr
3
 
4
 
@@ -123,129 +122,3 @@ with gr.Blocks() as demo:
123
 
124
  if __name__ == "__main__":
125
  demo.launch(share = True)
126
- =======
127
- import gradio as gr
128
-
129
-
130
-
131
- import os
132
- import json
133
- from ultralytics import YOLO
134
- import supervision as sv
135
-
136
-
137
- # --- 1. CONFIGURATION ---
138
-
139
- # Folder where the script is
140
- BASE_DIR = os.path.dirname(os.path.abspath(__file__))
141
-
142
- # Go up one level to Om_Singh
143
- PROJECT_DIR = os.path.dirname(BASE_DIR) # parent folder of app/
144
-
145
- # Paths for outputs in video_and_json folder
146
- VIDEO_DIR = os.path.join(PROJECT_DIR, "video_and_json") # Om_Singh/video_and_json
147
- os.makedirs(VIDEO_DIR, exist_ok=True) # ensure folder exists
148
-
149
- MODEL_PATH = os.path.join(BASE_DIR, "best.pt") # model stays in app/
150
- OUTPUT_VIDEO_PATH = os.path.join(VIDEO_DIR, "output.mp4")
151
- OUTPUT_JSON_PATH = os.path.join(VIDEO_DIR, "result.json")
152
-
153
- def process_video(INPUT_VIDEO_PATH, OUTPUT_VIDEO_PATH, OUTPUT_JSON_PATH):
154
- print("Loading model...")
155
- model = YOLO(MODEL_PATH)
156
-
157
- print("Initializing tracker and annotators...")
158
- tracker = sv.ByteTrack()
159
- box_annotator = sv.BoxAnnotator(thickness=5)
160
- label_annotator = sv.LabelAnnotator(text_position=sv.Position.TOP_CENTER, text_scale = 1, text_thickness= 1)
161
-
162
- sv.LabelAnnotator()
163
-
164
- frame_generator = sv.get_video_frames_generator(source_path=INPUT_VIDEO_PATH)
165
- video_info = sv.VideoInfo.from_video_path(INPUT_VIDEO_PATH)
166
-
167
- results_list = []
168
-
169
- with sv.VideoSink(target_path=OUTPUT_VIDEO_PATH, video_info=video_info) as sink:
170
- print("Processing video frames...")
171
- for frame_number, frame in enumerate(frame_generator):
172
- # Run YOLO prediction
173
- results = model(frame)[0]
174
- detections = sv.Detections.from_ultralytics(results)
175
-
176
- # Update tracker
177
- tracked_detections = tracker.update_with_detections(detections=detections)
178
-
179
- # Prepare labels
180
- labels = [
181
- f"ID: {det[4]} {model.model.names[int(det[3])]}"
182
- for det in tracked_detections
183
- ]
184
-
185
- # Annotate frame
186
- annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=tracked_detections)
187
- annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=tracked_detections, labels=labels)
188
-
189
- # Save tracking info
190
- for det in tracked_detections:
191
- bbox = det[0]
192
- conf = det[2]
193
- class_id = int(det[3])
194
- tracker_id = det[4]
195
-
196
- results_list.append({
197
- "frame_number": frame_number,
198
- "track_id": int(tracker_id),
199
- "class": model.model.names[class_id],
200
- "confidence": float(conf),
201
- "bounding_box": [int(coord) for coord in bbox]
202
- })
203
-
204
- # Write annotated frame
205
- sink.write_frame(frame=annotated_frame)
206
-
207
- if frame_number % 30 == 0:
208
- print(f"Processed frame {frame_number}...")
209
-
210
- print("Video processing complete. Saving results...")
211
- with open(OUTPUT_JSON_PATH, 'w') as f:
212
- json.dump(results_list, f, indent=4)
213
-
214
- print("--- All tasks finished successfully! ---")
215
-
216
-
217
-
218
- # --- Main processing function ---
219
- def process(input_video):
220
- output_video = "output.mp4"
221
- output_json = "result.json"
222
-
223
- # During processing: red text
224
- status_html = "<p style='color:red; font-weight:bold;'>Processing...</p>"
225
-
226
- # Run video processing
227
- process_video(input_video, output_video, output_json)
228
-
229
- # After processing: green text
230
- status_html = "<p style='color:limegreen; font-weight:bold;'>Processing complete!</p>"
231
- return status_html, output_video, output_json
232
-
233
- # --- Gradio UI ---
234
- with gr.Blocks() as demo:
235
- gr.Markdown("<h1 style='text-align:center;'>Vehicle and Pedestrian Tracker</h1>")
236
-
237
- input_video = gr.Video(label="Upload Video")
238
- start_btn = gr.Button("Start Tracking")
239
- status_display = gr.HTML("") # Initially empty
240
- output_video = gr.Video(label="Processed Video")
241
- output_json = gr.File(label="Download JSON Output")
242
-
243
- start_btn.click(
244
- fn=process,
245
- inputs=input_video,
246
- outputs=[status_display, output_video, output_json]
247
- )
248
-
249
- if __name__ == "__main__":
250
- demo.launch(share = True)
251
- >>>>>>> e23ece8b8c3eb0b71e331ad00e70aa6c54f85b8b
 
 
1
  import gradio as gr
2
 
3
 
 
122
 
123
  if __name__ == "__main__":
124
  demo.launch(share = True)