Violence Detection in Video — Gradio demo script (extracted from a Hugging Face Space page).
| import gradio as gr | |
| import tensorflow as tf | |
| import numpy as np | |
| import cv2 | |
| import os | |
# Load the trained Keras model from disk; 'best_model.keras' must sit next to
# this script. predict_violence() below reads its single scalar output as a
# violence score thresholded at 0.5.
model = tf.keras.models.load_model('best_model.keras')
| # Video processor class | |
class SingleVideoProcessor:
    """Turn one video file into a fixed-size frame tensor for the model.

    Samples every `frame_interval`-th frame (converted BGR -> RGB), resizes
    each to `frame_size`, then pads with zero frames or truncates so the
    result always holds exactly `max_frames` frames.
    """

    def __init__(self, video_path, max_frames=50, frame_size=(128, 128), frame_interval=3):
        # video_path:     path to a video file readable by OpenCV.
        # max_frames:     fixed frame count of the processed output.
        # frame_size:     (width, height) tuple passed to cv2.resize.
        # frame_interval: keep one frame out of every `frame_interval`.
        self.video_path = video_path
        self.max_frames = max_frames
        self.frame_size = frame_size
        self.frame_interval = frame_interval

    def process(self):
        """Return the video as a float32 array of shape (max_frames, H, W, 3)."""
        frames = self.load_video_frames()
        frames = self.resize_frames(frames)
        frames = self.pad_video_frames(frames)
        return np.array(frames, dtype=np.float32)

    def load_video_frames(self):
        """Decode the video, keeping every `frame_interval`-th frame as RGB."""
        frames = []
        cap = cv2.VideoCapture(self.video_path)
        try:
            frame_count = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                frame_count += 1
                if frame_count % self.frame_interval == 0:
                    # OpenCV decodes BGR; convert so downstream sees RGB.
                    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        finally:
            # BUG FIX: release the capture even if decoding raises
            # (original leaked the handle on any exception in the loop).
            cap.release()
        return frames

    def resize_frames(self, frames):
        """Resize every frame to `frame_size` (width, height)."""
        return [cv2.resize(frame, self.frame_size) for frame in frames]

    def pad_video_frames(self, frames):
        """Pad with zero frames (or truncate) to exactly `max_frames` frames.

        BUG FIX: the original raised IndexError on an empty frame list
        (unreadable file, or a video shorter than `frame_interval`);
        it now returns all-zero frames of the expected shape instead.
        """
        if len(frames) >= self.max_frames:
            return frames[:self.max_frames]
        if frames:
            zero = np.zeros_like(frames[0])
        else:
            # Nothing was decoded: synthesize zero frames of the target
            # shape. Note frame_size is (width, height) while image arrays
            # are (height, width, channels).
            zero = np.zeros((self.frame_size[1], self.frame_size[0], 3), dtype=np.uint8)
        return frames + [zero] * (self.max_frames - len(frames))
# Prediction function
def predict_violence(video_path):
    """Classify a video as Violence / Non-Violence.

    Args:
        video_path: path to the video file (supplied by the Gradio widget).

    Returns:
        A display string: the predicted label plus the model's confidence
        in that label.
    """
    processor = SingleVideoProcessor(video_path)
    processed_video = processor.process()
    # Add a batch dimension: (1, max_frames, H, W, 3).
    batch = np.expand_dims(processed_video, axis=0)
    # Single scalar output in [0, 1]; >= 0.5 means "Violence".
    score = float(model.predict(batch)[0][0])
    if score >= 0.5:
        label, confidence = "Violence", score
    else:
        # BUG FIX: report confidence in the PREDICTED label. The original
        # printed the raw violence score (e.g. "Non-Violence (Confidence:
        # 0.0300)"), which reads as 3% confidence in Non-Violence.
        label, confidence = "Non-Violence", 1.0 - score
    # (Removed leftover debug print of the raw prediction.)
    return f"{label} (Confidence: {confidence:.4f})"
# Example videos for the Gradio UI; each entry is one input row.
# (Ensure these files exist in your working directory.)
_example_files = ("NV_1.mp4", "V_1000.mp4", "V_102.mp4", "NV_11.mp4")
examples = [[name] for name in _example_files]
# Build the Gradio interface and start the local server.
video_input = gr.Video(label="Upload or Select a Video")
prediction_output = gr.Text(label="Prediction")

demo = gr.Interface(
    fn=predict_violence,
    inputs=video_input,
    outputs=prediction_output,
    examples=examples,
    title="Violence Detection in Video",
    description="This model classifies videos as Violence or Non-Violence. Upload a short clip or select from examples.",
)
demo.launch()