|
|
def execute_python_script(script_code, context=None):
    """
    Execute a Python script with a reduced set of builtins and captured output.

    NOTE(review): a builtins allow-list is NOT a real sandbox — exec'd code
    can still reach full builtins via attribute chains (e.g. ().__class__).
    Do not feed untrusted input here; at minimum pair with
    validate_python_script, and prefer process-level isolation.

    Args:
        script_code: Source text to execute.
        context: Optional dict exposed to the script as ``context``; defaults
            to placeholder keys ('video_path', 'frame_data', 'metadata').

    Returns:
        Captured stdout on success, or an "Error ..." string on failure.
    """
    import io
    from contextlib import redirect_stdout, redirect_stderr

    if context is None:
        context = {
            'video_path': None,
            'frame_data': None,
            'metadata': None
        }

    # Capture stdout and stderr separately so errors can be reported distinctly.
    output = io.StringIO()
    error = io.StringIO()

    try:
        with redirect_stdout(output), redirect_stderr(error):
            # Restricted execution environment: only these builtins are exposed.
            exec_globals = {
                '__builtins__': {
                    'print': print,
                    'str': str,
                    'int': int,
                    'float': float,
                    'list': list,
                    'dict': dict,
                    'tuple': tuple,
                    'range': range,
                    'len': len,
                    'enumerate': enumerate,
                    'zip': zip,
                    'min': min,
                    'max': max,
                    'sum': sum,
                    'abs': abs,
                    'round': round
                },
                'context': context
            }

            exec(script_code, exec_globals)

        if error.getvalue():
            return f"Error: {error.getvalue()}"
        else:
            return output.getvalue()

    except Exception as e:
        return f"Error executing script: {str(e)}"
|
|
def validate_python_script(script_code):
    """
    Validate Python script syntax and reject known-dangerous constructs.

    Checks are AST-based, so disallowed *statements* cannot be hidden in
    formatting, but this is a deny-by-name filter, not a sandbox: it cannot
    catch every escape route.

    Args:
        script_code: Source text to validate.

    Returns:
        (is_valid, message) tuple; message explains the first failure found.
    """
    import ast

    # Builtins that enable arbitrary code execution, I/O, or sandbox escape.
    # Original list missed __import__, compile and the attr-manipulation trio.
    banned_calls = {
        'eval', 'exec', 'execfile', 'compile', 'open', '__import__',
        'getattr', 'setattr', 'delattr', 'globals', 'locals', 'vars',
    }

    try:
        tree = ast.parse(script_code)
    except SyntaxError as e:
        return (False, f"Syntax error: {str(e)}")

    for node in ast.walk(tree):
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            return (False, "Import statements are not allowed")
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            if node.func.id in banned_calls:
                return (False, f"Function {node.func.id}() is not allowed")
        # Dunder attribute access (e.g. ().__class__) is the classic route
        # around a restricted-builtins exec environment.
        if isinstance(node, ast.Attribute) and node.attr.startswith('__'):
            return (False, f"Access to attribute {node.attr} is not allowed")

    return (True, "Script is valid")
|
|
|
|
def extract_video_metadata(video_path):
    """
    Extract technical metadata from a video file via OpenCV.

    Args:
        video_path: Path to the video file.

    Returns:
        Dict of properties (resolution, fps, duration, frame_count, file
        size/times), or None when the file cannot be opened.
    """
    import cv2
    from datetime import datetime
    import os

    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        return None

    try:
        # Basic stream properties as reported by the container/codec.
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Broken/streamed files can report fps == 0; avoid ZeroDivisionError.
        duration = frame_count / fps if fps > 0 else 0.0
    finally:
        cap.release()

    # Filesystem-level info. NOTE: st_ctime is inode-change time on Unix,
    # true creation time only on Windows.
    file_stats = os.stat(video_path)
    created = datetime.fromtimestamp(file_stats.st_ctime)
    modified = datetime.fromtimestamp(file_stats.st_mtime)

    return {
        'filename': os.path.basename(video_path),
        'path': video_path,
        'resolution': f"{width}x{height}",
        'fps': fps,
        'duration': duration,
        'frame_count': frame_count,
        'size': file_stats.st_size,
        'created': created,
        'modified': modified
    }
|
|
def extract_audio_metadata(audio_path):
    """
    Extract technical metadata from a WAV audio file.

    Args:
        audio_path: Path to a WAV file.

    Returns:
        Dict of properties (channels, sample width/rate, duration, file
        size/times), or None when the file is missing or not a valid WAV.
    """
    import wave
    from datetime import datetime
    import os

    try:
        with wave.open(audio_path, 'rb') as audio_file:
            channels = audio_file.getnchannels()
            sample_width = audio_file.getsampwidth()
            framerate = audio_file.getframerate()
            frames = audio_file.getnframes()
            # Guard against a corrupt header reporting a zero frame rate.
            duration = frames / float(framerate) if framerate else 0.0

        file_stats = os.stat(audio_path)
        created = datetime.fromtimestamp(file_stats.st_ctime)
        modified = datetime.fromtimestamp(file_stats.st_mtime)

        return {
            'filename': os.path.basename(audio_path),
            'path': audio_path,
            'channels': channels,
            'sample_width': sample_width,
            'sample_rate': framerate,
            'duration': duration,
            'size': file_stats.st_size,
            'created': created,
            'modified': modified
        }
    except (OSError, wave.Error, EOFError):
        # Narrowed from a bare except: missing/unreadable file or a
        # non-WAV/corrupt payload; anything else should propagate.
        return None
|
|
def extract_exif_data(image_path):
    """
    Extract EXIF metadata from an image file.

    Args:
        image_path: Path to the image.

    Returns:
        Dict of decoded EXIF tags plus filesystem info, or None when the file
        is missing, unreadable, or carries no EXIF data.
    """
    from PIL import Image, ExifTags
    from datetime import datetime
    import os

    try:
        # Context manager ensures the file handle is closed even on error;
        # the original leaked it.
        with Image.open(image_path) as img:
            exif_data = img._getexif()  # private API; kept for raw tag ids

        if not exif_data:
            return None

        # Map numeric tag ids to human-readable names where known.
        exif = {}
        for tag, value in exif_data.items():
            decoded = ExifTags.TAGS.get(tag, tag)
            exif[decoded] = value

        file_stats = os.stat(image_path)
        created = datetime.fromtimestamp(file_stats.st_ctime)
        modified = datetime.fromtimestamp(file_stats.st_mtime)

        exif['filename'] = os.path.basename(image_path)
        exif['path'] = image_path
        exif['size'] = file_stats.st_size
        exif['created'] = created
        exif['modified'] = modified

        return exif
    except (OSError, AttributeError):
        # Narrowed from a bare except: unreadable/unsupported image, or a
        # format plugin without _getexif.
        return None
|
|
|
|
def detect_faces(frame, min_confidence=0.7,
                 model_file="models/res10_300x300_ssd_iter_140000_fp16.caffemodel",
                 config_file="models/deploy.prototxt"):
    """
    Detect faces in a BGR frame using OpenCV's SSD face detector.

    Args:
        frame: BGR image array (as produced by cv2).
        min_confidence: Minimum detection confidence to keep.
        model_file: Caffe weights path (default matches the original).
        config_file: Caffe prototxt path (default matches the original).

    Returns:
        List of {'box': [x1, y1, x2, y2] int array, 'confidence': float}.
    """
    import cv2
    import numpy as np

    # Cache loaded networks per (config, model) pair: reloading the DNN from
    # disk on every call dominated runtime when scanning many frames.
    cache = detect_faces.__dict__.setdefault('_net_cache', {})
    key = (config_file, model_file)
    if key not in cache:
        cache[key] = cv2.dnn.readNetFromCaffe(config_file, model_file)
    net = cache[key]

    (h, w) = frame.shape[:2]
    # Mean-subtraction values are the ones this model was trained with.
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))

    net.setInput(blob)
    detections = net.forward()

    faces = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > min_confidence:
            # Coordinates are normalized [0,1]; scale back to pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            faces.append({
                'box': box.astype("int"),
                'confidence': float(confidence)
            })

    return faces
|
|
def detect_objects(frame, min_confidence=0.5):
    """
    Detect COCO objects in a BGR frame using YOLOv3 via OpenCV DNN.

    Args:
        frame: BGR image array.
        min_confidence: Minimum class confidence to keep a detection.

    Returns:
        List of {'class': name, 'confidence': float, 'box': (x, y, w, h)}.
        NOTE(review): no non-max suppression is applied, so overlapping
        duplicates of the same object are possible.
    """
    import cv2
    import numpy as np

    # Load COCO class labels and the pre-trained network.
    with open("models/coco.names", "r") as f:
        classes = [line.strip() for line in f]

    model_file = "models/yolov3.weights"
    config_file = "models/yolov3.cfg"
    net = cv2.dnn.readNetFromDarknet(config_file, model_file)

    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416),
                                 swapRB=True, crop=False)

    net.setInput(blob)
    layer_names = net.getLayerNames()
    # getUnconnectedOutLayers returns Nx1 arrays on OpenCV < 4.5.4 but flat
    # scalars on newer versions; the original's i[0] crashes on the latter.
    out_ids = np.array(net.getUnconnectedOutLayers()).flatten()
    output_layers = [layer_names[int(i) - 1] for i in out_ids]
    outputs = net.forward(output_layers)

    objects = []
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]

            if confidence > min_confidence:
                # YOLO emits normalized center/size; convert to a pixel box.
                center_x = int(detection[0] * w)
                center_y = int(detection[1] * h)
                width = int(detection[2] * w)
                height = int(detection[3] * h)

                x = int(center_x - width / 2)
                y = int(center_y - height / 2)

                objects.append({
                    'class': classes[class_id],
                    'confidence': float(confidence),
                    'box': (x, y, width, height)
                })

    return objects
|
|
def recognize_license_plate(frame):
    """
    Attempt to read license-plate text from a BGR frame.

    The frame is binarized, plate-shaped contours are located by aspect
    ratio and size, and the first candidate region with OCR output wins.

    Args:
        frame: BGR image array.

    Returns:
        {'text': str, 'box': (x, y, w, h)} for the first readable candidate,
        or None if nothing was recognized.
    """
    import pytesseract
    import cv2

    # Preprocess: grayscale -> denoise -> Otsu binarization for OCR.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5,5), 0)
    _, thresh = cv2.threshold(blurred, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # findContours returns 2 or 3 values depending on the OpenCV version.
    found = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    candidates = found[0] if len(found) == 2 else found[1]

    for contour in candidates:
        x, y, w, h = cv2.boundingRect(contour)
        ratio = w / float(h)
        # Plates are wide rectangles; filter by aspect ratio and minimum size.
        if not (2 < ratio < 5 and w > 100 and h > 30):
            continue
        roi = frame[y:y+h, x:x+w]
        # --psm 8: treat the region as a single word.
        plate_text = pytesseract.image_to_string(roi, config='--psm 8').strip()
        if plate_text:
            return {
                'text': plate_text,
                'box': (x, y, w, h)
            }

    return None
|
|
|
|
def extract_frames(video_path, frames_dir, frame_rate=1):
    """
    Extract frames from a video at (approximately) the given rate.

    Args:
        video_path: Source video path.
        frames_dir: Directory to write JPEG frames into (created if missing).
        frame_rate: Desired extracted frames per second.

    Returns:
        List of written frame file paths (named by source frame index).
    """
    import cv2
    import os

    os.makedirs(frames_dir, exist_ok=True)

    vidcap = cv2.VideoCapture(video_path)
    try:
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        # Guard: fps can be 0 for broken files, and fps < frame_rate would
        # truncate to 0 and crash the modulo below — take every frame then.
        frame_interval = max(1, int(fps / frame_rate)) if fps > 0 else 1

        count = 0
        frame_paths = []
        success, image = vidcap.read()

        while success:
            if count % frame_interval == 0:
                frame_path = os.path.join(frames_dir, f"frame_{count}.jpg")
                cv2.imwrite(frame_path, image)
                frame_paths.append(frame_path)
            success, image = vidcap.read()
            count += 1
    finally:
        # The original leaked the capture handle; always release it.
        vidcap.release()

    return frame_paths
|
|
def apply_filters(frame, brightness=0, contrast=0, saturation=0, sharpness=0):
    """
    Apply enhancement filters to a BGR frame.

    Args:
        frame: BGR uint8 image array.
        brightness: Additive offset; 0 = unchanged.
        contrast: Percentage change; 0 = unchanged.
        saturation: Percentage change; 0 = unchanged.
        sharpness: Percentage unsharp-mask strength; 0 = off.

    Returns:
        Processed frame (uint8, same shape).
    """
    import cv2
    import numpy as np

    # Map percentage contrast/brightness onto OpenCV's alpha/beta form.
    alpha = 1 + contrast/100
    beta = brightness

    frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)

    # Saturation is adjusted in HSV space. Clip before casting back to
    # uint8: the original's plain in-place multiply wrapped values > 255
    # around to small numbers, inverting saturation on vivid pixels.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    sat = hsv[:, :, 1].astype(np.float32) * (1 + saturation/100)
    hsv[:, :, 1] = np.clip(sat, 0, 255).astype(np.uint8)
    frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # Unsharp mask: subtract a blurred copy to boost edges.
    if sharpness > 0:
        blurred = cv2.GaussianBlur(frame, (0,0), 3)
        frame = cv2.addWeighted(frame, 1 + sharpness/100, blurred, -sharpness/100, 0)

    return frame
|
|
def stabilize_video(input_path, output_path):
    """
    Stabilize shaky video (placeholder: currently copies frames through).

    Args:
        input_path: Source video path.
        output_path: Destination .mp4 path.

    Returns:
        output_path.
    """
    import cv2

    # TODO: real stabilization (feature tracking + motion compensation).
    cap = cv2.VideoCapture(input_path)
    # Preserve the source frame rate: the original hard-coded 30 fps, which
    # changed playback speed for any non-30fps source.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 30.0  # fallback when the container reports nothing
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          fps, size)

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            out.write(frame)
    finally:
        # Release both handles even if a read/write raises.
        cap.release()
        out.release()
    return output_path
|
|
|
|
def execute_python_script(script_code, context=None):
    """
    Execute a Python script with a reduced set of builtins and captured output.

    NOTE(review): a builtins allow-list is NOT a real sandbox — exec'd code
    can still reach full builtins via attribute chains (e.g. ().__class__).
    Do not feed untrusted input here; at minimum pair with
    validate_python_script, and prefer process-level isolation.

    Args:
        script_code: Source text to execute.
        context: Optional dict exposed to the script as ``context``; defaults
            to placeholder keys ('video_path', 'frame_data', 'metadata').

    Returns:
        Captured stdout on success, or an "Error ..." string on failure.
    """
    import io
    from contextlib import redirect_stdout, redirect_stderr

    if context is None:
        context = {
            'video_path': None,
            'frame_data': None,
            'metadata': None
        }

    # Capture stdout and stderr separately so errors can be reported distinctly.
    output = io.StringIO()
    error = io.StringIO()

    try:
        with redirect_stdout(output), redirect_stderr(error):
            # Restricted execution environment: only these builtins are exposed.
            exec_globals = {
                '__builtins__': {
                    'print': print,
                    'str': str,
                    'int': int,
                    'float': float,
                    'list': list,
                    'dict': dict,
                    'tuple': tuple,
                    'range': range,
                    'len': len,
                    'enumerate': enumerate,
                    'zip': zip,
                    'min': min,
                    'max': max,
                    'sum': sum,
                    'abs': abs,
                    'round': round
                },
                'context': context
            }

            exec(script_code, exec_globals)

        if error.getvalue():
            return f"Error: {error.getvalue()}"
        else:
            return output.getvalue()

    except Exception as e:
        return f"Error executing script: {str(e)}"
|
|
def validate_python_script(script_code):
    """
    Validate Python script syntax and reject known-dangerous constructs.

    Checks are AST-based, so disallowed *statements* cannot be hidden in
    formatting, but this is a deny-by-name filter, not a sandbox: it cannot
    catch every escape route.

    Args:
        script_code: Source text to validate.

    Returns:
        (is_valid, message) tuple; message explains the first failure found.
    """
    import ast

    # Builtins that enable arbitrary code execution, I/O, or sandbox escape.
    # Original list missed __import__, compile and the attr-manipulation trio.
    banned_calls = {
        'eval', 'exec', 'execfile', 'compile', 'open', '__import__',
        'getattr', 'setattr', 'delattr', 'globals', 'locals', 'vars',
    }

    try:
        tree = ast.parse(script_code)
    except SyntaxError as e:
        return (False, f"Syntax error: {str(e)}")

    for node in ast.walk(tree):
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            return (False, "Import statements are not allowed")
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            if node.func.id in banned_calls:
                return (False, f"Function {node.func.id}() is not allowed")
        # Dunder attribute access (e.g. ().__class__) is the classic route
        # around a restricted-builtins exec environment.
        if isinstance(node, ast.Attribute) and node.attr.startswith('__'):
            return (False, f"Access to attribute {node.attr} is not allowed")

    return (True, "Script is valid")
|
|
|
|
def extract_video_metadata(video_path):
    """
    Extract technical metadata from a video file via OpenCV.

    Args:
        video_path: Path to the video file.

    Returns:
        Dict of properties (resolution, fps, duration, frame_count, file
        size/times), or None when the file cannot be opened.
    """
    import cv2
    from datetime import datetime
    import os

    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        return None

    try:
        # Basic stream properties as reported by the container/codec.
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Broken/streamed files can report fps == 0; avoid ZeroDivisionError.
        duration = frame_count / fps if fps > 0 else 0.0
    finally:
        cap.release()

    # Filesystem-level info. NOTE: st_ctime is inode-change time on Unix,
    # true creation time only on Windows.
    file_stats = os.stat(video_path)
    created = datetime.fromtimestamp(file_stats.st_ctime)
    modified = datetime.fromtimestamp(file_stats.st_mtime)

    return {
        'filename': os.path.basename(video_path),
        'path': video_path,
        'resolution': f"{width}x{height}",
        'fps': fps,
        'duration': duration,
        'frame_count': frame_count,
        'size': file_stats.st_size,
        'created': created,
        'modified': modified
    }
|
|
def extract_audio_metadata(audio_path):
    """
    Extract technical metadata from a WAV audio file.

    Args:
        audio_path: Path to a WAV file.

    Returns:
        Dict of properties (channels, sample width/rate, duration, file
        size/times), or None when the file is missing or not a valid WAV.
    """
    import wave
    from datetime import datetime
    import os

    try:
        with wave.open(audio_path, 'rb') as audio_file:
            channels = audio_file.getnchannels()
            sample_width = audio_file.getsampwidth()
            framerate = audio_file.getframerate()
            frames = audio_file.getnframes()
            # Guard against a corrupt header reporting a zero frame rate.
            duration = frames / float(framerate) if framerate else 0.0

        file_stats = os.stat(audio_path)
        created = datetime.fromtimestamp(file_stats.st_ctime)
        modified = datetime.fromtimestamp(file_stats.st_mtime)

        return {
            'filename': os.path.basename(audio_path),
            'path': audio_path,
            'channels': channels,
            'sample_width': sample_width,
            'sample_rate': framerate,
            'duration': duration,
            'size': file_stats.st_size,
            'created': created,
            'modified': modified
        }
    except (OSError, wave.Error, EOFError):
        # Narrowed from a bare except: missing/unreadable file or a
        # non-WAV/corrupt payload; anything else should propagate.
        return None
|
|
def extract_exif_data(image_path):
    """
    Extract EXIF metadata from an image file.

    Args:
        image_path: Path to the image.

    Returns:
        Dict of decoded EXIF tags plus filesystem info, or None when the file
        is missing, unreadable, or carries no EXIF data.
    """
    from PIL import Image, ExifTags
    from datetime import datetime
    import os

    try:
        # Context manager ensures the file handle is closed even on error;
        # the original leaked it.
        with Image.open(image_path) as img:
            exif_data = img._getexif()  # private API; kept for raw tag ids

        if not exif_data:
            return None

        # Map numeric tag ids to human-readable names where known.
        exif = {}
        for tag, value in exif_data.items():
            decoded = ExifTags.TAGS.get(tag, tag)
            exif[decoded] = value

        file_stats = os.stat(image_path)
        created = datetime.fromtimestamp(file_stats.st_ctime)
        modified = datetime.fromtimestamp(file_stats.st_mtime)

        exif['filename'] = os.path.basename(image_path)
        exif['path'] = image_path
        exif['size'] = file_stats.st_size
        exif['created'] = created
        exif['modified'] = modified

        return exif
    except (OSError, AttributeError):
        # Narrowed from a bare except: unreadable/unsupported image, or a
        # format plugin without _getexif.
        return None
|
|
|
|
def detect_faces(frame, min_confidence=0.7,
                 model_file="models/res10_300x300_ssd_iter_140000_fp16.caffemodel",
                 config_file="models/deploy.prototxt"):
    """
    Detect faces in a BGR frame using OpenCV's SSD face detector.

    Args:
        frame: BGR image array (as produced by cv2).
        min_confidence: Minimum detection confidence to keep.
        model_file: Caffe weights path (default matches the original).
        config_file: Caffe prototxt path (default matches the original).

    Returns:
        List of {'box': [x1, y1, x2, y2] int array, 'confidence': float}.
    """
    import cv2
    import numpy as np

    # Cache loaded networks per (config, model) pair: reloading the DNN from
    # disk on every call dominated runtime when scanning many frames.
    cache = detect_faces.__dict__.setdefault('_net_cache', {})
    key = (config_file, model_file)
    if key not in cache:
        cache[key] = cv2.dnn.readNetFromCaffe(config_file, model_file)
    net = cache[key]

    (h, w) = frame.shape[:2]
    # Mean-subtraction values are the ones this model was trained with.
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))

    net.setInput(blob)
    detections = net.forward()

    faces = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > min_confidence:
            # Coordinates are normalized [0,1]; scale back to pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            faces.append({
                'box': box.astype("int"),
                'confidence': float(confidence)
            })

    return faces
|
|
def detect_objects(frame, min_confidence=0.5):
    """
    Detect COCO objects in a BGR frame using YOLOv3 via OpenCV DNN.

    Args:
        frame: BGR image array.
        min_confidence: Minimum class confidence to keep a detection.

    Returns:
        List of {'class': name, 'confidence': float, 'box': (x, y, w, h)}.
        NOTE(review): no non-max suppression is applied, so overlapping
        duplicates of the same object are possible.
    """
    import cv2
    import numpy as np

    # Load COCO class labels and the pre-trained network.
    with open("models/coco.names", "r") as f:
        classes = [line.strip() for line in f]

    model_file = "models/yolov3.weights"
    config_file = "models/yolov3.cfg"
    net = cv2.dnn.readNetFromDarknet(config_file, model_file)

    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416),
                                 swapRB=True, crop=False)

    net.setInput(blob)
    layer_names = net.getLayerNames()
    # getUnconnectedOutLayers returns Nx1 arrays on OpenCV < 4.5.4 but flat
    # scalars on newer versions; the original's i[0] crashes on the latter.
    out_ids = np.array(net.getUnconnectedOutLayers()).flatten()
    output_layers = [layer_names[int(i) - 1] for i in out_ids]
    outputs = net.forward(output_layers)

    objects = []
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]

            if confidence > min_confidence:
                # YOLO emits normalized center/size; convert to a pixel box.
                center_x = int(detection[0] * w)
                center_y = int(detection[1] * h)
                width = int(detection[2] * w)
                height = int(detection[3] * h)

                x = int(center_x - width / 2)
                y = int(center_y - height / 2)

                objects.append({
                    'class': classes[class_id],
                    'confidence': float(confidence),
                    'box': (x, y, width, height)
                })

    return objects
|
|
def recognize_license_plate(frame):
    """
    Attempt to read license-plate text from a BGR frame.

    The frame is binarized, plate-shaped contours are located by aspect
    ratio and size, and the first candidate region with OCR output wins.

    Args:
        frame: BGR image array.

    Returns:
        {'text': str, 'box': (x, y, w, h)} for the first readable candidate,
        or None if nothing was recognized.
    """
    import pytesseract
    import cv2

    # Preprocess: grayscale -> denoise -> Otsu binarization for OCR.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5,5), 0)
    _, thresh = cv2.threshold(blurred, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # findContours returns 2 or 3 values depending on the OpenCV version.
    found = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    candidates = found[0] if len(found) == 2 else found[1]

    for contour in candidates:
        x, y, w, h = cv2.boundingRect(contour)
        ratio = w / float(h)
        # Plates are wide rectangles; filter by aspect ratio and minimum size.
        if not (2 < ratio < 5 and w > 100 and h > 30):
            continue
        roi = frame[y:y+h, x:x+w]
        # --psm 8: treat the region as a single word.
        plate_text = pytesseract.image_to_string(roi, config='--psm 8').strip()
        if plate_text:
            return {
                'text': plate_text,
                'box': (x, y, w, h)
            }

    return None
|
|
|
|
def extract_frames(video_path, frames_dir, frame_rate=1):
    """
    Extract frames from a video at (approximately) the given rate.

    Args:
        video_path: Source video path.
        frames_dir: Directory to write JPEG frames into (created if missing).
        frame_rate: Desired extracted frames per second.

    Returns:
        List of written frame file paths (named by source frame index).
    """
    import cv2
    import os

    os.makedirs(frames_dir, exist_ok=True)

    vidcap = cv2.VideoCapture(video_path)
    try:
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        # Guard: fps can be 0 for broken files, and fps < frame_rate would
        # truncate to 0 and crash the modulo below — take every frame then.
        frame_interval = max(1, int(fps / frame_rate)) if fps > 0 else 1

        count = 0
        frame_paths = []
        success, image = vidcap.read()

        while success:
            if count % frame_interval == 0:
                frame_path = os.path.join(frames_dir, f"frame_{count}.jpg")
                cv2.imwrite(frame_path, image)
                frame_paths.append(frame_path)
            success, image = vidcap.read()
            count += 1
    finally:
        # The original leaked the capture handle; always release it.
        vidcap.release()

    return frame_paths
|
|
def apply_filters(frame, brightness=0, contrast=0, saturation=0, sharpness=0):
    """
    Apply enhancement filters to a BGR frame.

    Args:
        frame: BGR uint8 image array.
        brightness: Additive offset; 0 = unchanged.
        contrast: Percentage change; 0 = unchanged.
        saturation: Percentage change; 0 = unchanged.
        sharpness: Percentage unsharp-mask strength; 0 = off.

    Returns:
        Processed frame (uint8, same shape).
    """
    import cv2
    import numpy as np

    # Map percentage contrast/brightness onto OpenCV's alpha/beta form.
    alpha = 1 + contrast/100
    beta = brightness

    frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)

    # Saturation is adjusted in HSV space. Clip before casting back to
    # uint8: the original's plain in-place multiply wrapped values > 255
    # around to small numbers, inverting saturation on vivid pixels.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    sat = hsv[:, :, 1].astype(np.float32) * (1 + saturation/100)
    hsv[:, :, 1] = np.clip(sat, 0, 255).astype(np.uint8)
    frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # Unsharp mask: subtract a blurred copy to boost edges.
    if sharpness > 0:
        blurred = cv2.GaussianBlur(frame, (0,0), 3)
        frame = cv2.addWeighted(frame, 1 + sharpness/100, blurred, -sharpness/100, 0)

    return frame
|
|
def stabilize_video(input_path, output_path):
    """
    Stabilize shaky video (placeholder: currently copies frames through).

    Args:
        input_path: Source video path.
        output_path: Destination .mp4 path.

    Returns:
        output_path.
    """
    import cv2

    # TODO: real stabilization (feature tracking + motion compensation).
    cap = cv2.VideoCapture(input_path)
    # Preserve the source frame rate: the original hard-coded 30 fps, which
    # changed playback speed for any non-30fps source.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 30.0  # fallback when the container reports nothing
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          fps, size)

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            out.write(frame)
    finally:
        # Release both handles even if a read/write raises.
        cap.release()
        out.release()
    return output_path
|
|
|
|
| <!DOCTYPE html> |
| <html lang="en"> |
| <head> |
| <meta charset="UTF-8"> |
| <meta name="viewport" content="width=device-width, initial-scale=1.0"> |
| <title>Forensic Video Editor</title> |
| <script src="https://cdn.tailwindcss.com"></script> |
| <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css"> |
| <style> |
| .timeline-track { |
| height: 100px; |
| background-color: #2d3748; |
| position: relative; |
| } |
| .timeline-marker { |
| position: absolute; |
| width: 2px; |
| height: 100%; |
| background-color: red; |
| cursor: col-resize; |
| } |
| .video-container { |
| background-color: #1a202c; |
| } |
| .zoom-controls { |
| background-color: rgba(0, 0, 0, 0.7); |
| } |
| .frame-controls { |
| background-color: rgba(0, 0, 0, 0.7); |
| } |
| .enhancement-panel { |
| transition: all 0.3s ease; |
| } |
| .waveform { |
| height: 60px; |
| background: linear-gradient(90deg, #4a5568 0%, #2d3748 100%); |
| position: relative; |
| } |
| .waveform-bar { |
| position: absolute; |
| bottom: 0; |
| width: 2px; |
| background-color: #4299e1; |
| } |
| .metadata-panel { |
| max-height: 300px; |
| overflow-y: auto; |
| } |
| .video-frame { |
| border: 1px solid #4a5568; |
| cursor: pointer; |
| } |
| .video-frame.active { |
| border-color: #4299e1; |
| box-shadow: 0 0 0 2px #4299e1; |
| } |
| .color-picker { |
| width: 30px; |
| height: 30px; |
| border-radius: 50%; |
| cursor: pointer; |
| } |
| .modal-overlay { |
| background-color: rgba(0, 0, 0, 0.8); |
| } |
| .modal-content { |
| max-height: 90vh; |
| } |
| .filter-slider { |
| -webkit-appearance: none; |
| width: 100%; |
| height: 8px; |
| border-radius: 4px; |
| background: #4a5568; |
| outline: none; |
| } |
| .filter-slider::-webkit-slider-thumb { |
| -webkit-appearance: none; |
| appearance: none; |
| width: 16px; |
| height: 16px; |
| border-radius: 50%; |
| background: #4299e1; |
| cursor: pointer; |
| } |
| </style> |
| </head> |
| <body class="bg-gray-900 text-gray-200 font-sans"> |
| |
| <header class="bg-gray-800 p-4 flex justify-between items-center border-b border-gray-700"> |
| <div class="flex items-center space-x-4"> |
| <h1 class="text-2xl font-bold text-blue-400"> |
| <i class="fas fa-microscope mr-2"></i>Forensic Video Editor |
| </h1> |
| <div class="flex space-x-2"> |
| <button class="px-3 py-1 bg-blue-600 hover:bg-blue-700 rounded flex items-center"> |
| <i class="fas fa-folder-open mr-2"></i> Open |
| </button> |
| <button class="px-3 py-1 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-save mr-2"></i> Save |
| </button> |
| <button class="px-3 py-1 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-share-alt mr-2"></i> Export |
| </button> |
| </div> |
| </div> |
| <div class="flex items-center space-x-4"> |
| <div class="relative"> |
| <input type="text" placeholder="Search metadata..." class="bg-gray-700 px-4 py-1 rounded-full w-64 focus:outline-none focus:ring-2 focus:ring-blue-500"> |
| <i class="fas fa-search absolute right-3 top-2 text-gray-400"></i> |
| </div> |
| <button class="px-3 py-1 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-cog mr-2"></i> Settings |
| </button> |
| </div> |
| </header> |
|
|
| <div class="flex h-screen"> |
| |
| <div class="w-64 bg-gray-800 border-r border-gray-700 flex flex-col"> |
| <div class="p-4 border-b border-gray-700"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-layer-group mr-2 text-blue-400"></i> Media Library |
| </h2> |
| <div class="flex space-x-2 mb-3"> |
| <button class="px-2 py-1 bg-blue-600 hover:bg-blue-700 rounded text-sm"> |
| <i class="fas fa-plus mr-1"></i> Add |
| </button> |
| <button class="px-2 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-trash mr-1"></i> Remove |
| </button> |
| </div> |
| <div class="bg-gray-700 rounded p-2 h-40 overflow-y-auto"> |
| <div class="flex items-center p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <i class="fas fa-video mr-2 text-red-400"></i> |
| <span class="truncate">Surveillance_Camera_1.mp4</span> |
| </div> |
| <div class="flex items-center p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <i class="fas fa-video mr-2 text-red-400"></i> |
| <span class="truncate">Bodycam_Officer_Johnson.mp4</span> |
| </div> |
| <div class="flex items-center p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <i class="fas fa-image mr-2 text-green-400"></i> |
| <span class="truncate">Crime_Scene_Photo_1.jpg</span> |
| </div> |
| <div class="flex items-center p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <i class="fas fa-file-audio mr-2 text-purple-400"></i> |
| <span class="truncate">911_Call_Recording.wav</span> |
| </div> |
| </div> |
| </div> |
|
|
| <div class="p-4 border-b border-gray-700"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-tools mr-2 text-blue-400"></i> Tools |
| </h2> |
| <div class="grid grid-cols-2 gap-2"> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-eye mb-1 text-yellow-400"></i> |
| <span class="text-xs">Enhance</span> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-ruler mb-1 text-green-400"></i> |
| <span class="text-xs">Measure</span> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-search-plus mb-1 text-blue-400"></i> |
| <span class="text-xs">Zoom</span> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-bezier-curve mb-1 text-purple-400"></i> |
| <span class="text-xs">Stabilize</span> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-lightbulb mb-1 text-orange-400"></i> |
| <span class="text-xs">Lighting</span> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded flex flex-col items-center"> |
| <i class="fas fa-fingerprint mb-1 text-red-400"></i> |
| <span class="text-xs">Identify</span> |
| </button> |
| </div> |
| </div> |
|
|
| <div class="p-4 flex-1 overflow-y-auto"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-info-circle mr-2 text-blue-400"></i> Metadata |
| </h2> |
| <div class="metadata-panel bg-gray-700 rounded p-2 text-sm"> |
| <div class="mb-2"> |
| <div class="font-medium text-blue-300">File Information</div> |
| <div class="grid grid-cols-2 gap-1 mt-1"> |
| <div>Filename:</div> |
| <div>Surveillance_Camera_1.mp4</div> |
| <div>Format:</div> |
| <div>MP4 (H.264)</div> |
| <div>Duration:</div> |
| <div>00:23:45</div> |
| <div>Resolution:</div> |
| <div>1920x1080</div> |
| <div>Frame Rate:</div> |
| <div>30 fps</div> |
| <div>Size:</div> |
| <div>1.2 GB</div> |
| </div> |
| </div> |
| <div class="mb-2"> |
| <div class="font-medium text-blue-300">Device Information</div> |
| <div class="grid grid-cols-2 gap-1 mt-1"> |
| <div>Make:</div> |
| <div>Axis Communications</div> |
| <div>Model:</div> |
| <div>AXIS P1365</div> |
| <div>Serial:</div> |
| <div>AXP1365-987654</div> |
| <div>Firmware:</div> |
| <div>v5.55.2</div> |
| </div> |
| </div> |
| <div> |
| <div class="font-medium text-blue-300">Timestamps</div> |
| <div class="grid grid-cols-2 gap-1 mt-1"> |
| <div>Created:</div> |
| <div>2023-05-15 14:23:45</div> |
| <div>Modified:</div> |
| <div>2023-05-15 14:23:45</div> |
| <div>Accessed:</div> |
| <div>2023-06-20 09:12:33</div> |
| </div> |
| </div> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="flex-1 flex flex-col overflow-hidden"> |
| |
| <div class="video-container flex-1 flex items-center justify-center relative"> |
| <div class="relative"> |
| <video id="mainVideo" class="max-w-full max-h-[70vh]"> |
| Your browser does not support the video tag. |
| </video> |
| <input type="file" id="videoUpload" accept="video/mp4" class="hidden"> |
| <div class="zoom-controls absolute top-2 right-2 flex flex-col space-y-2 p-2 rounded"> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-search-plus"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-search-minus"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-expand"></i> |
| </button> |
| </div> |
| <div class="frame-controls absolute bottom-2 left-0 right-0 flex justify-center space-x-4 p-2 rounded"> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-step-backward"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-backward"></i> |
| </button> |
| <button class="p-2 bg-blue-600 hover:bg-blue-700 rounded-full"> |
| <i class="fas fa-play"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-forward"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-step-forward"></i> |
| </button> |
| <button class="p-2 bg-gray-700 hover:bg-gray-600 rounded-full"> |
| <i class="fas fa-pause"></i> |
| </button> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="enhancement-panel bg-gray-800 border-t border-gray-700 p-4"> |
| <div class="flex justify-between items-center mb-3"> |
| <h3 class="font-semibold text-lg flex items-center"> |
| <i class="fas fa-magic mr-2 text-purple-400"></i> Video Enhancements |
| </h3> |
| <div class="flex space-x-2"> |
| <button class="px-3 py-1 bg-blue-600 hover:bg-blue-700 rounded text-sm"> |
| <i class="fas fa-sliders-h mr-1"></i> Presets |
| </button> |
| <button class="px-3 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-redo mr-1"></i> Reset All |
| </button> |
| </div> |
| </div> |
| <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4"> |
| <div class="bg-gray-700 p-3 rounded"> |
| <div class="flex justify-between items-center mb-2"> |
| <span class="font-medium">Brightness</span> |
| <span class="text-xs bg-gray-600 px-2 py-1 rounded">+15%</span> |
| </div> |
| <input type="range" min="-100" max="100" value="15" class="filter-slider w-full"> |
| </div> |
| <div class="bg-gray-700 p-3 rounded"> |
| <div class="flex justify-between items-center mb-2"> |
| <span class="font-medium">Contrast</span> |
| <span class="text-xs bg-gray-600 px-2 py-1 rounded">+20%</span> |
| </div> |
| <input type="range" min="-100" max="100" value="20" class="filter-slider w-full"> |
| </div> |
| <div class="bg-gray-700 p-3 rounded"> |
| <div class="flex justify-between items-center mb-2"> |
| <span class="font-medium">Saturation</span> |
| <span class="text-xs bg-gray-600 px-2 py-1 rounded">+10%</span> |
| </div> |
| <input type="range" min="-100" max="100" value="10" class="filter-slider w-full"> |
| </div> |
| <div class="bg-gray-700 p-3 rounded"> |
| <div class="flex justify-between items-center mb-2"> |
| <span class="font-medium">Sharpness</span> |
| <span class="text-xs bg-gray-600 px-2 py-1 rounded">+25%</span> |
| </div> |
| <input type="range" min="0" max="100" value="25" class="filter-slider w-full"> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="bg-gray-800 border-t border-gray-700 p-2"> |
| <div class="flex justify-between items-center mb-2"> |
| <div class="flex space-x-2"> |
| <button class="px-2 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-cut mr-1"></i> Split |
| </button> |
| <button class="px-2 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-trash-alt mr-1"></i> Delete |
| </button> |
| <button class="px-2 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-bookmark mr-1"></i> Mark |
| </button> |
| </div> |
| <div class="text-sm"> |
| <span class="font-mono">00:12:34:15</span> / <span class="text-gray-400">00:23:45:00</span> |
| </div> |
| </div> |
|
|
| <div class="timeline-track rounded mb-2"> |
| <div class="waveform"> |
| |
| <div class="absolute inset-0 flex items-end"> |
| <div class="waveform-bar" style="left: 0%; height: 20%;"></div> |
| <div class="waveform-bar" style="left: 1%; height: 45%;"></div> |
| <div class="waveform-bar" style="left: 2%; height: 30%;"></div> |
| |
| </div> |
| </div> |
| <div class="timeline-marker" style="left: 30%;"></div> |
| <div class="absolute bottom-0 left-0 right-0 h-6 bg-gray-900 bg-opacity-50 flex"> |
| <div class="border-r border-gray-600 w-16 flex-shrink-0"></div> |
| <div class="border-r border-gray-600 w-16 flex-shrink-0"></div> |
| |
| </div> |
| </div> |
|
|
| <div class="flex space-x-2 overflow-x-auto py-2"> |
| <div class="video-frame w-24 h-16 bg-gray-700 rounded flex-shrink-0"> |
| <img src="https://via.placeholder.com/96x54/4a5568/ffffff?text=Frame+1" class="w-full h-full object-cover rounded"> |
| </div> |
| <div class="video-frame w-24 h-16 bg-gray-700 rounded flex-shrink-0"> |
| <img src="https://via.placeholder.com/96x54/4a5568/ffffff?text=Frame+2" class="w-full h-full object-cover rounded"> |
| </div> |
| <div class="video-frame active w-24 h-16 bg-gray-700 rounded flex-shrink-0"> |
| <img src="https://via.placeholder.com/96x54/4a5568/ffffff?text=Frame+3" class="w-full h-full object-cover rounded"> |
| </div> |
| <div class="video-frame w-24 h-16 bg-gray-700 rounded flex-shrink-0"> |
| <img src="https://via.placeholder.com/96x54/4a5568/ffffff?text=Frame+4" class="w-full h-full object-cover rounded"> |
| </div> |
| |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="w-64 bg-gray-800 border-l border-gray-700 flex flex-col"> |
| <div class="p-4 border-b border-gray-700"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-tags mr-2 text-blue-400"></i> Tags & Annotations |
| </h2> |
| <div class="flex space-x-2 mb-3"> |
| <button class="px-2 py-1 bg-blue-600 hover:bg-blue-700 rounded text-sm"> |
| <i class="fas fa-plus mr-1"></i> Add Tag |
| </button> |
| <button class="px-2 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-highlighter mr-1"></i> Annotate |
| </button> |
| </div> |
| <div class="bg-gray-700 rounded p-2 h-40 overflow-y-auto"> |
| <div class="flex items-center justify-between p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <div class="flex items-center"> |
| <div class="w-3 h-3 rounded-full bg-red-500 mr-2"></div> |
| <span>Suspicious Activity</span> |
| </div> |
| <span class="text-xs text-gray-400">00:02:15</span> |
| </div> |
| <div class="flex items-center justify-between p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <div class="flex items-center"> |
| <div class="w-3 h-3 rounded-full bg-yellow-500 mr-2"></div> |
| <span>Weapon Visible</span> |
| </div> |
| <span class="text-xs text-gray-400">00:05:42</span> |
| </div> |
| <div class="flex items-center justify-between p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <div class="flex items-center"> |
| <div class="w-3 h-3 rounded-full bg-green-500 mr-2"></div> |
| <span>License Plate</span> |
| </div> |
| <span class="text-xs text-gray-400">00:08:33</span> |
| </div> |
| <div class="flex items-center justify-between p-1 hover:bg-gray-600 rounded cursor-pointer"> |
| <div class="flex items-center"> |
| <div class="w-3 h-3 rounded-full bg-blue-500 mr-2"></div> |
| <span>Person of Interest</span> |
| </div> |
| <span class="text-xs text-gray-400">00:12:07</span> |
| </div> |
| </div> |
| </div> |
|
|
| <div class="p-4 border-b border-gray-700"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-palette mr-2 text-blue-400"></i> Color Correction |
| </h2> |
| <div class="grid grid-cols-3 gap-2 mb-3"> |
| <div class="color-picker bg-red-500"></div> |
| <div class="color-picker bg-green-500"></div> |
| <div class="color-picker bg-blue-500"></div> |
| <div class="color-picker bg-yellow-500"></div> |
| <div class="color-picker bg-purple-500"></div> |
| <div class="color-picker bg-pink-500"></div> |
| </div> |
| <div class="space-y-3"> |
| <div> |
| <label class="block text-sm mb-1">Temperature</label> |
| <input type="range" min="-100" max="100" value="0" class="filter-slider w-full"> |
| </div> |
| <div> |
| <label class="block text-sm mb-1">Tint</label> |
| <input type="range" min="-100" max="100" value="0" class="filter-slider w-full"> |
| </div> |
| <div> |
| <label class="block text-sm mb-1">Exposure</label> |
| <input type="range" min="-100" max="100" value="0" class="filter-slider w-full"> |
| </div> |
| </div> |
| </div> |
|
|
| <div class="p-4 flex-1 overflow-y-auto"> |
| <h2 class="font-semibold text-lg mb-2 flex items-center"> |
| <i class="fas fa-chart-line mr-2 text-blue-400"></i> Analysis Tools |
| </h2> |
| <div class="mb-4"> |
| <h3 class="font-medium mb-2 flex items-center"> |
| <i class="fas fa-code mr-2 text-green-400"></i> Python Script |
| </h3> |
| <div class="bg-gray-700 rounded p-2 mb-2 h-32"> |
| <textarea id="pythonScript" class="w-full h-full bg-gray-800 text-xs font-mono p-2 rounded focus:outline-none focus:ring-1 focus:ring-blue-500" placeholder="Enter Python script here..."></textarea> |
| </div> |
| <div class="flex space-x-2"> |
| <button id="runScript" class="px-3 py-1 bg-green-600 hover:bg-green-700 rounded text-sm"> |
| <i class="fas fa-play mr-1"></i> Run |
| </button> |
| <button id="saveScript" class="px-3 py-1 bg-blue-600 hover:bg-blue-700 rounded text-sm"> |
| <i class="fas fa-save mr-1"></i> Save |
| </button> |
| <button id="loadScript" class="px-3 py-1 bg-gray-700 hover:bg-gray-600 rounded text-sm"> |
| <i class="fas fa-folder-open mr-1"></i> Load |
| </button> |
| </div> |
                    </div>
| <div class="space-y-3"> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-user-shield mr-2 text-green-400"></i> |
| <span>Face Detection</span> |
| </button> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-car mr-2 text-blue-400"></i> |
| <span>License Plate Recognition</span> |
| </button> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-route mr-2 text-purple-400"></i> |
| <span>Motion Tracking</span> |
| </button> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-arrows-alt mr-2 text-yellow-400"></i> |
| <span>Object Measurement</span> |
| </button> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-wave-square mr-2 text-red-400"></i> |
| <span>Audio Analysis</span> |
| </button> |
| <button class="w-full p-2 bg-gray-700 hover:bg-gray-600 rounded flex items-center"> |
| <i class="fas fa-clock mr-2 text-orange-400"></i> |
| <span>Timeline Reconstruction</span> |
| </button> |
| </div> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div id="analysisModal" class="fixed inset-0 z-50 flex items-center justify-center modal-overlay hidden"> |
| <div class="bg-gray-800 rounded-lg shadow-xl w-3/4 max-w-4xl modal-content"> |
| <div class="p-4 border-b border-gray-700 flex justify-between items-center"> |
| <h3 class="text-xl font-semibold">Analysis Results</h3> |
| <button id="closeModal" class="text-gray-400 hover:text-white"> |
| <i class="fas fa-times"></i> |
| </button> |
| </div> |
| <div class="p-4"> |
| <div class="grid grid-cols-2 gap-4"> |
| <div> |
| <h4 class="font-medium mb-2 text-blue-300">Detected Faces</h4> |
| <div class="grid grid-cols-3 gap-2"> |
| <div class="bg-gray-700 p-2 rounded flex flex-col items-center"> |
| <img src="https://via.placeholder.com/80x80/4a5568/ffffff?text=Face+1" class="w-16 h-16 rounded-full mb-1"> |
| <span class="text-xs">Confidence: 87%</span> |
| </div> |
| <div class="bg-gray-700 p-2 rounded flex flex-col items-center"> |
| <img src="https://via.placeholder.com/80x80/4a5568/ffffff?text=Face+2" class="w-16 h-16 rounded-full mb-1"> |
| <span class="text-xs">Confidence: 92%</span> |
| </div> |
| <div class="bg-gray-700 p-2 rounded flex flex-col items-center"> |
| <img src="https://via.placeholder.com/80x80/4a5568/ffffff?text=Face+3" class="w-16 h-16 rounded-full mb-1"> |
| <span class="text-xs">Confidence: 78%</span> |
| </div> |
| </div> |
| </div> |
| <div> |
| <h4 class="font-medium mb-2 text-blue-300">Detected Objects</h4> |
| <div class="bg-gray-700 rounded p-3"> |
| <div class="flex justify-between mb-1"> |
| <span>Person</span> |
| <span class="text-blue-300">92%</span> |
| </div> |
| <div class="flex justify-between mb-1"> |
| <span>Vehicle</span> |
| <span class="text-blue-300">85%</span> |
| </div> |
| <div class="flex justify-between mb-1"> |
| <span>Weapon</span> |
| <span class="text-blue-300">67%</span> |
| </div> |
| <div class="flex justify-between"> |
| <span>Bag</span> |
| <span class="text-blue-300">54%</span> |
| </div> |
| </div> |
| </div> |
| </div> |
| <div class="mt-4"> |
| <h4 class="font-medium mb-2 text-blue-300">Timeline of Events</h4> |
| <div class="bg-gray-700 rounded p-3"> |
| <div class="flex items-start mb-2"> |
| <div class="bg-blue-500 rounded-full w-3 h-3 mt-1 mr-2"></div> |
| <div> |
| <div class="font-medium">00:02:15 - Person enters frame</div> |
| <div class="text-sm text-gray-400">Left side of the building</div> |
| </div> |
| </div> |
| <div class="flex items-start mb-2"> |
| <div class="bg-red-500 rounded-full w-3 h-3 mt-1 mr-2"></div> |
| <div> |
| <div class="font-medium">00:05:42 - Object exchange</div> |
| <div class="text-sm text-gray-400">Between two individuals</div> |
| </div> |
| </div> |
| <div class="flex items-start"> |
| <div class="bg-yellow-500 rounded-full w-3 h-3 mt-1 mr-2"></div> |
| <div> |
| <div class="font-medium">00:08:33 - Vehicle departs</div> |
| <div class="text-sm text-gray-400">License plate partially visible</div> |
| </div> |
| </div> |
| </div> |
| </div> |
| </div> |
| <div class="p-4 border-t border-gray-700 flex justify-end"> |
| <button class="px-4 py-2 bg-blue-600 hover:bg-blue-700 rounded mr-2"> |
| Export Report |
| </button> |
| <button id="closeModalBtn" class="px-4 py-2 bg-gray-700 hover:bg-gray-600 rounded"> |
| Close |
| </button> |
| </div> |
| </div> |
| </div> |
|
|
| <script> |
| |
| document.addEventListener('DOMContentLoaded', function() { |
// --- DOM references and playback state ---
const video = document.getElementById('mainVideo');
let videoDuration = 0;     // duration in seconds, set once metadata loads
let videoFrames = [];      // timestamps (s) backing the thumbnail strip
let currentFrameIndex = 0; // index into videoFrames of the highlighted thumbnail

// Load a user-selected video file into the <video> element via an object URL.
const videoUpload = document.getElementById('videoUpload');
videoUpload.addEventListener('change', function(e) {
    if (e.target.files && e.target.files[0]) {
        const file = e.target.files[0];
        const videoURL = URL.createObjectURL(file);
        video.src = videoURL;

        // Rebuild timeline + thumbnails only once the duration is known.
        video.onloadedmetadata = function() {
            videoDuration = video.duration;
            initializeTimeline();
            extractVideoFrames();
            updateTimeDisplay();
        };
    }
});
| |
| |
// Rebuild the timeline track for the loaded video: a placeholder waveform,
// a playhead marker, and 5-second tick labels sized to the video duration.
// Fix: removed the unused `waveform` local the original declared and never read.
function initializeTimeline() {
    const timeline = document.querySelector('.timeline-track');

    // Replace the static demo markup with fresh containers.
    timeline.innerHTML = `
        <div class="waveform">
            <div class="absolute inset-0 flex items-end" id="waveformBars"></div>
        </div>
        <div class="timeline-marker" style="left: 0%;"></div>
        <div class="absolute bottom-0 left-0 right-0 h-6 bg-gray-900 bg-opacity-50 flex" id="timelineTicks"></div>
    `;

    // One tick every 5 seconds (clips shorter than 5 s get none).
    const ticksContainer = document.getElementById('timelineTicks');
    const totalTicks = Math.floor(videoDuration / 5);
    for (let i = 0; i < totalTicks; i++) {
        const tick = document.createElement('div');
        tick.className = 'border-r border-gray-600 w-16 flex-shrink-0';
        tick.textContent = i === 0 ? '00:00' : `${i * 5}s`;
        ticksContainer.appendChild(tick);
    }

    // NOTE(review): bar heights are random — this is a visual placeholder,
    // not a real audio waveform (no Web Audio analysis is performed).
    const waveformBars = document.getElementById('waveformBars');
    for (let i = 0; i < 200; i++) {
        const bar = document.createElement('div');
        bar.className = 'waveform-bar';
        bar.style.left = `${(i / 200) * 100}%`;
        bar.style.height = `${Math.random() * 60 + 10}%`;
        waveformBars.appendChild(bar);
    }
}
| |
| |
// Build the thumbnail strip for the loaded video.
// NOTE(review): the thumbnails are placeholder images fetched from
// via.placeholder.com, not real decoded frames — no canvas/seek capture is
// performed, and the strip needs network access to render.
function extractVideoFrames() {
    const framesContainer = document.querySelector('.flex.space-x-2.overflow-x-auto.py-2');
    framesContainer.innerHTML = '';

    // Roughly 2 thumbnails per second, capped at 20 for long clips.
    const totalFrames = Math.min(20, Math.floor(videoDuration * 2));
    videoFrames = [];

    for (let i = 0; i < totalFrames; i++) {
        const frameTime = (i / totalFrames) * videoDuration;
        videoFrames.push(frameTime);

        const frame = document.createElement('div');
        frame.className = `video-frame w-24 h-16 bg-gray-700 rounded flex-shrink-0`;
        frame.dataset.time = frameTime; // seconds; read back on click below
        // NOTE(review): the timestamp overlay is absolutely positioned —
        // assumes .video-frame is position:relative in the stylesheet; confirm.
        frame.innerHTML = `
            <img src="https://via.placeholder.com/96x54/4a5568/ffffff?text=Frame+${i+1}" class="w-full h-full object-cover rounded">
            <div class="absolute bottom-0 left-0 right-0 bg-black bg-opacity-50 text-white text-xs p-1 text-center">
                ${formatTime(frameTime)}
            </div>
        `;

        // Clicking a thumbnail seeks the video to its timestamp.
        // NOTE(review): currentFrameIndex is not updated before
        // updateActiveFrame() here, so the highlight may lag one click behind.
        frame.addEventListener('click', function() {
            video.currentTime = parseFloat(this.dataset.time);
            updateActiveFrame();
        });

        framesContainer.appendChild(frame);
    }

    updateActiveFrame();
}
| |
| |
// Highlight the thumbnail at currentFrameIndex and clear all the others.
function updateActiveFrame() {
    const thumbnails = document.querySelectorAll('.video-frame');
    thumbnails.forEach((thumb, idx) => {
        if (idx === currentFrameIndex) {
            thumb.classList.add('active');
        } else {
            thumb.classList.remove('active');
        }
    });
}
| |
| |
// Format a time offset in seconds as "HH:MM:SS".
// Fix: String.prototype.substr is deprecated — use slice(11, 19) to take the
// HH:MM:SS portion of the ISO-8601 timestamp instead.
// NOTE(review): this wraps for offsets of 24 h or more (the date rolls over);
// fine for typical clip lengths.
function formatTime(seconds) {
    const date = new Date(seconds * 1000);
    return date.toISOString().slice(11, 19);
}
| |
| |
// Sync the "current / total" timecode readout with the playback position.
// BUG FIX: the original selector '.text-sm.font-mono' is a compound selector
// requiring both classes on ONE element, but the markup puts text-sm on the
// wrapper <div> and font-mono on the inner <span> — it never matched, so the
// readout never updated. Target the actual spans and keep the existing
// " / " separator structure intact.
function updateTimeDisplay() {
    const currentEl = document.querySelector('.text-sm .font-mono');
    if (currentEl) {
        currentEl.textContent = formatTime(video.currentTime);
        const durationEl = currentEl.parentElement.querySelector('.text-gray-400');
        if (durationEl) {
            durationEl.textContent = formatTime(videoDuration);
        }
    }
}
| |
| |
// --- Transport controls ---
// NOTE(review): these selectors assume a '.frame-controls' container and
// fa-step-backward / fa-backward icons that are not visible in this excerpt
// (presumably defined earlier in the document). If any selector misses,
// querySelector returns null and the `.parentElement` access throws at load
// time — confirm against the full markup.
const playBtn = document.querySelector('.frame-controls .fa-play').parentElement;
const pauseBtn = document.querySelector('.frame-controls .fa-pause').parentElement;

// Play: highlight the play button and poll at 10 Hz to keep the thumbnail
// highlight and timecode readout in sync while the video runs.
playBtn.addEventListener('click', function() {
    this.classList.replace('bg-gray-700', 'bg-blue-600');
    pauseBtn.classList.replace('bg-blue-600', 'bg-gray-700');
    video.play();

    const frameCheck = setInterval(() => {
        updateCurrentFrameIndex();
        updateTimeDisplay();

        // Stop polling once playback halts; on a natural end, flip the
        // highlight back to the pause button.
        if (video.paused || video.ended) {
            clearInterval(frameCheck);
            if (video.ended) {
                playBtn.classList.replace('bg-blue-600', 'bg-gray-700');
                pauseBtn.classList.replace('bg-gray-700', 'bg-blue-600');
            }
        }
    }, 100);
});

// Pause: swap the highlight back and freeze the readout at the current time.
pauseBtn.addEventListener('click', function() {
    this.classList.replace('bg-gray-700', 'bg-blue-600');
    playBtn.classList.replace('bg-blue-600', 'bg-gray-700');
    video.pause();
    updateTimeDisplay();
});

// Jump to start.
document.querySelector('.fa-step-backward').parentElement.addEventListener('click', () => {
    video.currentTime = 0;
    updateCurrentFrameIndex();
    updateTimeDisplay();
});

// Skip back 5 s, clamped at 0.
document.querySelector('.fa-backward').parentElement.addEventListener('click', () => {
    video.currentTime = Math.max(0, video.currentTime - 5);
    updateCurrentFrameIndex();
    updateTimeDisplay();
});

// Skip forward 5 s, clamped at the end.
document.querySelector('.fa-forward').parentElement.addEventListener('click', () => {
    video.currentTime = Math.min(videoDuration, video.currentTime + 5);
    updateCurrentFrameIndex();
    updateTimeDisplay();
});

// Jump to end.
document.querySelector('.fa-step-forward').parentElement.addEventListener('click', () => {
    video.currentTime = videoDuration;
    updateCurrentFrameIndex();
    updateTimeDisplay();
});
| |
| |
// Map the current playback time onto the thumbnail strip and highlight the
// frame whose interval contains it (last frame once we are past them all).
function updateCurrentFrameIndex() {
    if (!videoFrames.length) return;

    const firstLater = videoFrames.findIndex(t => video.currentTime < t);
    currentFrameIndex = firstLater === -1
        ? videoFrames.length - 1
        : Math.max(0, firstLater - 1);
    updateActiveFrame();
}
| |
| |
// Fullscreen toggle with vendor-prefixed fallbacks for older WebKit / IE.
// NOTE(review): the fa-expand button is not visible in this excerpt — confirm
// it exists earlier in the document, otherwise `.parentElement` throws here.
document.querySelector('.fa-expand').parentElement.addEventListener('click', () => {
    if (video.requestFullscreen) {
        video.requestFullscreen();
    } else if (video.webkitRequestFullscreen) {
        video.webkitRequestFullscreen();
    } else if (video.msRequestFullscreen) {
        video.msRequestFullscreen();
    }
});

// Highlight handling for the static demo thumbnails.
// NOTE(review): extractVideoFrames() replaces these nodes (and attaches its
// own click handlers) once a video is loaded, so this wiring only affects the
// placeholder strip shown before any upload.
const frames = document.querySelectorAll('.video-frame');
frames.forEach(frame => {
    frame.addEventListener('click', function() {
        frames.forEach(f => f.classList.remove('active'));
        this.classList.add('active');
    });
});
| |
| |
// --- Tagging & annotation controls ---
// NOTE(review): ':has()' requires a modern browser (e.g. no Firefox support
// before v121) — confirm the target audience before relying on it.
const addTagBtn = document.querySelector('button:has(.fa-plus)');
const annotateBtn = document.querySelector('button:has(.fa-highlighter)');
const tagsContainer = document.querySelector('.bg-gray-700.rounded.p-2.h-40');

// Add a tag at the current playback position with a random marker colour.
addTagBtn.addEventListener('click', () => {
    const tagName = prompt('Enter tag name:');
    if (tagName) {
        const colors = ['red', 'yellow', 'green', 'blue', 'purple', 'pink'];
        const randomColor = colors[Math.floor(Math.random() * colors.length)];

        const tag = document.createElement('div');
        tag.className = 'flex items-center justify-between p-1 hover:bg-gray-600 rounded cursor-pointer';
        // NOTE(review): `bg-${randomColor}-500` is assembled at runtime; with
        // a build-time Tailwind setup these classes must be safelisted. Tags
        // are appended at the end, not inserted in timestamp order. User text
        // goes through innerHTML unescaped — acceptable for self-entered
        // prompt input, but worth hardening if tags ever come from elsewhere.
        tag.innerHTML = `
            <div class="flex items-center">
                <div class="w-3 h-3 rounded-full bg-${randomColor}-500 mr-2"></div>
                <span>${tagName}</span>
            </div>
            <span class="text-xs text-gray-400">${formatTime(video.currentTime)}</span>
        `;

        tagsContainer.appendChild(tag);
    }
});

// Annotation mode is a stub: it only announces itself.
annotateBtn.addEventListener('click', () => {
    alert('Annotation mode activated. Click on the video to add annotations.');
});
| |
| |
// --- Python script editor controls (Run / Save / Load) ---
const pythonScript = document.getElementById('pythonScript');
const runScriptBtn = document.getElementById('runScript');
const saveScriptBtn = document.getElementById('saveScript');
const loadScriptBtn = document.getElementById('loadScript');

// Run: hand the script to the execution backend if one is present.
// BUG FIX: `executePythonScript` is never defined in this page's JavaScript —
// the <script src="python/*.py"> tags cannot define it, since browsers parse
// those files as JS and fail — so the original call threw a ReferenceError.
// Guard the call and tell the user instead of crashing.
runScriptBtn.addEventListener('click', () => {
    const script = pythonScript.value;
    if (!script.trim()) {
        alert('Please enter a Python script first');
        return;
    }
    if (typeof executePythonScript === 'function') {
        alert(executePythonScript(script));
    } else {
        alert('Python execution backend is not available — scripts cannot be run in the browser.');
    }
});

// Save: download the editor contents as a .py file via a temporary blob URL.
saveScriptBtn.addEventListener('click', () => {
    const script = pythonScript.value;
    if (script.trim()) {
        const blob = new Blob([script], { type: 'text/plain' });
        const url = URL.createObjectURL(blob);
        const a = document.createElement('a');
        a.href = url;
        a.download = 'forensic_script.py';
        a.click();
        URL.revokeObjectURL(url);
    } else {
        alert('No script to save');
    }
});

// Load: read a local .py/.txt file into the editor.
loadScriptBtn.addEventListener('click', () => {
    const input = document.createElement('input');
    input.type = 'file';
    input.accept = '.py,.txt';
    input.onchange = e => {
        const file = e.target.files[0];
        if (!file) return; // dialog dismissed without a selection
        const reader = new FileReader();
        reader.onload = event => {
            pythonScript.value = event.target.result;
        };
        reader.readAsText(file);
    };
    input.click();
});
| |
| |
// --- Analysis tools → results modal wiring ---
// BUG FIX: '.analysis-tools button' matches nothing — no element in this
// document carries an 'analysis-tools' class — so the modal could never be
// opened. Keep the original selector first (in case the class exists in
// markup outside this excerpt), then fall back to the sidebar tool list
// (the only .space-y-3 container that holds buttons).
let analysisButtons = document.querySelectorAll('.analysis-tools button');
if (analysisButtons.length === 0) {
    analysisButtons = document.querySelectorAll('.space-y-3 button');
}
const analysisModal = document.getElementById('analysisModal');
const closeModalBtns = [document.getElementById('closeModal'), document.getElementById('closeModalBtn')];

// Any analysis tool opens the shared results modal.
analysisButtons.forEach(btn => {
    btn.addEventListener('click', function() {
        analysisModal.classList.remove('hidden');
    });
});

// Both close controls hide it again (skip any missing button defensively).
closeModalBtns.forEach(btn => {
    if (btn) {
        btn.addEventListener('click', function() {
            analysisModal.classList.add('hidden');
        });
    }
});
| |
| |
// Wire every range slider: expose an aria-label (used as the lookup key by
// applyVideoFilters) and live-update its "+NN%" badge where one exists.
const sliders = document.querySelectorAll('.filter-slider');
sliders.forEach(slider => {
    // BUG FIX: the enhancement-panel sliders have a <span class="font-medium">
    // label, but the colour-correction sliders use a <label> element instead.
    // The original `.textContent` access on the missing span threw a TypeError
    // on the first colour slider and aborted the rest of this setup script.
    const labelEl = slider.parentElement.querySelector('span.font-medium')
        || slider.parentElement.querySelector('label');
    if (labelEl) {
        slider.setAttribute('aria-label', labelEl.textContent.trim());
    }

    const valueDisplay = slider.parentElement.querySelector('span.text-xs');
    if (valueDisplay) {
        slider.addEventListener('input', function() {
            valueDisplay.textContent = `${this.value > 0 ? '+' : ''}${this.value}%`;
            applyVideoFilters();
        });
    }
});

// Apply the enhancement sliders to the <video> via a CSS filter chain.
// Slider values are -100..100 percent deltas around a neutral 1.0.
function applyVideoFilters() {
    // A missing slider contributes a neutral 0 instead of crashing.
    const valueOf = name => {
        const el = document.querySelector(`input[aria-label="${name}"]`);
        return el ? Number(el.value) : 0;
    };
    const brightness = valueOf('Brightness');
    const contrast = valueOf('Contrast');
    const saturation = valueOf('Saturation');

    // NOTE(review): 'Sharpness' has no CSS filter equivalent; its slider is
    // currently cosmetic (the original read the value and ignored it too).
    video.style.filter = `
        brightness(${1 + brightness / 100})
        contrast(${1 + contrast / 100})
        saturate(${1 + saturation / 100})
    `;
}
| |
| |
// Reset All: restore every slider to its initial markup value. The
// enhancement sliders default to the non-zero demo settings they ship with
// (+15/+20/+10/+25); everything else (colour correction) resets to 0.
document.querySelector('button:has(.fa-redo)').addEventListener('click', () => {
    sliders.forEach(slider => {
        const label = slider.getAttribute('aria-label');
        if (label === 'Brightness') slider.value = 15;
        else if (label === 'Contrast') slider.value = 20;
        else if (label === 'Saturation') slider.value = 10;
        else if (label === 'Sharpness') slider.value = 25;
        else slider.value = 0;

        const valueDisplay = slider.parentElement.querySelector('span.text-xs');
        if (valueDisplay) {
            // slider.value is a string; '>' coerces it numerically here.
            valueDisplay.textContent = `${slider.value > 0 ? '+' : ''}${slider.value}%`;
        }
    });
    applyVideoFilters();
});

// Open-video shortcut: forwards the click to the hidden file input.
// NOTE(review): 'button:has(.fa-folder-open)' also matches the script "Load"
// button (#loadScript); querySelector picks the first match in document
// order — confirm the intended header button precedes it in the markup.
document.querySelector('button:has(.fa-folder-open)').addEventListener('click', () => {
    videoUpload.click();
});
| }); |
| </script> |
| |
| <script src="python/video_processing.py"></script> |
| <script src="python/analysis_tools.py"></script> |
| <script src="python/metadata_utils.py"></script> |
| <script src="python/script_runner.py"></script> |
| <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=Crazyka51/editor" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body> |
| </html> |