Crazyka51 committed
Commit 70f8ba9 · verified · 1 Parent(s): 5e9d3f3

create python files for all functions. - Follow Up Deployment

Files changed (1): index.html +400 -1
index.html CHANGED
@@ -1,3 +1,402 @@
+
+ def execute_python_script(script_code, context=None):
+     """
+     Execute a Python script with a restricted set of builtins.
+     Note: exec-based sandboxing is best-effort, not a hard security boundary.
+     Returns the script's stdout, or an error message.
+     """
+     import io
+     from contextlib import redirect_stdout, redirect_stderr
+
+     if context is None:
+         context = {
+             'video_path': None,
+             'frame_data': None,
+             'metadata': None
+         }
+
+     # Capture stdout/stderr produced by the script
+     output = io.StringIO()
+     error = io.StringIO()
+
+     try:
+         with redirect_stdout(output), redirect_stderr(error):
+             # Restricted execution environment: whitelist of builtins only
+             exec_globals = {
+                 '__builtins__': {
+                     'print': print,
+                     'str': str,
+                     'int': int,
+                     'float': float,
+                     'list': list,
+                     'dict': dict,
+                     'tuple': tuple,
+                     'range': range,
+                     'len': len,
+                     'enumerate': enumerate,
+                     'zip': zip,
+                     'min': min,
+                     'max': max,
+                     'sum': sum,
+                     'abs': abs,
+                     'round': round
+                 },
+                 'context': context
+             }
+
+             exec(script_code, exec_globals)
+
+         if error.getvalue():
+             return f"Error: {error.getvalue()}"
+         return output.getvalue()
+
+     except Exception as e:
+         return f"Error executing script: {str(e)}"
+
+ def validate_python_script(script_code):
+     """
+     Validate Python script syntax and reject disallowed constructs.
+     Returns (is_valid, error_message).
+     """
+     import ast
+
+     try:
+         # Parse the script into an AST
+         tree = ast.parse(script_code)
+
+         # Walk the tree and reject imports and dangerous calls
+         for node in ast.walk(tree):
+             if isinstance(node, (ast.Import, ast.ImportFrom)):
+                 return (False, "Import statements are not allowed")
+             if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
+                 if node.func.id in ('eval', 'exec', 'compile', '__import__', 'open', 'execfile'):
+                     return (False, f"Function {node.func.id}() is not allowed")
+
+         return (True, "Script is valid")
+     except SyntaxError as e:
+         return (False, f"Syntax error: {str(e)}")
+
+
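A minimal usage sketch of the validate-then-execute flow these two helpers are meant for (the sample script is illustrative):

    script = "total = sum(range(10))\nprint('total =', total)"
    ok, message = validate_python_script(script)
    if ok:
        print(execute_python_script(script))   # prints: total = 45
    else:
        print(message)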
+ def extract_video_metadata(video_path):
+     """
+     Extract technical metadata from a video file.
+     Returns a dictionary of metadata, or None if the file cannot be opened.
+     """
+     import cv2
+     from datetime import datetime
+     import os
+
+     cap = cv2.VideoCapture(video_path)
+
+     if not cap.isOpened():
+         return None
+
+     # Basic video properties
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     # Guard against containers that report an FPS of 0
+     duration = frame_count / fps if fps else 0.0
+
+     # Filesystem info
+     file_stats = os.stat(video_path)
+     created = datetime.fromtimestamp(file_stats.st_ctime)
+     modified = datetime.fromtimestamp(file_stats.st_mtime)
+
+     cap.release()
+
+     return {
+         'filename': os.path.basename(video_path),
+         'path': video_path,
+         'resolution': f"{width}x{height}",
+         'fps': fps,
+         'duration': duration,
+         'frame_count': frame_count,
+         'size': file_stats.st_size,
+         'created': created,
+         'modified': modified
+     }
+
+ def extract_audio_metadata(audio_path):
+     """
+     Extract technical metadata from a WAV audio file.
+     Returns a dictionary of metadata, or None on failure.
+     """
+     import wave
+     from datetime import datetime
+     import os
+
+     try:
+         with wave.open(audio_path, 'rb') as audio_file:
+             channels = audio_file.getnchannels()
+             sample_width = audio_file.getsampwidth()
+             framerate = audio_file.getframerate()
+             frames = audio_file.getnframes()
+             duration = frames / float(framerate)
+
+         file_stats = os.stat(audio_path)
+         created = datetime.fromtimestamp(file_stats.st_ctime)
+         modified = datetime.fromtimestamp(file_stats.st_mtime)
+
+         return {
+             'filename': os.path.basename(audio_path),
+             'path': audio_path,
+             'channels': channels,
+             'sample_width': sample_width,
+             'sample_rate': framerate,
+             'duration': duration,
+             'size': file_stats.st_size,
+             'created': created,
+             'modified': modified
+         }
+     except (wave.Error, OSError):
+         # Not a readable WAV file
+         return None
+
+ def extract_exif_data(image_path):
+     """
+     Extract EXIF metadata from an image file.
+     Returns a dictionary of EXIF data, or None if unavailable.
+     """
+     from PIL import Image, ExifTags
+     from datetime import datetime
+     import os
+
+     try:
+         img = Image.open(image_path)
+         exif_data = img._getexif()  # Pillow's legacy accessor; returns None if no EXIF
+
+         if not exif_data:
+             return None
+
+         # Map numeric EXIF tag IDs to their human-readable names
+         exif = {}
+         for tag, value in exif_data.items():
+             decoded = ExifTags.TAGS.get(tag, tag)
+             exif[decoded] = value
+
+         # Filesystem info
+         file_stats = os.stat(image_path)
+         exif['filename'] = os.path.basename(image_path)
+         exif['path'] = image_path
+         exif['size'] = file_stats.st_size
+         exif['created'] = datetime.fromtimestamp(file_stats.st_ctime)
+         exif['modified'] = datetime.fromtimestamp(file_stats.st_mtime)
+
+         return exif
+     except (OSError, AttributeError):
+         return None
+
+
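A usage sketch for the metadata helpers (the file path is hypothetical):

    meta = extract_video_metadata('uploads/evidence.mp4')
    if meta:
        print(f"{meta['filename']}: {meta['resolution']} @ {meta['fps']:.2f} fps, "
              f"{meta['duration']:.1f}s, {meta['frame_count']} frames")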
+ def detect_faces(frame, min_confidence=0.7):
+     """
+     Detect faces in a frame using the OpenCV DNN face detector.
+     Returns a list of face bounding boxes and confidence scores.
+     """
+     import cv2
+     import numpy as np
+
+     # Pre-trained SSD face detection model (paths are expected to exist locally)
+     model_file = "models/res10_300x300_ssd_iter_140000_fp16.caffemodel"
+     config_file = "models/deploy.prototxt"
+     net = cv2.dnn.readNetFromCaffe(config_file, model_file)
+
+     (h, w) = frame.shape[:2]
+     blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
+                                  (300, 300), (104.0, 177.0, 123.0))
+
+     net.setInput(blob)
+     detections = net.forward()
+
+     faces = []
+     for i in range(detections.shape[2]):
+         confidence = detections[0, 0, i, 2]
+         if confidence > min_confidence:
+             # Scale the normalized box back to frame coordinates
+             box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
+             faces.append({
+                 'box': box.astype("int"),
+                 'confidence': float(confidence)
+             })
+
+     return faces
+
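A sketch of consuming detect_faces() output; this model reports corner coordinates (startX, startY, endX, endY), so boxes can be drawn directly (the frame path is hypothetical):

    import cv2
    frame = cv2.imread('frames/frame_0.jpg')
    for face in detect_faces(frame):
        x1, y1, x2, y2 = face['box']
        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    cv2.imwrite('frames/frame_0_faces.jpg', frame)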
+ def detect_objects(frame, min_confidence=0.5):
+     """
+     Detect common objects using a COCO-trained YOLOv3 model.
+     Returns a list of detected objects with confidence scores.
+     """
+     import cv2
+     import numpy as np
+
+     # Load COCO class labels and the YOLOv3 model (paths are expected to exist locally)
+     with open("models/coco.names", "r") as f:
+         classes = [line.strip() for line in f.readlines()]
+
+     model_file = "models/yolov3.weights"
+     config_file = "models/yolov3.cfg"
+     net = cv2.dnn.readNetFromDarknet(config_file, model_file)
+
+     (h, w) = frame.shape[:2]
+     blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416),
+                                  swapRB=True, crop=False)
+
+     net.setInput(blob)
+     layer_names = net.getLayerNames()
+     # getUnconnectedOutLayers() returns nested arrays on older OpenCV and a
+     # flat array on OpenCV >= 4.5.4; flatten() handles both
+     output_layers = [layer_names[int(i) - 1]
+                      for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]
+     outputs = net.forward(output_layers)
+
+     objects = []
+     for output in outputs:
+         for detection in output:
+             scores = detection[5:]
+             class_id = np.argmax(scores)
+             confidence = scores[class_id]
+
+             if confidence > min_confidence:
+                 # YOLO reports a normalized center/size box; convert to x, y, w, h
+                 center_x = int(detection[0] * w)
+                 center_y = int(detection[1] * h)
+                 width = int(detection[2] * w)
+                 height = int(detection[3] * h)
+
+                 x = int(center_x - width / 2)
+                 y = int(center_y - height / 2)
+
+                 objects.append({
+                     'class': classes[class_id],
+                     'confidence': float(confidence),
+                     'box': (x, y, width, height)
+                 })
+
+     return objects
+
+ def recognize_license_plate(frame):
+     """
+     Attempt to recognize license plate text via OCR.
+     Returns the detected text and bounding box, or None.
+     """
+     import pytesseract
+     import cv2
+
+     # Preprocess the frame for better OCR contrast
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     blurred = cv2.GaussianBlur(gray, (5, 5), 0)
+     thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+
+     # Find candidate regions; findContours returns 2 or 3 values
+     # depending on the OpenCV version
+     contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+     contours = contours[0] if len(contours) == 2 else contours[1]
+
+     for c in contours:
+         x, y, w, h = cv2.boundingRect(c)
+         aspect_ratio = w / float(h)
+
+         # Keep contours that match a typical plate aspect ratio and minimum size
+         if 2 < aspect_ratio < 5 and w > 100 and h > 30:
+             plate_roi = frame[y:y+h, x:x+w]
+             # --psm 8 treats the region as a single word
+             text = pytesseract.image_to_string(plate_roi, config='--psm 8')
+             if text.strip():
+                 return {
+                     'text': text.strip(),
+                     'box': (x, y, w, h)
+                 }
+
+     return None
+
+
+ def extract_frames(video_path, frames_dir, frame_rate=1):
+     """
+     Extract frames from a video at the specified rate (frames per second).
+     Returns a list of frame file paths.
+     """
+     import cv2
+     import os
+
+     if not os.path.exists(frames_dir):
+         os.makedirs(frames_dir)
+
+     vidcap = cv2.VideoCapture(video_path)
+     fps = vidcap.get(cv2.CAP_PROP_FPS)
+     # Guard against an FPS of 0 and requested rates above the source FPS
+     frame_interval = max(1, int(fps / frame_rate)) if fps else 1
+
+     count = 0
+     frame_paths = []
+     success, image = vidcap.read()
+
+     while success:
+         if count % frame_interval == 0:
+             frame_path = os.path.join(frames_dir, f"frame_{count}.jpg")
+             cv2.imwrite(frame_path, image)
+             frame_paths.append(frame_path)
+         success, image = vidcap.read()
+         count += 1
+
+     vidcap.release()
+     return frame_paths
+
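A sketch chaining extract_frames() with the detectors above (video path and output directory are hypothetical):

    import cv2
    for path in extract_frames('uploads/evidence.mp4', 'frames', frame_rate=1):
        frame = cv2.imread(path)
        faces = detect_faces(frame)
        if faces:
            best = max(f['confidence'] for f in faces)
            print(f"{path}: {len(faces)} face(s), best confidence {best:.2f}")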
+ def apply_filters(frame, brightness=0, contrast=0, saturation=0, sharpness=0):
+     """
+     Apply enhancement filters to a frame; each parameter is a percentage
+     adjustment. Returns the processed frame.
+     """
+     import cv2
+     import numpy as np
+
+     # Convert brightness/contrast percentages to OpenCV's alpha/beta form
+     alpha = 1 + contrast / 100
+     beta = brightness
+     frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)
+
+     # Adjust saturation in HSV space; compute in float and clip to avoid
+     # uint8 wrap-around
+     hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+     sat = hsv[:, :, 1].astype(np.float32) * (1 + saturation / 100)
+     hsv[:, :, 1] = np.clip(sat, 0, 255).astype(np.uint8)
+     frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+     # Sharpen with an unsharp mask
+     if sharpness > 0:
+         blurred = cv2.GaussianBlur(frame, (0, 0), 3)
+         frame = cv2.addWeighted(frame, 1 + sharpness / 100, blurred, -sharpness / 100, 0)
+
+     return frame
+
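Example call with illustrative values:

    enhanced = apply_filters(frame, brightness=10, contrast=20, saturation=15, sharpness=30)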
+ def stabilize_video(input_path, output_path):
+     """
+     Stabilize shaky video using OpenCV.
+     Returns the path to the stabilized video.
+     """
+     import cv2
+
+     # A full implementation would use feature detection and motion estimation;
+     # this is a simplified placeholder that copies frames through unchanged.
+     cap = cv2.VideoCapture(input_path)
+     fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back to 30 fps if unreported
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     out = cv2.VideoWriter(output_path, fourcc, fps,
+                           (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+                            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+
+     # The actual stabilization algorithm would go here
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         out.write(frame)
+
+     cap.release()
+     out.release()
+     return output_path
+
+
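One way the placeholder above could be filled in: a sketch of feature-based stabilization (illustrative, not the committed implementation) that tracks corners between consecutive frames with Lucas-Kanade optical flow, fits a rigid transform per frame, smooths the accumulated trajectory with a moving average, and re-warps each frame. Feature counts and the smoothing radius are illustrative, and the sketch assumes reasonably textured frames so corner tracking succeeds:

    import cv2
    import numpy as np

    def stabilize_video_sketch(input_path, output_path, radius=15):
        cap = cv2.VideoCapture(input_path)
        n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

        # Pass 1: estimate inter-frame motion (dx, dy, rotation angle)
        ret, prev = cap.read()
        prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        transforms = []
        for _ in range(n - 1):
            ret, curr = cap.read()
            if not ret:
                break
            curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
            pts = cv2.goodFeaturesToTrack(prev_gray, maxCorners=200,
                                          qualityLevel=0.01, minDistance=30)
            moved, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, pts, None)
            good = status.flatten() == 1
            m, _ = cv2.estimateAffinePartial2D(pts[good], moved[good])
            if m is None:
                m = np.eye(2, 3)  # fall back to identity if estimation fails
            transforms.append([m[0, 2], m[1, 2], np.arctan2(m[1, 0], m[0, 0])])
            prev_gray = curr_gray
        transforms = np.array(transforms)

        # Smooth the cumulative trajectory with a moving average, then derive
        # corrected per-frame transforms that steer toward the smooth path
        trajectory = np.cumsum(transforms, axis=0)
        kernel = np.ones(2 * radius + 1) / (2 * radius + 1)
        padded = np.pad(trajectory, ((radius, radius), (0, 0)), mode='edge')
        smoothed = np.column_stack([np.convolve(padded[:, i], kernel, mode='valid')
                                    for i in range(3)])
        corrected = transforms + (smoothed - trajectory)

        # Pass 2: re-read the video and warp each frame by its corrected transform
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        for dx, dy, da in corrected:
            ret, frame = cap.read()
            if not ret:
                break
            warp = np.array([[np.cos(da), -np.sin(da), dx],
                             [np.sin(da),  np.cos(da), dy]])
            out.write(cv2.warpAffine(frame, warp, (w, h)))

        cap.release()
        out.release()
        return output_path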
+ <!-- Add these script tags just before the closing </body> tag -->
+ <script src="python/video_processing.py"></script>
+ <script src="python/analysis_tools.py"></script>
+ <script src="python/metadata_utils.py"></script>
+ <script src="python/script_runner.py"></script>
+
  <!DOCTYPE html>
  <html lang="en">
  <head>
@@ -953,5 +1352,5 @@
  });
  });
  </script>
- <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=Crazyka51/editor" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
+ </body>
  </html>