Foydalanuvchi committed on
Commit
87df0c1
·
1 Parent(s): 3c727b4

Phase 11: DRY refactoring, NudeNet frame skip, Rate Limit, GC yaxshilash

Browse files
Files changed (2) hide show
  1. filters.py +186 -402
  2. main.py +50 -6
filters.py CHANGED
@@ -3,6 +3,12 @@ import numpy as np
3
  from PIL import Image, ImageEnhance
4
  import os
5
  import logging
 
 
 
 
 
 
6
 
7
  # Loglarni filters.py uchun ham alohida sozlash
8
  logger = logging.getLogger(__name__)
@@ -89,115 +95,77 @@ def upscale_image(image_path, output_path, scale=2):
89
  logger.error(f"Upscale rasm xatosi: {e}")
90
  return None
91
 
92
- def process_video_retro(video_path, output_path, progress_callback=None):
93
- """Videoga retro filtrini qo'llaydi."""
94
- import moviepy.video.VideoClip as mp_video
95
- from moviepy.video.io.VideoFileClip import VideoFileClip
96
-
97
  try:
98
  video_path = os.path.abspath(video_path)
99
  output_path = os.path.abspath(output_path)
100
 
101
  video = VideoFileClip(video_path)
102
- total_frames = int(video.fps * video.duration)
103
  current_frame = [0]
104
 
105
- def filter_frame(frame):
106
  current_frame[0] += 1
107
  if progress_callback and current_frame[0] % 15 == 0:
108
- percent = min(99, int((current_frame[0] / total_frames) * 100))
109
  progress_callback(percent)
 
110
 
111
- # Input frame is RGB
112
- if frame.shape[2] == 4:
113
- frame = frame[:, :, :3]
114
- elif len(frame.shape) == 2:
115
- frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
116
-
117
- bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
118
-
119
- sepia_filter = np.array([[0.272, 0.534, 0.131],
120
- [0.349, 0.686, 0.168],
121
- [0.393, 0.769, 0.189]])
122
- bgr = cv2.transform(bgr, sepia_filter)
123
- bgr = np.clip(bgr, 0, 255).astype(np.uint8)
124
-
125
- noise = np.random.randint(0, 15, bgr.shape, dtype='uint8')
126
- bgr = cv2.add(bgr, noise)
127
-
128
- return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
129
-
130
- processed_video = video.image_transform(filter_frame)
131
-
132
  processed_video.write_videofile(
133
  output_path,
134
  codec="libx264",
135
- audio_codec="aac",
136
- fps=video.fps or 24,
137
- preset="ultrafast", # Tezroq bo'lishi uchun
138
  threads=4,
139
- logger=None
 
140
  )
141
 
142
  video.close()
143
  processed_video.close()
144
-
145
- if os.path.exists(output_path):
146
- return output_path
147
- return None
148
  except Exception as e:
149
- logger.error(f"Video process xatosi: {e}")
150
  return None
151
 
152
- def process_video_upscale(video_path, output_path, progress_callback=None):
153
- """Videoning sifatini (o'lcham va o'tkirlik) oshiradi."""
154
- from moviepy.video.io.VideoFileClip import VideoFileClip
155
-
156
- try:
157
- video_path = os.path.abspath(video_path)
158
- output_path = os.path.abspath(output_path)
159
-
160
- video = VideoFileClip(video_path)
161
- total_frames = int(video.fps * video.duration)
162
- current_frame = [0]
163
-
164
- def upscale_frame(frame):
165
- current_frame[0] += 1
166
- if progress_callback and current_frame[0] % 15 == 0:
167
- percent = min(99, int((current_frame[0] / total_frames) * 100))
168
- progress_callback(percent)
169
-
170
- # RGB -> BGR
171
- bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
172
-
173
- # 1.5x kattalashtirish
174
- h, w = bgr.shape[:2]
175
- upscaled = cv2.resize(bgr, (int(w*1.5), int(h*1.5)), interpolation=cv2.INTER_LANCZOS4)
176
-
177
- # Sharpness
178
- gaussian_3 = cv2.GaussianBlur(upscaled, (0, 0), 2.0)
179
- unsharp_image = cv2.addWeighted(upscaled, 1.5, gaussian_3, -0.5, 0)
180
 
181
- return cv2.cvtColor(unsharp_image, cv2.COLOR_BGR2RGB)
 
 
 
 
 
 
 
 
 
 
 
182
 
183
- processed_video = video.image_transform(upscale_frame)
184
 
185
- processed_video.write_videofile(
186
- output_path,
187
- codec="libx264",
188
- audio_codec="aac",
189
- fps=video.fps or 24,
190
- preset="ultrafast",
191
- threads=4,
192
- logger=None
193
- )
194
 
195
- video.close()
196
- processed_video.close()
197
- return output_path if os.path.exists(output_path) else None
198
- except Exception as e:
199
- logger.error(f"Video upscale xatosi: {e}")
200
- return None
201
 
202
  def apply_face_restore(image_path, output_path):
203
  """Yuzlarni aniqlaydi va ularni tiniqlashtiradi (Face Fix)."""
@@ -288,29 +256,17 @@ def apply_auto_enhance(image_path, output_path):
288
 
289
  def process_video_slowmo(video_path, output_path, progress_callback=None):
290
  """Videoni 2x sekinlashtiradi (Slow Motion)."""
291
- from moviepy.video.io.VideoFileClip import VideoFileClip
292
-
293
  try:
294
  video_path = os.path.abspath(video_path)
295
  output_path = os.path.abspath(output_path)
296
-
297
  video = VideoFileClip(video_path)
298
-
299
- # 2x sekinlashtirish (tezlikni 0.5 ga kamaytirish)
300
  slow_video = video.with_speed_scaled(0.5)
301
-
302
  audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
303
 
304
  slow_video.write_videofile(
305
- output_path,
306
- codec="libx264",
307
- fps=video.fps or 24,
308
- preset="ultrafast",
309
- threads=4,
310
- logger=None,
311
- **audio_params
312
  )
313
-
314
  video.close()
315
  slow_video.close()
316
  return output_path if os.path.exists(output_path) else None
@@ -320,127 +276,51 @@ def process_video_slowmo(video_path, output_path, progress_callback=None):
320
 
321
  def process_video_bw(video_path, output_path, progress_callback=None):
322
  """Videoni oq-qora (B&W) holatga o'tkazadi."""
323
- from moviepy.video.io.VideoFileClip import VideoFileClip
324
-
325
- try:
326
- video_path = os.path.abspath(video_path)
327
- output_path = os.path.abspath(output_path)
328
 
329
- video = VideoFileClip(video_path)
330
- total_frames = int(video.fps * video.duration)
331
- current_frame = [0]
332
-
333
- def bw_frame(frame):
334
- current_frame[0] += 1
335
- if progress_callback and current_frame[0] % 15 == 0:
336
- percent = min(99, int((current_frame[0] / total_frames) * 100))
337
- progress_callback(percent)
338
-
339
- gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
340
- # Kontrastni biroz oshirish
341
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
342
- gray = clahe.apply(gray)
343
- return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
344
-
345
- processed = video.image_transform(bw_frame)
346
- audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
347
- processed.write_videofile(
348
- output_path,
349
- codec="libx264",
350
- fps=video.fps or 24,
351
- preset="ultrafast",
352
- threads=4,
353
- logger=None,
354
- **audio_params
355
- )
356
-
357
- video.close()
358
- processed.close()
359
- return output_path if os.path.exists(output_path) else None
360
- except Exception as e:
361
- logger.error(f"B&W video xatosi: {e}")
362
- return None
363
 
364
  def process_video_color_correct(video_path, output_path, progress_callback=None):
365
  """Video ranglarini avtomatik korreksiya qiladi."""
366
- from moviepy.video.io.VideoFileClip import VideoFileClip
367
-
368
- try:
369
- video_path = os.path.abspath(video_path)
370
- output_path = os.path.abspath(output_path)
371
 
372
- video = VideoFileClip(video_path)
373
- total_frames = int(video.fps * video.duration)
374
- current_frame = [0]
375
-
376
- def color_frame(frame):
377
- current_frame[0] += 1
378
- if progress_callback and current_frame[0] % 15 == 0:
379
- percent = min(99, int((current_frame[0] / total_frames) * 100))
380
- progress_callback(percent)
381
-
382
- bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
383
-
384
- # CLAHE yorug'lik
385
- lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
386
- l, a, b = cv2.split(lab)
387
- clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
388
- cl = clahe.apply(l)
389
- lab = cv2.merge((cl, a, b))
390
- bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
391
-
392
- # Unsharp mask
393
- gaussian = cv2.GaussianBlur(bgr, (0, 0), 2.0)
394
- bgr = cv2.addWeighted(bgr, 1.4, gaussian, -0.4, 0)
395
-
396
- # Saturation oshirish
397
- hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
398
- h, s, v = cv2.split(hsv)
399
- s = cv2.add(s, 20)
400
- hsv = cv2.merge((h, s, v))
401
- bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
402
-
403
- return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
404
-
405
- processed = video.image_transform(color_frame)
406
- audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
407
- processed.write_videofile(
408
- output_path,
409
- codec="libx264",
410
- fps=video.fps or 24,
411
- preset="ultrafast",
412
- threads=4,
413
- logger=None,
414
- **audio_params
415
- )
416
 
417
- video.close()
418
- processed.close()
419
- return output_path if os.path.exists(output_path) else None
420
- except Exception as e:
421
- logger.error(f"Color Correct xatosi: {e}")
422
- return None
 
 
 
 
 
 
423
 
424
  def process_video_remove_audio(video_path, output_path, progress_callback=None):
425
  """Videodan ovozni olib tashlaydi."""
426
- from moviepy.video.io.VideoFileClip import VideoFileClip
427
-
428
  try:
429
  video_path = os.path.abspath(video_path)
430
  output_path = os.path.abspath(output_path)
431
-
432
  video = VideoFileClip(video_path)
433
  muted = video.without_audio()
434
 
435
  muted.write_videofile(
436
- output_path,
437
- codec="libx264",
438
- fps=video.fps or 24,
439
- preset="ultrafast",
440
- threads=4,
441
- logger=None
442
  )
443
-
444
  video.close()
445
  muted.close()
446
  return output_path if os.path.exists(output_path) else None
@@ -450,29 +330,18 @@ def process_video_remove_audio(video_path, output_path, progress_callback=None):
450
 
451
  def process_video_trim(video_path, output_path, progress_callback=None, max_duration=15):
452
  """Videoning birinchi N soniyasini kesib oladi."""
453
- from moviepy.video.io.VideoFileClip import VideoFileClip
454
-
455
  try:
456
  video_path = os.path.abspath(video_path)
457
  output_path = os.path.abspath(output_path)
458
-
459
  video = VideoFileClip(video_path)
460
-
461
- # Agar video allaqachon qisqa bo'lsa, barini olish
462
  end_time = min(max_duration, video.duration)
463
  trimmed = video.subclipped(0, end_time)
464
 
465
  audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
466
  trimmed.write_videofile(
467
- output_path,
468
- codec="libx264",
469
- fps=video.fps or 24,
470
- preset="ultrafast",
471
- threads=4,
472
- logger=None,
473
- **audio_params
474
  )
475
-
476
  video.close()
477
  trimmed.close()
478
  return output_path if os.path.exists(output_path) else None
@@ -482,133 +351,62 @@ def process_video_trim(video_path, output_path, progress_callback=None, max_dura
482
 
483
  def process_video_face_fix(video_path, output_path, progress_callback=None):
484
  """Video kadrlardagi yuzlarni aniqlaydi va tiniqlashtiradi."""
485
- from moviepy.video.io.VideoFileClip import VideoFileClip
486
 
487
- try:
488
- video_path = os.path.abspath(video_path)
489
- output_path = os.path.abspath(output_path)
 
 
490
 
491
- video = VideoFileClip(video_path)
492
- total_frames = int(video.fps * video.duration)
493
- current_frame = [0]
494
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
495
-
496
- def face_frame(frame):
497
- current_frame[0] += 1
498
- if progress_callback and current_frame[0] % 15 == 0:
499
- percent = min(99, int((current_frame[0] / total_frames) * 100))
500
- progress_callback(percent)
501
 
502
- bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
503
- gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
504
- # HistEq
505
- gray = cv2.equalizeHist(gray)
506
- faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40))
507
 
508
- for (x, y, w, h) in faces:
509
- p = int(w * 0.1)
510
- x1, y1 = max(0, x - p), max(0, y - p)
511
- x2, y2 = min(bgr.shape[1], x + w + p), min(bgr.shape[0], y + h + p)
512
- face_roi = bgr[y1:y2, x1:x2]
513
-
514
- # Bilateral + Median + Sharpen
515
- smoothed = cv2.bilateralFilter(face_roi, 7, 50, 50)
516
- smoothed = cv2.medianBlur(smoothed, 3)
517
- gaussian = cv2.GaussianBlur(smoothed, (0, 0), 2.0)
518
- sharpened = cv2.addWeighted(smoothed, 1.8, gaussian, -0.8, 0)
519
-
520
- # CLAHE
521
- lab = cv2.cvtColor(sharpened, cv2.COLOR_BGR2LAB)
522
- l, a, b = cv2.split(lab)
523
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
524
- cl = clahe.apply(l)
525
- lab = cv2.merge((cl, a, b))
526
- face_roi = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
527
-
528
- bgr[y1:y2, x1:x2] = face_roi
529
 
530
- return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
531
-
532
- processed = video.image_transform(face_frame)
533
- audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
534
 
535
- processed.write_videofile(
536
- output_path,
537
- codec="libx264",
538
- fps=video.fps or 24,
539
- preset="ultrafast",
540
- threads=4,
541
- logger=None,
542
- **audio_params
543
- )
544
 
545
- video.close()
546
- processed.close()
547
- return output_path if os.path.exists(output_path) else None
548
- except Exception as e:
549
- logger.error(f"Video Face Fix xatosi: {e}")
550
- return None
551
 
552
  def process_video_auto_enhance(video_path, output_path, progress_callback=None):
553
  """Video ranglarini va yorug'ligini avtomatik yaxshilaydi."""
554
- from moviepy.video.io.VideoFileClip import VideoFileClip
555
-
556
- try:
557
- video_path = os.path.abspath(video_path)
558
- output_path = os.path.abspath(output_path)
559
 
560
- video = VideoFileClip(video_path)
561
- total_frames = int(video.fps * video.duration)
562
- current_frame = [0]
563
-
564
- def enhance_frame(frame):
565
- current_frame[0] += 1
566
- if progress_callback and current_frame[0] % 15 == 0:
567
- percent = min(99, int((current_frame[0] / total_frames) * 100))
568
- progress_callback(percent)
569
-
570
- bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
571
-
572
- # CLAHE
573
- lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
574
- l, a, b = cv2.split(lab)
575
- clahe = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8,8))
576
- cl = clahe.apply(l)
577
- lab = cv2.merge((cl, a, b))
578
- enhanced = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
579
-
580
- # Unsharp
581
- gaussian = cv2.GaussianBlur(enhanced, (0, 0), 1.5)
582
- enhanced = cv2.addWeighted(enhanced, 1.3, gaussian, -0.3, 0)
583
-
584
- # Saturation
585
- hsv = cv2.cvtColor(enhanced, cv2.COLOR_BGR2HSV)
586
- h, s, v = cv2.split(hsv)
587
- s = cv2.add(s, 15)
588
- hsv = cv2.merge((h, s, v))
589
- enhanced = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
590
-
591
- return cv2.cvtColor(enhanced, cv2.COLOR_BGR2RGB)
592
-
593
- processed = video.image_transform(enhance_frame)
594
- audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
595
 
596
- processed.write_videofile(
597
- output_path,
598
- codec="libx264",
599
- fps=video.fps or 24,
600
- preset="ultrafast",
601
- threads=4,
602
- logger=None,
603
- **audio_params
604
- )
605
-
606
- video.close()
607
- processed.close()
608
- return output_path if os.path.exists(output_path) else None
609
- except Exception as e:
610
- logger.error(f"Video Auto Enhance xatosi: {e}")
611
- return None
612
 
613
  def process_video_fps_boost(video_path, output_path, target_fps=60, progress_callback=None):
614
  """Videoni Optical Flow orqali silliq FPS ga ko'taradi (frame interpolation).
@@ -632,7 +430,6 @@ def process_video_fps_boost(video_path, output_path, target_fps=60, progress_cal
632
  # Agar video allaqachon target_fps dan yuqori bo'lsa
633
  if orig_fps >= target_fps:
634
  cap.release()
635
- from moviepy.video.io.VideoFileClip import VideoFileClip
636
  video = VideoFileClip(video_path)
637
  video.write_videofile(output_path, fps=target_fps, codec="libx264",
638
  audio_codec="aac", preset="medium",
@@ -730,7 +527,6 @@ def process_video_fps_boost(video_path, output_path, target_fps=60, progress_cal
730
 
731
  # Audio'ni biriktirish va yuqori sifatda saqlash
732
  try:
733
- from moviepy.video.io.VideoFileClip import VideoFileClip
734
  original = VideoFileClip(video_path)
735
  processed = VideoFileClip(temp_video_path)
736
 
@@ -769,47 +565,51 @@ def process_video_fps_boost(video_path, output_path, target_fps=60, progress_cal
769
  _nude_detector = None
770
 
771
  def get_nude_detector():
772
- """Nudity detektorini lazily (kerak bo'lganda) yuklash."""
773
  global _nude_detector
774
  if _nude_detector is None:
775
  try:
776
- from nudenet import NudeDetector
 
 
777
  _nude_detector = NudeDetector()
778
  logger.info("NudeNet modeli muvaffaqiyatli yuklandi.")
779
  except Exception as e:
780
- logger.error(f"NudeNet yuklashda xato: {e}")
 
781
  return _nude_detector
782
 
783
  def apply_nudenet_filter(image_path, output_path):
784
- """Rasmda NSFW hududlarni aniqlaydi va ularni xiralashtiradi."""
785
  try:
786
  detector = get_nude_detector()
787
  if not detector: return None
788
-
789
- # Deteksiya qilish
790
- detections = detector.detect(image_path)
791
 
 
 
 
 
 
 
 
792
  img = cv2.imread(image_path)
793
- if img is None: return None
794
-
795
- # NSFW hududlarni blur qilish
796
  h, w = img.shape[:2]
 
797
  for detection in detections:
798
- box = detection['box']
799
- label = detection['label']
800
- score = detection['score']
801
 
802
- if score > 0.4: # Ishonchliligi 40% dan yuqori bo'lsa
803
- x1, y1, x2, y2 = map(int, box)
804
- x1, y1 = max(0, y1), max(0, x1) # NudeNet box [y1, x1, y2, x2]? Yo'q, odatda [x, y, x+w, y+h]
805
- # NudeNet 3.0 box: [left, top, right, bottom] -> [x1, y1, x2, y2]
806
- x1, y1 = max(0, x1), max(0, y1)
807
- x2, y2 = min(w, x2), min(h, y2)
808
-
809
- roi = img[y1:y2, x1:x2]
810
- if roi.size > 0:
811
- img[y1:y2, x1:x2] = cv2.GaussianBlur(roi, (51, 51), 30)
812
- logger.info(f"NSFW aniqlandi ({label}) va xiralashtirildi.")
813
 
814
  cv2.imwrite(output_path, img)
815
  return output_path if os.path.exists(output_path) else None
@@ -819,66 +619,50 @@ def apply_nudenet_filter(image_path, output_path):
819
 
820
  def process_video_nnsfw(video_path, output_path, progress_callback=None):
821
  """Videoda kadrbay-kadr NSFW senzurasini amalga oshiradi."""
822
- from moviepy.video.io.VideoFileClip import VideoFileClip
823
-
824
  try:
825
  detector = get_nude_detector()
826
- if not detector: return None
 
827
 
828
- video = VideoFileClip(video_path)
829
- total_frames = int(video.fps * video.duration)
830
  current_frame = [0]
 
831
 
832
  def censor_frame(frame):
833
  current_frame[0] += 1
834
- if progress_callback and current_frame[0] % 15 == 0:
835
- percent = min(99, int((current_frame[0] / total_frames) * 100))
836
- progress_callback(percent)
837
-
838
- # RGB -> BGR
839
  bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
840
  h, w = bgr.shape[:2]
841
 
842
- # Har bir kadrni diskka yozish juda sekin.
843
- # NudeNet 3.0 detector.detect() faqat path qabul qiladi.
844
- # Optimizatsiya: kadrni xotirada saqlab, vaqtinchalik fayl qilib yuboramiz.
845
- # Diqqat: Bu jarayon juda sekin bo'lishi mumkin!
846
-
847
- temp_path = f"tmp_f_{os.getpid()}.jpg"
848
- cv2.imwrite(temp_path, bgr)
849
- detections = detector.detect(temp_path)
850
-
851
- for d in detections:
852
- if d['score'] > 0.4:
853
- x1, y1, x2, y2 = map(int, d['box'])
854
- x1, y1 = max(0, x1), max(0, y1)
855
- x2, y2 = min(w, x2), min(h, y2)
856
- roi = bgr[y1:y2, x1:x2]
857
- if roi.size > 0:
858
- bgr[y1:y2, x1:x2] = cv2.GaussianBlur(roi, (51, 51), 30)
859
-
860
- if os.path.exists(temp_path):
861
- try: os.remove(temp_path)
862
- except: pass
 
 
 
 
 
863
 
864
  return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
865
 
866
- processed = video.image_transform(censor_frame)
867
- audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
868
-
869
- processed.write_videofile(
870
- output_path,
871
- codec="libx264",
872
- fps=video.fps or 24,
873
- preset="ultrafast",
874
- threads=4,
875
- logger=None,
876
- **audio_params
877
- )
878
-
879
- video.close()
880
- processed.close()
881
- return output_path if os.path.exists(output_path) else None
882
  except Exception as e:
883
  logger.error(f"Video NSFW xatosi: {e}")
884
  return None
 
3
  from PIL import Image, ImageEnhance
4
  import os
5
  import logging
6
+ from moviepy.video.io.VideoFileClip import VideoFileClip
7
+
8
+ try:
9
+ from nudenet import NudeDetector
10
+ except ImportError:
11
+ NudeDetector = None
12
 
13
  # Loglarni filters.py uchun ham alohida sozlash
14
  logger = logging.getLogger(__name__)
 
95
  logger.error(f"Upscale rasm xatosi: {e}")
96
  return None
97
 
98
def _apply_video_transform(video_path, output_path, transform_func, progress_callback=None, fps_override=None):
    """Shared driver for all frame-by-frame video filters (DRY helper).

    Opens the clip, wraps ``transform_func`` so progress is reported roughly
    every 15 frames, renders the result with libx264 and returns the output
    path on success.

    Args:
        video_path: Input video file path.
        output_path: Destination video file path.
        transform_func: Callable taking one RGB frame (ndarray) and returning
            the transformed RGB frame.
        progress_callback: Optional callable receiving an int percent (0-99).
        fps_override: Optional fps to use instead of the clip's native fps.

    Returns:
        The absolute output path, or None on any error.
    """
    video = None
    processed_video = None
    try:
        video_path = os.path.abspath(video_path)
        output_path = os.path.abspath(output_path)

        video = VideoFileClip(video_path)
        total_frames = int((fps_override or video.fps) * video.duration)
        current_frame = [0]

        def wrapped_transform(frame):
            current_frame[0] += 1
            if progress_callback and current_frame[0] % 15 == 0:
                # max(1, ...) guards against division by zero on very short clips
                percent = min(99, int((current_frame[0] / max(1, total_frames)) * 100))
                progress_callback(percent)
            return transform_func(frame)

        processed_video = video.image_transform(wrapped_transform)
        audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}

        processed_video.write_videofile(
            output_path,
            codec="libx264",
            fps=fps_override or video.fps or 24,
            preset="ultrafast",
            threads=4,
            logger=None,
            **audio_params
        )

        return output_path if os.path.exists(output_path) else None
    except Exception as e:
        logger.error(f"Video transform xatosi: {e}")
        return None
    finally:
        # Always release both clips, even when rendering fails mid-way;
        # the original only closed them on the success path (resource leak).
        for clip in (video, processed_video):
            if clip is not None:
                try:
                    clip.close()
                except Exception:
                    pass
134
 
135
def process_video_retro(video_path, output_path, progress_callback=None):
    """Apply a retro (sepia + film-grain) filter to a video.

    Returns the output path on success, or None on failure (delegated to
    ``_apply_video_transform``).
    """
    def filter_frame(frame):
        # Normalise the frame to 3-channel RGB first. The dimensionality
        # test MUST precede the channel test: reading frame.shape[2] on a
        # 2-D (grayscale) frame raises IndexError, so the original order
        # (`if frame.shape[2] == 4 ... elif len(frame.shape) == 2`) could
        # never reach the grayscale branch.
        if len(frame.shape) == 2:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        elif frame.shape[2] == 4:
            frame = frame[:, :, :3]

        bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # Classic sepia kernel (rows in BGR order).
        sepia_filter = np.array([[0.272, 0.534, 0.131],
                                 [0.349, 0.686, 0.168],
                                 [0.393, 0.769, 0.189]])
        bgr = cv2.transform(bgr, sepia_filter)
        bgr = np.clip(bgr, 0, 255).astype(np.uint8)

        # Light random noise mimics film grain; cv2.add saturates at 255.
        noise = np.random.randint(0, 15, bgr.shape, dtype='uint8')
        bgr = cv2.add(bgr, noise)
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    return _apply_video_transform(video_path, output_path, filter_frame, progress_callback)
155
 
 
156
 
157
def process_video_upscale(video_path, output_path, progress_callback=None):
    """Videoning sifatini (o'lcham va o'tkirlik) oshiradi."""
    def upscale_frame(frame):
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        height, width = frame_bgr.shape[:2]

        # Enlarge by 1.5x with Lanczos resampling (good edge preservation).
        target_size = (int(width * 1.5), int(height * 1.5))
        enlarged = cv2.resize(frame_bgr, target_size, interpolation=cv2.INTER_LANCZOS4)

        # Unsharp masking: subtract a blurred copy to boost edge contrast.
        blurred = cv2.GaussianBlur(enlarged, (0, 0), 2.0)
        sharpened = cv2.addWeighted(enlarged, 1.5, blurred, -0.5, 0)
        return cv2.cvtColor(sharpened, cv2.COLOR_BGR2RGB)

    return _apply_video_transform(video_path, output_path, upscale_frame, progress_callback)
 
169
 
170
  def apply_face_restore(image_path, output_path):
171
  """Yuzlarni aniqlaydi va ularni tiniqlashtiradi (Face Fix)."""
 
256
 
257
  def process_video_slowmo(video_path, output_path, progress_callback=None):
258
  """Videoni 2x sekinlashtiradi (Slow Motion)."""
 
 
259
  try:
260
  video_path = os.path.abspath(video_path)
261
  output_path = os.path.abspath(output_path)
 
262
  video = VideoFileClip(video_path)
 
 
263
  slow_video = video.with_speed_scaled(0.5)
 
264
  audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
265
 
266
  slow_video.write_videofile(
267
+ output_path, codec="libx264", fps=video.fps or 24, preset="ultrafast",
268
+ threads=4, logger=None, **audio_params
 
 
 
 
 
269
  )
 
270
  video.close()
271
  slow_video.close()
272
  return output_path if os.path.exists(output_path) else None
 
276
 
277
def process_video_bw(video_path, output_path, progress_callback=None):
    """Videoni oq-qora (B&W) holatga o'tkazadi."""
    def bw_frame(frame):
        # Grayscale conversion followed by CLAHE for a mild contrast boost.
        mono = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        mono = equalizer.apply(mono)
        # Back to 3 channels so the encoder still receives RGB frames.
        return cv2.cvtColor(mono, cv2.COLOR_GRAY2RGB)

    return _apply_video_transform(video_path, output_path, bw_frame, progress_callback)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
286
 
287
def process_video_color_correct(video_path, output_path, progress_callback=None):
    """Video ranglarini avtomatik korreksiya qiladi."""
    def color_frame(frame):
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # Exposure: CLAHE applied to the LAB lightness channel only, so
        # colour channels are untouched.
        lab = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2LAB)
        lightness, chan_a, chan_b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        lab = cv2.merge((clahe.apply(lightness), chan_a, chan_b))
        frame_bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        # Sharpness: unsharp mask (original minus blurred copy).
        blurred = cv2.GaussianBlur(frame_bgr, (0, 0), 2.0)
        frame_bgr = cv2.addWeighted(frame_bgr, 1.4, blurred, -0.4, 0)

        # Saturation lift in HSV space; cv2.add clamps at 255.
        hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
        hue, sat, val = cv2.split(hsv)
        hsv = cv2.merge((hue, cv2.add(sat, 20), val))
        frame_bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        return cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

    return _apply_video_transform(video_path, output_path, color_frame, progress_callback)
311
 
312
  def process_video_remove_audio(video_path, output_path, progress_callback=None):
313
  """Videodan ovozni olib tashlaydi."""
 
 
314
  try:
315
  video_path = os.path.abspath(video_path)
316
  output_path = os.path.abspath(output_path)
 
317
  video = VideoFileClip(video_path)
318
  muted = video.without_audio()
319
 
320
  muted.write_videofile(
321
+ output_path, codec="libx264", fps=video.fps or 24,
322
+ preset="ultrafast", threads=4, logger=None
 
 
 
 
323
  )
 
324
  video.close()
325
  muted.close()
326
  return output_path if os.path.exists(output_path) else None
 
330
 
331
  def process_video_trim(video_path, output_path, progress_callback=None, max_duration=15):
332
  """Videoning birinchi N soniyasini kesib oladi."""
 
 
333
  try:
334
  video_path = os.path.abspath(video_path)
335
  output_path = os.path.abspath(output_path)
 
336
  video = VideoFileClip(video_path)
 
 
337
  end_time = min(max_duration, video.duration)
338
  trimmed = video.subclipped(0, end_time)
339
 
340
  audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
341
  trimmed.write_videofile(
342
+ output_path, codec="libx264", fps=video.fps or 24,
343
+ preset="ultrafast", threads=4, logger=None, **audio_params
 
 
 
 
 
344
  )
 
345
  video.close()
346
  trimmed.close()
347
  return output_path if os.path.exists(output_path) else None
 
351
 
352
def process_video_face_fix(video_path, output_path, progress_callback=None):
    """Video kadrlardagi yuzlarni aniqlaydi va tiniqlashtiradi.

    Returns the output path on success, or None on failure.
    """
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # CascadeClassifier fails SILENTLY: a missing/corrupt XML yields an empty
    # classifier whose detectMultiScale raises on every single frame. Detect
    # that once up front instead.
    if face_cascade.empty():
        logger.error("Haar cascade yuklanmadi: haarcascade_frontalface_default.xml")
        return None

    def face_frame(frame):
        bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
        # Histogram equalisation improves Haar detection in poor lighting.
        gray = cv2.equalizeHist(gray)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40))

        for (x, y, w, h) in faces:
            # Pad the detected box by 10% so the whole face is covered,
            # clamped to the frame bounds.
            p = int(w * 0.1)
            x1, y1 = max(0, x - p), max(0, y - p)
            x2, y2 = min(bgr.shape[1], x + w + p), min(bgr.shape[0], y + h + p)
            face_roi = bgr[y1:y2, x1:x2]

            # Denoise (bilateral keeps edges, median removes speckle),
            # then sharpen with an unsharp mask.
            smoothed = cv2.bilateralFilter(face_roi, 7, 50, 50)
            smoothed = cv2.medianBlur(smoothed, 3)
            gaussian = cv2.GaussianBlur(smoothed, (0, 0), 2.0)
            sharpened = cv2.addWeighted(smoothed, 1.8, gaussian, -0.8, 0)

            # Local contrast boost via CLAHE on the LAB lightness channel.
            lab = cv2.cvtColor(sharpened, cv2.COLOR_BGR2LAB)
            l, a, b = cv2.split(lab)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            cl = clahe.apply(l)
            lab = cv2.merge((cl, a, b))
            face_roi = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

            bgr[y1:y2, x1:x2] = face_roi

        return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    return _apply_video_transform(video_path, output_path, face_frame, progress_callback)
 
 
 
 
 
385
 
386
def process_video_auto_enhance(video_path, output_path, progress_callback=None):
    """Video ranglarini va yorug'ligini avtomatik yaxshilaydi."""
    def enhance_frame(frame):
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # Even out exposure: CLAHE on the LAB lightness channel.
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        light, ch_a, ch_b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8,8))
        lab = cv2.merge((clahe.apply(light), ch_a, ch_b))
        img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        # Gentle unsharp mask for crispness.
        soft = cv2.GaussianBlur(img, (0, 0), 1.5)
        img = cv2.addWeighted(img, 1.3, soft, -0.3, 0)

        # Small saturation lift; cv2.add saturates at 255.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hue, sat, val = cv2.split(hsv)
        hsv = cv2.merge((hue, cv2.add(sat, 15), val))
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    return _apply_video_transform(video_path, output_path, enhance_frame, progress_callback)
 
 
 
 
410
 
411
  def process_video_fps_boost(video_path, output_path, target_fps=60, progress_callback=None):
412
  """Videoni Optical Flow orqali silliq FPS ga ko'taradi (frame interpolation).
 
430
  # Agar video allaqachon target_fps dan yuqori bo'lsa
431
  if orig_fps >= target_fps:
432
  cap.release()
 
433
  video = VideoFileClip(video_path)
434
  video.write_videofile(output_path, fps=target_fps, codec="libx264",
435
  audio_codec="aac", preset="medium",
 
527
 
528
  # Audio'ni biriktirish va yuqori sifatda saqlash
529
  try:
 
530
  original = VideoFileClip(video_path)
531
  processed = VideoFileClip(temp_video_path)
532
 
 
565
  _nude_detector = None
566
 
567
def get_nude_detector():
    """Lazily create and cache the NudeNet detector singleton.

    Returns the cached detector instance, or None when the library is not
    installed or initialisation fails.
    """
    global _nude_detector
    # Fast path: already initialised on a previous call.
    if _nude_detector is not None:
        return _nude_detector
    # Library missing (import guarded at module level).
    if NudeDetector is None:
        logger.error("NudeNet kutubxonasi o'rnatilmagan.")
        return None
    try:
        _nude_detector = NudeDetector()
    except Exception as e:
        logger.error(f"NudeNetni yuklashda xatolik: {e}")
        return None
    logger.info("NudeNet modeli muvaffaqiyatli yuklandi.")
    return _nude_detector
581
 
582
  def apply_nudenet_filter(image_path, output_path):
583
+ """Rasmda NSFW hududlarni aniqlaydi va blur (xiralashish) qo'llaydi."""
584
  try:
585
  detector = get_nude_detector()
586
  if not detector: return None
 
 
 
587
 
588
+ image_path = os.path.abspath(image_path)
589
+ output_path = os.path.abspath(output_path)
590
+
591
+ detections = detector.detect(image_path)
592
+ if not detections:
593
+ return image_path
594
+
595
  img = cv2.imread(image_path)
 
 
 
596
  h, w = img.shape[:2]
597
+
598
  for detection in detections:
599
+ score = detection.get('score', 0)
600
+ label = detection.get('class', '')
 
601
 
602
+ if score > 0.4:
603
+ box = detection.get('box', [])
604
+ if len(box) == 4:
605
+ x1, y1, x2, y2 = map(int, box)
606
+ x1, y1 = max(0, x1), max(0, y1)
607
+ x2, y2 = min(w, x2), min(h, y2)
608
+
609
+ roi = img[y1:y2, x1:x2]
610
+ if roi.size > 0:
611
+ img[y1:y2, x1:x2] = cv2.GaussianBlur(roi, (51, 51), 30)
612
+ logger.info(f"NSFW aniqlandi ({label}) va xiralashtirildi.")
613
 
614
  cv2.imwrite(output_path, img)
615
  return output_path if os.path.exists(output_path) else None
 
619
 
620
def process_video_nnsfw(video_path, output_path, progress_callback=None):
    """Videoda kadrbay-kadr NSFW senzurasini amalga oshiradi.

    NudeNet only accepts file paths, so every sampled frame is dumped to a
    temporary JPEG before detection. To keep throughput usable, detection
    runs only on every ``skip_frames``-th frame and the last detections are
    reused (cached) for the frames in between.

    Returns the output path on success, or None on failure.
    """
    import tempfile

    try:
        detector = get_nude_detector()
        if detector is None:
            return None

        skip_frames = 12  # Har 12 kadrda 1 marta tekshiriladi
        current_frame = [0]
        current_detections = [[]]  # last NudeNet result, reused between checks

        def censor_frame(frame):
            current_frame[0] += 1
            bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            h, w = bgr.shape[:2]

            # Faqat ma'lum kadrlarda nudenet ni chaqirish
            if current_frame[0] % skip_frames == 1:
                # Write the probe frame to the system temp dir, not the CWD
                # (which may be read-only or shared between workers).
                temp_path = os.path.join(
                    tempfile.gettempdir(),
                    f"tmp_f_{os.getpid()}_{current_frame[0]}.jpg",
                )
                cv2.imwrite(temp_path, bgr)
                try:
                    detections = detector.detect(temp_path) if detector else []
                    current_detections[0] = detections if isinstance(detections, list) else []
                except Exception as e:
                    logger.warning(f"NudeNet detect xatosi: {e}")
                    current_detections[0] = []
                finally:
                    # Cleanup belongs in finally so the temp file never
                    # lingers, even when detect() raises.
                    if os.path.exists(temp_path):
                        try:
                            os.remove(temp_path)
                        except OSError:
                            pass

            # Keshdagi oxirgi topilmalar bo'yicha blur qo'llash
            for d in current_detections[0]:
                if isinstance(d, dict) and d.get('score', 0) > 0.4:
                    box = d.get('box')
                    if isinstance(box, list) and len(box) == 4:
                        x1, y1, x2, y2 = map(int, box)
                        x1, y1 = max(0, x1), max(0, y1)
                        x2, y2 = min(w, x2), min(h, y2)
                        roi = bgr[y1:y2, x1:x2]
                        if roi.size > 0:
                            bgr[y1:y2, x1:x2] = cv2.GaussianBlur(roi, (51, 51), 30)

            return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

        return _apply_video_transform(video_path, output_path, censor_frame, progress_callback)
    except Exception as e:
        logger.error(f"Video NSFW xatosi: {e}")
        return None
main.py CHANGED
@@ -76,6 +76,19 @@ processing_semaphore = asyncio.Semaphore(2)
76
  executor = ThreadPoolExecutor(max_workers=4)
77
  base_dir = os.path.dirname(os.path.abspath(__file__))
78
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  # Scheduler for cleanup
80
  scheduler = AsyncIOScheduler()
81
 
@@ -86,17 +99,32 @@ async def post_init(application):
86
  logger.info("Scheduler va tozalash tizimi ishga tushdi.")
87
 
88
  async def cleanup_old_files():
89
- """Eski fayllarni va xotirani tozalash."""
90
  now = datetime.now()
 
 
91
  to_delete = [k for k, v in media_storage.items() if (now - v.get('timestamp', now)) > timedelta(hours=1)]
92
  for k in to_delete:
93
- media_storage.pop(k, None) # Lint fix
94
 
 
 
 
 
 
 
95
  for f in os.listdir(base_dir):
96
- if (f.startswith("in_") or f.startswith("out_")) and (now - datetime.fromtimestamp(os.path.getmtime(os.path.join(base_dir, f)))) > timedelta(hours=1):
 
97
  try:
98
- os.remove(os.path.join(base_dir, f))
 
 
 
99
  except: pass
 
 
 
100
 
101
  # --- UI Helpers ---
102
 
@@ -322,6 +350,12 @@ async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE):
322
  await query.edit_message_text("⚠️ Media ma'lumotlari topilmadi. Iltimos, qayta yuboring.")
323
  return
324
 
 
 
 
 
 
 
325
  async with processing_semaphore:
326
  media_info = media_storage[short_id]
327
  file_id = media_info["file_id"]
@@ -418,11 +452,21 @@ async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE):
418
  try: await context.bot.send_message(query.message.chat_id, f"❌ Tizimli xatolik: {e}")
419
  except: pass
420
  finally:
 
421
  try:
422
- if os.path.exists(input_path): os.remove(input_path)
423
- if os.path.exists(output_path): os.remove(output_path)
424
  except: pass
425
 
 
 
 
 
 
 
 
 
 
 
426
  # Xotirani majburiy tozalash (RAM to'lishini oldini oladi)
427
  gc.collect()
428
 
 
76
  executor = ThreadPoolExecutor(max_workers=4)
77
  base_dir = os.path.dirname(os.path.abspath(__file__))
78
 
79
# Rate limiting (spam protection)
user_active_tasks = {}  # {user_id: timestamp of the last accepted request}
RATE_LIMIT_SECONDS = 5  # minimum gap between requests, in seconds

def check_rate_limit(user_id):
    """Return True when the user may start a new task, False while rate-limited.

    Accepting a request records the current timestamp for this user, so the
    next call within RATE_LIMIT_SECONDS is rejected.
    """
    current = time.time()
    previous = user_active_tasks.get(user_id)
    within_window = bool(previous) and (current - previous) < RATE_LIMIT_SECONDS
    if within_window:
        return False
    user_active_tasks[user_id] = current
    return True
91
+
92
  # Scheduler for cleanup
93
  scheduler = AsyncIOScheduler()
94
 
 
99
  logger.info("Scheduler va tozalash tizimi ishga tushdi.")
100
 
101
async def cleanup_old_files():
    """Periodic housekeeping: stale media records, rate-limit records, temp files, GC.

    Scheduled by the AsyncIOScheduler; safe to run repeatedly.
    """
    now = datetime.now()

    # 1. Drop media_storage entries older than 1 hour.
    to_delete = [k for k, v in media_storage.items() if (now - v.get('timestamp', now)) > timedelta(hours=1)]
    for k in to_delete:
        media_storage.pop(k, None)

    # 2. Drop rate-limit records untouched for 5+ minutes.
    now_ts = time.time()  # hoisted: one clock read for the whole scan
    stale_users = [uid for uid, ts in user_active_tasks.items() if (now_ts - ts) > 300]
    for uid in stale_users:
        user_active_tasks.pop(uid, None)

    # 3. Remove in_/out_/tmp_ work files older than 30 minutes.
    for f in os.listdir(base_dir):
        if f.startswith(("in_", "out_", "tmp_")):  # tuple form: one call instead of three
            fpath = os.path.join(base_dir, f)
            try:
                file_age = now - datetime.fromtimestamp(os.path.getmtime(fpath))
                if file_age > timedelta(minutes=30):
                    os.remove(fpath)
                    logger.info(f"Tozalandi: {f}")
            except OSError:
                # A file may vanish between listdir() and getmtime()/remove();
                # only filesystem errors are expected — don't swallow others.
                pass

    # Force a GC pass to keep RSS down between jobs.
    gc.collect()
    logger.info(f"Tozalash yakunlandi. media_storage: {len(media_storage)}, active_tasks: {len(user_active_tasks)}")
128
 
129
  # --- UI Helpers ---
130
 
 
350
  await query.edit_message_text("⚠️ Media ma'lumotlari topilmadi. Iltimos, qayta yuboring.")
351
  return
352
 
353
+ # Spam himoyasi: foydalanuvchi juda tez-tez so'rov yuborsa bloklash
354
+ user_id = query.from_user.id
355
+ if not check_rate_limit(user_id):
356
+ await query.edit_message_text("⏳ Sizda tugallanmagan jarayon bor yoki juda tez so'rov yuboryapsiz. Biroz kuting!")
357
+ return
358
+
359
  async with processing_semaphore:
360
  media_info = media_storage[short_id]
361
  file_id = media_info["file_id"]
 
452
  try: await context.bot.send_message(query.message.chat_id, f"❌ Tizimli xatolik: {e}")
453
  except: pass
454
  finally:
455
+ # Rate limit tozalash
456
  try:
457
+ user_active_tasks.pop(query.from_user.id, None)
 
458
  except: pass
459
 
460
+ # Fayllarni tozalash
461
+ for cleanup_path in [locals().get('input_path'), locals().get('output_path')]:
462
+ if cleanup_path and os.path.exists(cleanup_path):
463
+ try: os.remove(cleanup_path)
464
+ except: pass
465
+
466
+ # media_storage dan eskirgan yozuvlarni tozalash
467
+ if short_id in media_storage:
468
+ media_storage.pop(short_id, None)
469
+
470
  # Xotirani majburiy tozalash (RAM to'lishini oldini oladi)
471
  gc.collect()
472