Foydalanuvchi committed on
Commit
1203b34
Β·
1 Parent(s): 5ebc955

Phase 13: 10 ta yangi AI-Powered pro funksiyalar (Glitch, BG Remove, Style Transfer, Subtitle Translate va hk.) kiritildi

Browse files
Files changed (3) hide show
  1. filters.py +376 -0
  2. main.py +69 -8
  3. requirements.txt +3 -0
filters.py CHANGED
@@ -895,3 +895,379 @@ def process_video_subtitle(video_path, output_path, progress_callback=None):
895
  if os.path.exists(temp_audio_path):
896
  try: os.remove(temp_audio_path)
897
  except: pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
895
  if os.path.exists(temp_audio_path):
896
  try: os.remove(temp_audio_path)
897
  except: pass
898
+
899
+ # ============================================================
900
+ # PHASE 13: GLITCH / MIRROR / WATERMARK / BG REMOVE / STYLE
901
+ # ============================================================
902
+
903
def apply_glitch_filter(image_path, output_path):
    """Apply a glitch effect to an image (RGB channel shift + scan lines + noise)."""
    try:
        src = cv2.imread(os.path.abspath(image_path))
        if src is None:
            return None
        height, width = src.shape[:2]

        glitched = src.copy()
        # Channel displacement: push the red channel right and the blue channel left.
        displacement = max(5, int(width * 0.02))
        glitched[:, displacement:, 2] = src[:, :-displacement, 2]
        glitched[:, :-displacement, 0] = src[:, displacement:, 0]

        # Scan lines: roll thin 2-px horizontal strips by a random offset.
        step = max(3, height // 80)
        for row_y in range(0, height, step):
            jitter = np.random.randint(-displacement, displacement)
            if 0 < row_y + 2 < height:
                strip = glitched[row_y:row_y + 2].copy()
                glitched[row_y:row_y + 2] = np.roll(strip, jitter, axis=1)

        # Sprinkle additive noise over the whole image (cv2.add saturates at 255).
        grain = np.random.randint(0, 25, src.shape, dtype='uint8')
        glitched = cv2.add(glitched, grain)

        cv2.imwrite(os.path.abspath(output_path), glitched)
        return output_path
    except Exception as e:
        logger.error(f"Glitch filter xatosi: {e}")
        return None
934
+
935
def apply_mirror_filter(image_path, output_path):
    """Apply a mirror effect: reflect the left half of the image onto the right half."""
    try:
        src = cv2.imread(os.path.abspath(image_path))
        if src is None:
            return None
        height, width = src.shape[:2]

        # Flip the left half horizontally and paste it over the right half.
        half = width // 2
        reflected = cv2.flip(src[:, :half], 1)
        out = src.copy()
        out[:, half:half + reflected.shape[1]] = reflected

        cv2.imwrite(os.path.abspath(output_path), out)
        return output_path
    except Exception as e:
        logger.error(f"Mirror filter xatosi: {e}")
        return None
953
+
954
def process_video_glitch(video_path, output_path, progress_callback=None):
    """Apply the glitch effect to every frame of a video."""
    def _glitch(frame):
        height, width = frame.shape[:2]
        out = frame.copy()
        displacement = max(5, int(width * 0.02))

        # NOTE(review): frames appear to be RGB here (channel 0 labelled R),
        # the reverse of the BGR image variant — confirm against the caller.
        out[:, displacement:, 0] = frame[:, :-displacement, 0]  # R shift
        out[:, :-displacement, 2] = frame[:, displacement:, 2]  # B shift

        # Scan lines: roll thin 2-px strips by a random offset.
        step = max(3, height // 80)
        for row_y in range(0, height, step):
            jitter = np.random.randint(-displacement, displacement)
            if 0 < row_y + 2 < height:
                strip = out[row_y:row_y + 2].copy()
                out[row_y:row_y + 2] = np.roll(strip, jitter, axis=1)

        # Lighter noise than the still-image variant to limit flicker.
        grain = np.random.randint(0, 15, frame.shape, dtype='uint8')
        return cv2.add(out, grain)

    return _apply_video_transform(video_path, output_path, _glitch, progress_callback)
975
+
976
def process_video_mirror(video_path, output_path, progress_callback=None):
    """Apply the mirror effect to every frame of a video."""
    def _mirror(frame):
        height, width = frame.shape[:2]
        half = width // 2
        reflected = cv2.flip(frame[:, :half], 1)
        out = frame.copy()
        out[:, half:half + reflected.shape[1]] = reflected
        return out

    return _apply_video_transform(video_path, output_path, _mirror, progress_callback)
987
+
988
def apply_watermark(image_path, output_path, text="@editfiltrbot"):
    """Add a semi-transparent text watermark to the bottom-right corner of an image.

    Args:
        image_path: source image path.
        output_path: destination path; the result is saved flattened to RGB.
        text: watermark text to draw.

    Returns:
        output_path on success, None on failure.
    """
    try:
        from PIL import ImageFont, ImageDraw
        img = Image.open(os.path.abspath(image_path)).convert("RGBA")
        w, h = img.size

        # Draw on a transparent overlay so the text alpha-blends with the image.
        overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)

        # Font size scales with the smaller image dimension (minimum 14 px).
        font_size = max(14, int(min(w, h) * 0.04))
        font_path = None
        for fp in ["/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
                   "C:/Windows/Fonts/arialbd.ttf"]:
            if os.path.exists(fp):
                font_path = fp
                break

        try:
            font = ImageFont.truetype(font_path, font_size) if font_path else ImageFont.load_default()
        except Exception:
            # Fix: was a bare `except:` — keep the bitmap-font fallback but stop
            # swallowing KeyboardInterrupt/SystemExit.
            font = ImageFont.load_default()

        # Measure the text to anchor it 15 px from the bottom-right corner.
        bbox = draw.textbbox((0, 0), text, font=font)
        tw, th = bbox[2] - bbox[0], bbox[3] - bbox[1]
        x = w - tw - 15
        y = h - th - 15

        # Black drop shadow (1 px offset) under semi-transparent white text.
        draw.text((x + 1, y + 1), text, font=font, fill=(0, 0, 0, 120))
        draw.text((x, y), text, font=font, fill=(255, 255, 255, 160))

        result = Image.alpha_composite(img, overlay).convert("RGB")
        result.save(os.path.abspath(output_path))
        return output_path
    except Exception as e:
        logger.error(f"Watermark xatosi: {e}")
        return None
1027
+
1028
def process_video_watermark(video_path, output_path, progress_callback=None):
    """Burn the "@editfiltrbot" watermark into every frame of a video.

    Returns whatever _apply_video_transform returns (output path or None).
    """
    from PIL import ImageFont, ImageDraw

    text = "@editfiltrbot"
    font_path = None
    for fp in ["/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
               "C:/Windows/Fonts/arialbd.ttf"]:
        if os.path.exists(fp):
            font_path = fp
            break

    # Perf fix: the frame size is constant within one video, so the font was
    # being re-created identically for every frame. Cache it per pixel size.
    font_cache = {}

    def _font_for(size):
        font = font_cache.get(size)
        if font is None:
            try:
                font = ImageFont.truetype(font_path, size) if font_path else ImageFont.load_default()
            except Exception:
                # Fix: was a bare `except:` — fall back without swallowing
                # KeyboardInterrupt/SystemExit.
                font = ImageFont.load_default()
            font_cache[size] = font
        return font

    def wm_frame(frame):
        h, w = frame.shape[:2]
        pil_img = Image.fromarray(frame).convert("RGBA")
        overlay = Image.new("RGBA", pil_img.size, (0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)

        font = _font_for(max(14, int(min(w, h) * 0.04)))

        # Anchor the text 15 px from the bottom-right corner.
        bbox = draw.textbbox((0, 0), text, font=font)
        tw, th = bbox[2] - bbox[0], bbox[3] - bbox[1]
        x, y = w - tw - 15, h - th - 15

        # Black drop shadow under semi-transparent white text.
        draw.text((x + 1, y + 1), text, font=font, fill=(0, 0, 0, 120))
        draw.text((x, y), text, font=font, fill=(255, 255, 255, 160))

        result = Image.alpha_composite(pil_img, overlay).convert("RGB")
        return np.array(result)

    return _apply_video_transform(video_path, output_path, wm_frame, progress_callback)
1064
+
1065
def apply_bg_remove(image_path, output_path):
    """Remove the background from an image and save a transparent PNG.

    Returns the path of the written PNG, or None on failure.
    """
    try:
        from rembg import remove

        src_path = os.path.abspath(image_path)
        with open(src_path, 'rb') as inp:
            payload = inp.read()

        cut_out = remove(payload)

        # Force a .png extension so the alpha channel survives saving.
        png_path = os.path.abspath(output_path).rsplit('.', 1)[0] + '.png'
        with open(png_path, 'wb') as out:
            out.write(cut_out)

        return png_path
    except ImportError:
        logger.error("rembg kutubxonasi o'rnatilmagan.")
        return None
    except Exception as e:
        logger.error(f"Background Remove xatosi: {e}")
        return None
1087
+
1088
def process_video_subtitle_translate(video_path, output_path, target_lang="uz", progress_callback=None):
    """Transcribe a video's speech, translate it, and burn subtitles into the frames.

    Args:
        video_path: input video; must be at most 300 s and have an audio track.
        output_path: destination video path (H.264/AAC).
        target_lang: deep-translator language code for the subtitle text.
        progress_callback: optional callable receiving a 0-100 progress int.

    Returns:
        output_path on success; None on failure (too long, no audio,
        no speech detected, or any processing error).
    """
    from PIL import ImageFont, ImageDraw

    try:
        video_path = os.path.abspath(video_path)
        output_path = os.path.abspath(output_path)

        video = VideoFileClip(video_path)

        # Guard: transcription + per-frame rendering is too slow beyond 5 min.
        if video.duration > 300:
            logger.warning(f"Video juda uzun: {video.duration:.0f}s")
            video.close()
            return None

        if video.audio is None:
            video.close()
            return None

        # Extract 16 kHz 16-bit PCM audio for the speech recognizer.
        temp_audio = os.path.join(os.path.dirname(video_path), f"tmp_audio_tr_{os.getpid()}.wav")
        video.audio.write_audiofile(temp_audio, fps=16000, nbytes=2, codec='pcm_s16le', logger=None)
        video.close()

        if progress_callback:
            progress_callback(10)

        # Speech-to-text via Whisper (module helper).
        segments, lang = _transcribe_audio(temp_audio)

        # Best-effort temp-file cleanup.
        if os.path.exists(temp_audio):
            try:
                os.remove(temp_audio)
            except OSError:
                pass

        if not segments:
            return None

        if progress_callback:
            progress_callback(40)

        # Translate each segment; on failure keep the original text (best effort).
        try:
            from deep_translator import GoogleTranslator
            translator = GoogleTranslator(source='auto', target=target_lang)

            for seg in segments:
                if seg['text']:
                    try:
                        seg['text'] = translator.translate(seg['text'])
                    except Exception:
                        pass  # keep original text if translation fails
        except ImportError:
            logger.warning("deep-translator o'rnatilmagan, asl matn ishlatiladi.")

        if progress_callback:
            progress_callback(55)

        # Pick the first available bold font; None falls back to PIL's default.
        font_path = None
        for fp in ["/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
                   "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf",
                   "C:/Windows/Fonts/arialbd.ttf"]:
            if os.path.exists(fp):
                font_path = fp
                break

        def subtitle_frame(get_frame, t):
            """Render the subtitle segment active at time t onto the frame."""
            frame = get_frame(t)
            h, w = frame.shape[:2]

            current_text = None
            for seg in segments:
                if seg['start'] <= t <= seg['end']:
                    current_text = seg['text']
                    break

            if not current_text:
                return frame

            pil_img = Image.fromarray(frame)
            draw = ImageDraw.Draw(pil_img)
            font_size = max(18, int(h * 0.04))
            try:
                font = ImageFont.truetype(font_path, font_size) if font_path else ImageFont.load_default()
            except Exception:
                font = ImageFont.load_default()

            # Greedy word-wrap to an approximate character budget per line.
            max_chars = max(20, int(w / (font_size * 0.55)))
            lines = []
            words = current_text.split()
            current_line = ""
            for word in words:
                test = current_line + (" " if current_line else "") + word
                if len(test) <= max_chars:
                    current_line = test
                else:
                    if current_line:
                        lines.append(current_line)
                    current_line = word
            if current_line:
                lines.append(current_line)

            if not lines:
                return frame

            line_height = font_size + 6
            total_h = line_height * len(lines)
            # Fix: clamp to 0 — with many wrapped lines h - total_h - 30 can go
            # negative, and a negative slice start would darken the wrong region.
            bg_y = max(0, h - total_h - 30)

            # Darken the subtitle strip to 30% brightness for readability.
            overlay = np.array(pil_img)
            overlay[bg_y:h-5, 10:w-10] = (overlay[bg_y:h-5, 10:w-10] * 0.3).astype(np.uint8)
            pil_img = Image.fromarray(overlay)
            draw = ImageDraw.Draw(pil_img)

            # Centered white text with a 2 px black shadow, line by line.
            for i, line in enumerate(lines):
                bbox = draw.textbbox((0, 0), line, font=font)
                tx = (w - (bbox[2] - bbox[0])) // 2
                ty = bg_y + 10 + (i * line_height)
                draw.text((tx + 2, ty + 2), line, font=font, fill=(0, 0, 0))
                draw.text((tx, ty), line, font=font, fill=(255, 255, 255))

            return np.array(pil_img)

        # Re-open the source and wrap it in a frame-transforming clip.
        video = VideoFileClip(video_path)
        from moviepy import VideoClip

        subtitled = VideoClip(
            lambda t: subtitle_frame(video.get_frame, t),
            duration=video.duration
        )

        if video.audio:
            subtitled = subtitled.with_audio(video.audio)

        audio_params = {"audio_codec": "aac"} if video.audio else {"audio": False}
        subtitled.write_videofile(
            output_path, codec="libx264", fps=video.fps or 24,
            preset="ultrafast", threads=4, logger=None, **audio_params
        )

        video.close()
        subtitled.close()

        if progress_callback:
            progress_callback(99)

        return output_path if os.path.exists(output_path) else None
    except Exception as e:
        logger.error(f"Subtitle Translate xatosi: {e}")
        return None
1237
+
1238
def apply_style_transfer(image_path, output_path, style="anime"):
    """Re-render an image in an artistic style via the HF Inference API.

    Returns output_path on success, None on failure.
    """
    try:
        from huggingface_hub import InferenceClient

        # Token is optional; anonymous calls are rate-limited by HF.
        hf_token = os.environ.get("HF_TOKEN")
        hf_client = InferenceClient(token=hf_token)

        prompts = {
            "anime": "anime style, vibrant colors, clean lines, studio ghibli",
            "sketch": "pencil sketch, black and white drawing, detailed shading",
            "oil": "oil painting style, thick brush strokes, impressionist",
            "cartoon": "cartoon style, bold outlines, bright flat colors, pixar style"
        }

        # Unknown style names fall back to the anime prompt.
        prompt = prompts.get(style, prompts["anime"])

        with open(os.path.abspath(image_path), 'rb') as src:
            payload = src.read()

        styled = hf_client.image_to_image(
            payload,
            prompt=prompt,
            model="timbrooks/instruct-pix2pix",
            guidance_scale=7.5,
            image_guidance_scale=1.5,
        )

        styled.save(os.path.abspath(output_path))
        return output_path
    except ImportError:
        logger.error("huggingface_hub kutubxonasi o'rnatilmagan.")
        return None
    except Exception as e:
        logger.error(f"Style Transfer xatosi: {e}")
        return None
main.py CHANGED
@@ -54,7 +54,11 @@ from filters import (
54
  process_video_bw, process_video_color_correct, process_video_remove_audio,
55
  process_video_trim, process_video_face_fix, process_video_auto_enhance,
56
  process_video_fps_boost, apply_nudenet_filter, process_video_nnsfw,
57
- process_video_subtitle
 
 
 
 
58
  )
59
  from database import db
60
  from concurrent.futures import ThreadPoolExecutor
@@ -236,6 +240,22 @@ async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE):
236
  [
237
  InlineKeyboardButton("πŸ›‘οΈ Media Shield", callback_data=f"n|p|{short_id}")
238
  ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  [
240
  InlineKeyboardButton("πŸ”™ Bekor qilish", callback_data="nav|main")
241
  ]
@@ -295,6 +315,20 @@ async def handle_video(update: Update, context: ContextTypes.DEFAULT_TYPE):
295
  [
296
  InlineKeyboardButton("πŸ“ Auto-Subtitle", callback_data=f"sub|v|{short_id}")
297
  ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
  [
299
  InlineKeyboardButton("πŸ”™ Bekor qilish", callback_data="nav|main")
300
  ]
@@ -391,13 +425,23 @@ async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE):
391
  func = {
392
  "r": apply_retro_filter, "u": upscale_image,
393
  "f": apply_face_restore, "a": apply_auto_enhance,
394
- "n": apply_nudenet_filter
 
 
395
  }.get(action)
396
- if not func:
 
 
 
 
 
 
 
397
  logger.error(f"Unknown action: {action}")
398
  await context.bot.send_message(query.message.chat_id, "❌ Noma'lum amal.")
399
  return
400
- success_path = await loop.run_in_executor(executor, func, input_path, output_path)
 
401
  else:
402
  # FPS Boost uchun alohida mantiq
403
  if action in ("fps30", "fps60", "fps120"):
@@ -418,9 +462,21 @@ async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE):
418
  "vf": process_video_face_fix,
419
  "va": process_video_auto_enhance,
420
  "n": process_video_nnsfw,
421
- "sub": process_video_subtitle
 
 
 
422
  }
423
- func = video_funcs.get(action)
 
 
 
 
 
 
 
 
 
424
  if not func:
425
  logger.error(f"Unknown video action: {action}")
426
  await context.bot.send_message(query.message.chat_id, "❌ Noma'lum video amali.")
@@ -437,8 +493,13 @@ async def button_handler(update: Update, context: ContextTypes.DEFAULT_TYPE):
437
  "ra": "ovoz olib tashlash", "t": "kesish", "vf": "video face fix",
438
  "va": "video auto enhance", "fps30": "30 FPS boost",
439
  "fps60": "60 FPS boost", "fps120": "120 FPS boost",
440
- "n": "media shield",
441
- "sub": "auto-subtitle"
 
 
 
 
 
442
  }.get(action, f"filter_{action}")
443
  db.log_history(query.from_user.id, "photo" if m_type == "p" else "video", f_name, file_id)
444
 
 
54
  process_video_bw, process_video_color_correct, process_video_remove_audio,
55
  process_video_trim, process_video_face_fix, process_video_auto_enhance,
56
  process_video_fps_boost, apply_nudenet_filter, process_video_nnsfw,
57
+ process_video_subtitle,
58
+ apply_glitch_filter, apply_mirror_filter, apply_watermark, apply_bg_remove,
59
+ apply_style_transfer,
60
+ process_video_glitch, process_video_mirror, process_video_watermark,
61
+ process_video_subtitle_translate
62
  )
63
  from database import db
64
  from concurrent.futures import ThreadPoolExecutor
 
240
  [
241
  InlineKeyboardButton("πŸ›‘οΈ Media Shield", callback_data=f"n|p|{short_id}")
242
  ],
243
+ [
244
+ InlineKeyboardButton("πŸͺ„ Glitch", callback_data=f"gl|p|{short_id}"),
245
+ InlineKeyboardButton("πŸͺž Mirror", callback_data=f"mr|p|{short_id}")
246
+ ],
247
+ [
248
+ InlineKeyboardButton("🎨 Fon o'chirish", callback_data=f"bg|p|{short_id}"),
249
+ InlineKeyboardButton("πŸ“± Watermark", callback_data=f"wm|p|{short_id}")
250
+ ],
251
+ [
252
+ InlineKeyboardButton("πŸ–ŒοΈ Anime", callback_data=f"st_anime|p|{short_id}"),
253
+ InlineKeyboardButton("πŸ–ŒοΈ Sketch", callback_data=f"st_sketch|p|{short_id}")
254
+ ],
255
+ [
256
+ InlineKeyboardButton("πŸ–ŒοΈ Oil Paint", callback_data=f"st_oil|p|{short_id}"),
257
+ InlineKeyboardButton("πŸ–ŒοΈ Cartoon", callback_data=f"st_cart|p|{short_id}")
258
+ ],
259
  [
260
  InlineKeyboardButton("πŸ”™ Bekor qilish", callback_data="nav|main")
261
  ]
 
315
  [
316
  InlineKeyboardButton("πŸ“ Auto-Subtitle", callback_data=f"sub|v|{short_id}")
317
  ],
318
+ [
319
+ InlineKeyboardButton("πŸͺ„ Glitch", callback_data=f"gl|v|{short_id}"),
320
+ InlineKeyboardButton("πŸͺž Mirror", callback_data=f"mr|v|{short_id}")
321
+ ],
322
+ [
323
+ InlineKeyboardButton("πŸ“± Watermark", callback_data=f"wm|v|{short_id}")
324
+ ],
325
+ [
326
+ InlineKeyboardButton("🌐 Sub+O'zbek", callback_data=f"stuz|v|{short_id}"),
327
+ InlineKeyboardButton("🌐 Sub+Rus", callback_data=f"stru|v|{short_id}")
328
+ ],
329
+ [
330
+ InlineKeyboardButton("🌐 Sub+English", callback_data=f"sten|v|{short_id}")
331
+ ],
332
  [
333
  InlineKeyboardButton("πŸ”™ Bekor qilish", callback_data="nav|main")
334
  ]
 
425
  func = {
426
  "r": apply_retro_filter, "u": upscale_image,
427
  "f": apply_face_restore, "a": apply_auto_enhance,
428
+ "n": apply_nudenet_filter,
429
+ "gl": apply_glitch_filter, "mr": apply_mirror_filter,
430
+ "wm": apply_watermark, "bg": apply_bg_remove,
431
  }.get(action)
432
+
433
+ # Style Transfer uchun alohida mantiq
434
+ if action.startswith("st_"):
435
+ style = action.replace("st_", "").replace("cart", "cartoon")
436
+ success_path = await loop.run_in_executor(
437
+ executor, apply_style_transfer, input_path, output_path, style
438
+ )
439
+ elif not func:
440
  logger.error(f"Unknown action: {action}")
441
  await context.bot.send_message(query.message.chat_id, "❌ Noma'lum amal.")
442
  return
443
+ else:
444
+ success_path = await loop.run_in_executor(executor, func, input_path, output_path)
445
  else:
446
  # FPS Boost uchun alohida mantiq
447
  if action in ("fps30", "fps60", "fps120"):
 
462
  "vf": process_video_face_fix,
463
  "va": process_video_auto_enhance,
464
  "n": process_video_nnsfw,
465
+ "sub": process_video_subtitle,
466
+ "gl": process_video_glitch,
467
+ "mr": process_video_mirror,
468
+ "wm": process_video_watermark
469
  }
470
+
471
+ # Subtitle Tarjima uchun alohida mantiq
472
+ if action in ("stuz", "stru", "sten"):
473
+ lang_map = {"stuz": "uz", "stru": "ru", "sten": "en"}
474
+ target = lang_map[action]
475
+ success_path = await loop.run_in_executor(
476
+ executor, process_video_subtitle_translate, input_path, output_path, target, callback
477
+ )
478
+ else:
479
+ func = video_funcs.get(action)
480
  if not func:
481
  logger.error(f"Unknown video action: {action}")
482
  await context.bot.send_message(query.message.chat_id, "❌ Noma'lum video amali.")
 
493
  "ra": "ovoz olib tashlash", "t": "kesish", "vf": "video face fix",
494
  "va": "video auto enhance", "fps30": "30 FPS boost",
495
  "fps60": "60 FPS boost", "fps120": "120 FPS boost",
496
+ "n": "media shield",
497
+ "sub": "auto-subtitle",
498
+ "gl": "glitch", "mr": "mirror",
499
+ "wm": "watermark", "bg": "fon o'chirish",
500
+ "st_anime": "AI anime", "st_sketch": "AI sketch",
501
+ "st_oil": "AI oil paint", "st_cart": "AI cartoon",
502
+ "stuz": "sub+o'zbek", "stru": "sub+rus", "sten": "sub+english"
503
  }.get(action, f"filter_{action}")
504
  db.log_history(query.from_user.id, "photo" if m_type == "p" else "video", f_name, file_id)
505
 
requirements.txt CHANGED
@@ -12,3 +12,6 @@ httpx[socks]
12
  nudenet
13
  onnxruntime
14
  faster-whisper
 
 
 
 
12
  nudenet
13
  onnxruntime
14
  faster-whisper
15
+ rembg[cpu]
16
+ deep-translator
17
+ huggingface_hub