Files changed (1) hide show
  1. app.py +191 -344
app.py CHANGED
@@ -1,354 +1,201 @@
1
- # app.py
2
  import os
3
- import oss2
4
- import sys
5
- import uuid
6
- import shutil
7
  import time
8
- import gradio as gr
9
- import requests
10
-
11
- from pathlib import Path
12
- from datetime import datetime, timedelta
13
-
14
- import dashscope
15
- # from dashscope.utils.oss_utils import check_and_upload_local
16
-
17
- DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
18
- dashscope.api_key = DASHSCOPE_API_KEY
19
-
20
-
21
-
22
- def get_upload_policy(api_key, model_name):
23
- """获取文件上传凭证"""
24
- url = "https://dashscope.aliyuncs.com/api/v1/uploads"
25
- headers = {
26
- "Authorization": f"Bearer {api_key}",
27
- "Content-Type": "application/json"
28
- }
29
- params = {
30
- "action": "getPolicy",
31
- "model": model_name
32
- }
33
-
34
- response = requests.get(url, headers=headers, params=params)
35
- if response.status_code != 200:
36
- raise Exception(f"Failed to get upload policy: {response.text}")
37
-
38
- return response.json()['data']
39
-
40
- def upload_file_to_oss(policy_data, file_path):
41
- """将文件上传到临时存储OSS"""
42
- file_name = Path(file_path).name
43
- key = f"{policy_data['upload_dir']}/{file_name}"
44
-
45
- with open(file_path, 'rb') as file:
46
- files = {
47
- 'OSSAccessKeyId': (None, policy_data['oss_access_key_id']),
48
- 'Signature': (None, policy_data['signature']),
49
- 'policy': (None, policy_data['policy']),
50
- 'x-oss-object-acl': (None, policy_data['x_oss_object_acl']),
51
- 'x-oss-forbid-overwrite': (None, policy_data['x_oss_forbid_overwrite']),
52
- 'key': (None, key),
53
- 'success_action_status': (None, '200'),
54
- 'file': (file_name, file)
55
- }
56
-
57
- response = requests.post(policy_data['upload_host'], files=files)
58
- if response.status_code != 200:
59
- raise Exception(f"Failed to upload file: {response.text}")
 
 
 
 
 
 
 
 
60
 
61
- return f"oss://{key}"
62
-
63
- def upload_file_and_get_url(api_key, model_name, file_path):
64
- """上传文件并获取URL"""
65
- # 1. 获取上传凭证,上传凭证接口有限流,超出限流将导致请求失败
66
- policy_data = get_upload_policy(api_key, model_name)
67
- # 2. 上传文件到OSS
68
- oss_url = upload_file_to_oss(policy_data, file_path)
69
 
70
- return oss_url
71
-
72
-
73
- class WanAnimateApp:
74
- def __init__(self, url, get_url):
75
- self.url = url
76
- self.get_url = get_url
77
-
78
- def predict(
79
- self,
80
- ref_img,
81
- video,
82
- model_id,
83
- model,
84
- ):
85
- # Upload files to OSS if needed and get URLs
86
- image_url = upload_file_and_get_url(DASHSCOPE_API_KEY, model_id, ref_img)
87
- video_url = upload_file_and_get_url(DASHSCOPE_API_KEY, model_id, video)
88
-
89
- # Prepare the request payload
90
- payload = {
91
- "model": model_id,
92
- "input": {
93
- "image_url": image_url,
94
- "video_url": video_url
95
- },
96
- "parameters": {
97
- "check_image": True,
98
- "mode": model,
99
- }
100
- }
101
-
102
- # Set up headers
103
- headers = {
104
- "X-DashScope-Async": "enable",
105
- "X-DashScope-OssResourceResolve": "enable",
106
- "Authorization": f"Bearer {DASHSCOPE_API_KEY}",
107
- "Content-Type": "application/json"
108
- }
109
-
110
- # Make the initial API request
111
- url = self.url
112
- response = requests.post(url, json=payload, headers=headers, timeout=60)
113
-
114
- # Check if request was successful
115
- if response.status_code != 200:
116
- raise Exception(f"Initial request failed with status code {response.status_code}: {response.text}")
117
-
118
- # Get the task ID from response
119
- result = response.json()
120
- task_id = result.get("output", {}).get("task_id")
121
- if not task_id:
122
- raise Exception("Failed to get task ID from response")
123
 
124
- # Poll for results
125
- get_url = f"{self.get_url}/{task_id}"
126
- headers = {
127
- "Authorization": f"Bearer {DASHSCOPE_API_KEY}",
128
- "Content-Type": "application/json"
129
- }
130
 
131
- while True:
132
- response = requests.get(get_url, headers=headers, timeout=60)
133
- if response.status_code != 200:
134
- raise Exception(f"Failed to get task status: {response.status_code}: {response.text}")
135
-
136
- result = response.json()
137
- print(result)
138
- task_status = result.get("output", {}).get("task_status")
139
-
140
- if task_status == "SUCCEEDED":
141
- # Task completed successfully, return video URL
142
- video_url = result["output"]["results"]["video_url"]
143
- return video_url, "SUCCEEDED"
144
- elif task_status == "PENDING" or task_status == "RUNNING":
145
- # Task is still running, wait and retry
146
- time.sleep(10) # Wait 10 seconds before polling again
147
- else:
148
- # Task failed or unknown, raise an exception with error message
149
- error_msg = result.get("output", {}).get("message", "Unknown error")
150
- code_msg = result.get("output", {}).get("code", "Unknown code")
151
- print(f"\n\nTask failed: {error_msg} Code: {code_msg} TaskId: {task_id}\n\n")
152
- return None, f"Task failed: {error_msg} Code: {code_msg} TaskId: {task_id}"
153
- # raise Exception(f"Task failed: {error_msg} TaskId: {task_id}")
154
-
155
- def start_app():
156
- import argparse
157
- parser = argparse.ArgumentParser(description="Wan2.2-Animate 视频生成工具")
158
- args = parser.parse_args()
159
 
160
- url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/image2video/video-synthesis/"
161
- # url = "https://poc-dashscope.aliyuncs.com/api/v1/services/aigc/image2video/video-synthesis"
162
-
163
- get_url = f"https://dashscope.aliyuncs.com/api/v1/tasks/"
164
- # get_url = f"https://poc-dashscope.aliyuncs.com/api/v1/tasks"
165
- app = WanAnimateApp(url=url, get_url=get_url)
166
-
167
- with gr.Blocks(title="Wan2.2-Animate 视频生成") as demo:
168
- gr.HTML("""
169
-
170
-
171
- <div style="padding: 2rem; text-align: center; max-width: 1200px; margin: 0 auto; font-family: Arial, sans-serif;">
172
-
173
- <h1 style="font-size: 2.5rem; font-weight: bold; margin-bottom: 0.5rem; color: #333;">
174
- Wan2.2-Animate: Unified Character Animation and Replacement with Holistic Replication
175
- </h1>
176
-
177
- <h3 style="font-size: 2.5rem; font-weight: bold; margin-bottom: 0.5rem; color: #333;">
178
- Wan2.2-Animate: 统一的角色动画和视频人物替换模型
179
- </h3>
180
-
181
- <div style="font-size: 1.25rem; margin-bottom: 1.5rem; color: #555;">
182
- Tongyi Lab, Alibaba
183
- </div>
184
-
185
- <div style="display: flex; flex-wrap: wrap; justify-content: center; gap: 1rem; margin-bottom: 1rem;">
186
- <!-- 第一行按钮 -->
187
- <a href="https://arxiv.org/abs/2509.14055" target="_blank"
188
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; /* 浅灰色背景 */ color: #333; /* 深色文字 */ text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
189
- <span style="margin-right: 0.5rem;">📄</span> <!-- 使用文档图标 -->
190
- <span>Paper</span>
191
- </a>
192
-
193
- <a href="https://github.com/Wan-Video/Wan2.2" target="_blank"
194
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; color: #333; text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
195
- <span style="margin-right: 0.5rem;">💻</span> <!-- 使用电脑图标 -->
196
- <span>GitHub</span>
197
- </a>
198
-
199
- <a href="https://huggingface.co/Wan-AI/Wan2.2-Animate-14B" target="_blank"
200
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; color: #333; text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
201
- <span style="margin-right: 0.5rem;">🤗</span>
202
- <span>HF Model</span>
203
- </a>
204
-
205
- <a href="https://www.modelscope.cn/models/Wan-AI/Wan2.2-Animate-14B" target="_blank"
206
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; color: #333; text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
207
- <span style="margin-right: 0.5rem;">🤖</span>
208
- <span>MS Model</span>
209
- </a>
210
- </div>
211
-
212
- <div style="display: flex; flex-wrap: wrap; justify-content: center; gap: 1rem;">
213
- <!-- 第二行按钮 -->
214
- <a href="https://huggingface.co/spaces/Wan-AI/Wan2.2-Animate" target="_blank"
215
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; color: #333; text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
216
- <span style="margin-right: 0.5rem;">🤗</span>
217
- <span>HF Space</span>
218
- </a>
219
-
220
- <a href="https://www.modelscope.cn/studios/Wan-AI/Wan2.2-Animate" target="_blank"
221
- style="display: inline-flex; align-items: center; padding: 0.5rem 1rem; background-color: #f0f0f0; color: #333; text-decoration: none; border-radius: 9999px; font-weight: 500; transition: background-color 0.3s;">
222
- <span style="margin-right: 0.5rem;">🤖</span>
223
- <span>MS Studio</span>
224
- </a>
225
- </div>
226
-
227
- </div>
228
-
229
- """)
230
-
231
- gr.HTML("""
232
- <details>
233
- <summary>‼️Usage (使用说明)</summary>
234
-
235
- Wan-Animate supports two mode:
236
- <ul>
237
- <li>Move Mode: animate the character in input image with movements from the input video</li>
238
- <li>Mix Mode: replace the character in input video with the character in input image</li>
239
- </ul>
240
-
241
- Wan-Animate 支持两种模式:
242
- <ul>
243
- <li>Move模式: 用输入视频中提取的动作,驱动输入图片中的角色</li>
244
- <li>Mix模式: 用输入图片中的角色,替换输入视频中的角色</li>
245
- </ul>
246
-
247
- Currently, the following restrictions apply to inputs:
248
-
249
- <ul> <li>Video file size: Less than 200MB</li>
250
- <li>Video resolution: The shorter side must be greater than 200, and the longer side must be less than 2048</li>
251
- <li>Video duration: 2s to 30s</li>
252
- <li>Video aspect ratio: 1:3 to 3:1</li>
253
- <li>Video formats: mp4, avi, mov</li>
254
- <li>Image file size: Less than 5MB</li>
255
- <li>Image resolution: The shorter side must be greater than 200, and the longer side must be less than 4096</li>
256
- <li>Image formats: jpg, png, jpeg, webp, bmp</li> </ul>
257
-
258
-
259
- 当前,对于输入有以下的限制
260
-
261
- <ul>
262
- <li>视频文件大小: 小于 200MB</li>
263
- <li>视频分辨率: 最小边大于 200, 最大边小于2048</li>
264
- <li>视频时长: 2s ~ 30s </li>
265
- <li>视频比例:1:3 ~ 3:1 </li>
266
- <li>视频格式: mp4, avi, mov </li>
267
- <li>图片文件大小: 小于5MB </li>
268
- <li>图片分辨率:最小边大于200,最大边小于4096 </li>
269
- <li>图片格式: jpg, png, jpeg, webp, bmp </li>
270
- </ul>
271
-
272
- <p> Currently, the inference quality has two variants. You can use our open-source code for more flexible configuration. </p>
273
-
274
- <p>当前,推理质量有两个变种。 您可以使用我们的开源代码,来进行更灵活的设置。</p>
275
-
276
- <ul>
277
- <li> wan-pro: 25fps, 720p </li>
278
- <li> wan-std: 15fps, 720p </li>
279
- </ul>
280
-
281
-
282
- </details>
283
- """)
284
-
285
- with gr.Row():
286
- with gr.Column():
287
- ref_img = gr.Image(
288
- label="Reference Image(参考图像)",
289
- type="filepath",
290
- sources=["upload"],
291
- )
292
-
293
- video = gr.Video(
294
- label="Template Video(模版视频)",
295
- sources=["upload"],
296
- )
297
-
298
- with gr.Row():
299
- model_id = gr.Dropdown(
300
- label="Mode(模式)",
301
- choices=["wan2.2-animate-move", "wan2.2-animate-mix"],
302
- value="wan2.2-animate-move",
303
- info=""
304
- )
305
-
306
- model = gr.Dropdown(
307
- label="推理质量(Inference Quality)",
308
- choices=["wan-pro", "wan-std"],
309
- value="wan-pro",
310
- )
311
-
312
- run_button = gr.Button("Generate Video(生成视频)")
313
-
314
- with gr.Column():
315
- output_video = gr.Video(label="Output Video(输出视频)")
316
- output_status = gr.Textbox(label="Status(状态)")
317
-
318
- run_button.click(
319
- fn=app.predict,
320
- inputs=[
321
- ref_img,
322
- video,
323
- model_id,
324
- model,
325
- ],
326
- outputs=[output_video, output_status],
327
- )
328
-
329
- example_data = [
330
- ['./examples/mov/1/1.jpeg', './examples/mov/1/1.mp4', 'wan2.2-animate-move', 'wan-pro'],
331
- ['./examples/mov/2/2.jpeg', './examples/mov/2/2.mp4', 'wan2.2-animate-move', 'wan-pro'],
332
- ['./examples/mix/1/1.jpeg', './examples/mix/1/1.mp4', 'wan2.2-animate-mix', 'wan-pro'],
333
- ['./examples/mix/2/2.jpeg', './examples/mix/2/2.mp4', 'wan2.2-animate-mix', 'wan-pro']
334
- ]
335
-
336
- if example_data:
337
- gr.Examples(
338
- examples=example_data,
339
- inputs=[ref_img, video, model_id, model],
340
- outputs=[output_video, output_status],
341
- fn=app.predict,
342
- cache_examples="lazy",
343
- )
344
 
345
- demo.queue(default_concurrency_limit=100)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
- demo.launch(
348
- server_name="0.0.0.0",
349
- server_port=7860
350
- )
351
-
352
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
353
  if __name__ == "__main__":
354
- start_app()
 
 
 
 
 
 
 
 
 
1
+ import modal
2
  import os
 
 
 
 
3
  import time
4
+ import asyncio
5
+ import subprocess
6
+ from flask import Flask
7
+ from threading import Thread
8
+ from telegram import Update
9
+ from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters, ContextTypes
10
+
11
# ─────────────────────────────────────────────────────────────
# 0. DUMMY WEB SERVER FOR RENDER (24/7 keep-alive trick)
# ─────────────────────────────────────────────────────────────
web_app = Flask(__name__)


@web_app.route('/')
def home():
    """Health-check endpoint so Render sees a responsive HTTP service."""
    return "MrBeast Fabrikası 7/24 Aktif! 🚀"


def run_flask():
    """Serve the dummy Flask app on the port Render assigns via $PORT."""
    listen_port = int(os.environ.get("PORT", 8080))
    web_app.run(host="0.0.0.0", port=listen_port)
24
+
25
# ─────────────────────────────────────────────────────────────
# 1. MODAL SETUP
# ─────────────────────────────────────────────────────────────
def download_models():
    """Pre-fetch Whisper and CLIP weights at image-build time.

    Runs inside the Modal image build so containers start with the model
    weights already cached instead of downloading them on first request.
    """
    import torch  # noqa: F401 — ensures torch is importable in the image
    import whisper
    import clip

    print("🤖 [MODAL] Modeller indiriliyor (Whisper & CLIP)...")
    whisper.load_model("medium")
    clip.load("ViT-L/14", device="cpu")
33
+
34
# Container image for the GPU worker: ffmpeg + subtitle/font tooling, the
# Montserrat font referenced by the ASS subtitle style, and all Python deps.
# Model weights are baked in at build time via download_models so cold
# starts skip the download.
image = (
    modal.Image.debian_slim(python_version="3.10")
    .apt_install("ffmpeg", "git", "libdav1d-dev", "libavcodec-extra", "libass-dev", "curl", "fontconfig")
    .run_commands(
        "mkdir -p /usr/share/fonts/truetype/montserrat",
        "curl -L https://github.com/google/fonts/raw/main/ofl/montserrat/static/Montserrat-ExtraBold.ttf -o /usr/share/fonts/truetype/montserrat/Montserrat-ExtraBold.ttf",
        "fc-cache -fv"
    )
    .pip_install("torch", "torchvision", "openai-whisper", "moviepy==1.0.3", "scenedetect[opencv]", "deep-translator", "python-telegram-bot", "flask")
    .pip_install("git+https://github.com/openai/CLIP.git")
    .run_function(download_models)
)

# Single Modal app hosting the GPU worker function below.
app = modal.App("telegram-beast-v45")
48
+
49
# ─────────────────────────────────────────────────────────────
# 2. MAIN ENGINE (A10G GPU)
# ─────────────────────────────────────────────────────────────
def masterpiece_engine(video_path, audio_path):
    """Build a narrated edit: match video scenes to audio transcript segments.

    Pipeline: transcribe the audio with Whisper (word timestamps), split the
    video into scenes with PySceneDetect, embed scene midpoint frames and
    translated segment texts with CLIP, pick the best-matching scenes per
    segment, retime them to the segment duration, then burn karaoke-style
    word-by-word ASS subtitles with ffmpeg.

    Args:
        video_path: path to the source video file.
        audio_path: path to the narration audio file.

    Returns:
        Path to the final subtitled mp4 in /tmp.

    Raises:
        RuntimeError: if no usable scenes or transcript segments are found.
    """
    import torch, clip, whisper, subprocess, time
    from PIL import Image
    from moviepy.editor import VideoFileClip, AudioFileClip, concatenate_videoclips
    import moviepy.video.fx.all as vfx
    from scenedetect import VideoManager, SceneManager
    from scenedetect.detectors import ContentDetector
    from deep_translator import GoogleTranslator

    print("🎬 [ENGINE] Kurgu motoru ateşlendi...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    whisper_model = whisper.load_model("medium", device=device)

    trans = whisper_model.transcribe(audio_path, language=None, word_timestamps=True)
    full_v = VideoFileClip(video_path).without_audio()
    audio_clip = AudioFileClip(audio_path)
    audio_dur = audio_clip.duration

    # --- Scene detection -------------------------------------------------
    vm = VideoManager([video_path])
    sm = SceneManager()
    sm.add_detector(ContentDetector(threshold=27.0))
    vm.start()
    sm.detect_scenes(frame_source=vm)
    # Normalize to plain (start_sec, end_sec) floats up front.
    # BUG FIX: the old fallback [(0, duration)] mixed raw floats into a list
    # of FrameTimecode objects; the later .get_seconds() call then raised
    # AttributeError, which a bare `except` silently swallowed, leaving the
    # scene pool empty.
    scene_times = [(s.get_seconds(), e.get_seconds()) for s, e in sm.get_scene_list()]
    vm.release()  # BUG FIX: release the video manager's file handles
    if not scene_times:
        scene_times = [(0.0, full_v.duration)]

    # --- Scene embedding -------------------------------------------------
    clip_m, pre = clip.load("ViT-L/14", device=device)
    pool = []
    for s, e in scene_times:
        if (e - s) < 0.2:
            continue  # too short to be a usable cut
        try:
            frame = Image.fromarray(full_v.get_frame(s + (e - s) / 2))
            with torch.no_grad():  # inference only — skip autograd bookkeeping
                feat = clip_m.encode_image(pre(frame).unsqueeze(0).to(device))
            pool.append({'clip': full_v.subclip(s, e),
                         'feat': feat / feat.norm(dim=-1, keepdim=True),
                         'used': False})
        except Exception:
            # Skip unreadable frames but keep building the pool.
            # BUG FIX: was a bare `except:` which also hid programming errors.
            continue

    segments = trans.get('segments') or []
    # BUG FIX: the old code divided by len(trans['segments']) and
    # concatenated an empty clip list, crashing opaquely on silent audio
    # or an unusable video. Fail with a clear message instead.
    if not pool or not segments:
        raise RuntimeError("No usable scenes or transcript segments found")

    # --- Segment → scene matching ---------------------------------------
    translator = GoogleTranslator(source='auto', target='en')
    final_clips = []
    scenes_per_seg = max(2, round(len(pool) / len(segments)))

    for seg in segments:
        t_en = translator.translate(seg['text'])
        with torch.no_grad():
            t_feat = clip_m.encode_text(clip.tokenize([t_en], truncate=True).to(device))
        t_feat /= t_feat.norm(dim=-1, keepdim=True)
        # Prefer unused scenes; recycle the whole pool once exhausted.
        available = [p for p in pool if not p['used']] or pool
        scores = sorted((((t_feat @ sc['feat'].T).item(), i)
                         for i, sc in enumerate(available)),
                        key=lambda x: x[0], reverse=True)

        seg_dur = seg['end'] - seg['start']
        k = scenes_per_seg if seg_dur > 1.2 else 1
        top_matches = [available[i[1]] for i in scores[:k]]
        temp_clips = [match['clip'] for match in top_matches]
        for match in top_matches:
            match['used'] = True

        if temp_clips and seg_dur > 0:
            combined = concatenate_videoclips(temp_clips, method="chain")
            # Speed-adjust so the combined scenes exactly span the segment.
            final_clips.append(
                combined.fx(vfx.speedx, combined.duration / seg_dur).set_duration(seg_dur))

    # Pad with a loop of the first clip if the edit is shorter than the audio.
    curr_dur = sum(c.duration for c in final_clips)
    if curr_dur < audio_dur and final_clips:
        gap = audio_dur - curr_dur
        first_c = final_clips[0]
        loop_p = first_c.subclip(0, min(gap, first_c.duration))
        final_clips.append(loop_p.set_duration(gap))

    final_video = concatenate_videoclips(final_clips, method="chain").set_audio(audio_clip).set_duration(audio_dur)
    temp_raw = f"/tmp/raw_{time.time()}.mp4"
    final_video.write_videofile(temp_raw, codec="libx264", audio_codec="aac", fps=30, preset='ultrafast', threads=8)

    # --- Karaoke-style ASS subtitles, one Dialogue line per word ---------
    ass_path = f"/tmp/subs_{time.time()}.ass"
    header = "[Script Info]\nScriptType: v4.00+\nPlayResX: 1080\nPlayResY: 1920\n\n[V4+ Styles]\nFormat: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\nStyle: Default,Montserrat,85,&H00FFFFFF,&H0000FFFF,&H00000000,&H00000000,-1,0,0,0,100,100,0,0,1,8,0,2,180,180,400,1\n"
    lines = [header, "\n[Events]\nFormat: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n"]

    def f_t(sec):
        """Format seconds as an ASS H:MM:SS.cc timestamp."""
        cs = int(round((sec % 1) * 100))
        whole = int(sec)
        if cs == 100:
            # BUG FIX: rounding 0.995+ used to emit 3-digit ".100" centiseconds
            cs = 0
            whole += 1
        return f"{whole // 3600}:{(whole % 3600) // 60:02d}:{whole % 60:02d}.{cs:02d}"

    for seg in segments:
        for w in seg.get('words', []):
            lines.append(f"Dialogue: 0,{f_t(w['start'])},{f_t(w['end'])},Default,,0,0,0,," + r"{\c&H0000FFFF&}{\fscx0\fscy0\t(0,100,\fscx125\fscy125)\t(100,200,\fscx100\fscy100)}" + f"{w['word'].strip().upper()}\n")

    with open(ass_path, "w", encoding="utf-8") as f:
        f.write("".join(lines))
    final_out = f"/tmp/final_{time.time()}.mp4"
    subprocess.run(["ffmpeg", "-y", "-i", temp_raw, "-vf", f"subtitles={ass_path}", "-c:a", "copy", final_out], check=True)
    return final_out
129
+
130
# ─────────────────────────────────────────────────────────────
# 3. MODAL FUNCTION
# ─────────────────────────────────────────────────────────────
@app.function(image=image, gpu="A10G", timeout=3600, keep_warm=1)
async def process_via_telegram(vid_bytes, aud_bytes):
    """Run the edit engine on raw video/audio bytes; return the final mp4 bytes.

    Executes remotely on Modal (A10G GPU). Videos over 15 MB are re-encoded
    at CRF 28 first to keep the downstream pipeline fast.

    Args:
        vid_bytes: raw bytes of the source video file.
        aud_bytes: raw bytes of the narration audio file.

    Returns:
        bytes of the final rendered mp4.
    """
    import os, subprocess, tempfile, shutil

    # BUG FIX: use a per-call temp directory instead of fixed /tmp names.
    # With keep_warm=1 two overlapping calls can share a container, and the
    # old hard-coded /tmp/vid.mp4 paths would clobber each other.
    work = tempfile.mkdtemp(prefix="beast_")
    try:
        vid_path = os.path.join(work, "vid.mp4")
        aud_path = os.path.join(work, "aud.mp3")
        with open(vid_path, "wb") as f:
            f.write(vid_bytes)
        with open(aud_path, "wb") as f:
            f.write(aud_bytes)

        # Compress oversized inputs (> 15 MB) before the heavy pipeline.
        if os.path.getsize(vid_path) > 15 * 1024 * 1024:
            comp_path = os.path.join(work, "comp.mp4")
            subprocess.run(["ffmpeg", "-y", "-i", vid_path, "-c:v", "libx264",
                            "-crf", "28", "-preset", "veryfast", "-c:a", "copy",
                            comp_path], check=True)
            os.replace(comp_path, vid_path)

        res_path = masterpiece_engine(vid_path, aud_path)
        with open(res_path, "rb") as f:
            return f.read()
    finally:
        # Always reclaim the scratch space in the warm container.
        shutil.rmtree(work, ignore_errors=True)
145
+
146
# ─────────────────────────────────────────────────────────────
# 4. TELEGRAM BOT
# ─────────────────────────────────────────────────────────────
# SECURITY: the bot token was previously hard-coded here, so it is leaked in
# version control — rotate it via @BotFather and provide the new value through
# the TELEGRAM_TOKEN environment variable. The old value remains only as a
# backward-compatible fallback until rotation.
TOKEN = os.environ.get("TELEGRAM_TOKEN", "7700336574:AAHD-4d-4LCYgEMm4f4ccDEAnAqsuJ-wEWU")

# chat_id -> {'aud': bytearray} while waiting for the matching video upload.
user_data = {}
151
+
152
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /start: greet the user and ask for the audio file first."""
    message = update.message
    await message.reply_text("Nağarsan kələ? Mənə bi dənə SƏS faylı (.mp3) at🚀")
154
+
155
async def handle_docs(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Two-step upload flow: collect an audio file first, then a video.

    State machine keyed by chat id in `user_data`:
      * no entry yet  -> expect audio, download and store its bytes;
      * entry present -> expect video, ship both to Modal, reply with result.
    """
    chat_id = update.effective_chat.id
    doc = update.message.document or update.message.audio or update.message.video
    if not doc:
        return

    f_name = doc.file_name.lower() if hasattr(doc, 'file_name') and doc.file_name else ""
    is_audio = f_name.endswith(('.mp3', '.m4a', '.wav')) or update.message.audio
    is_video = f_name.endswith(('.mp4', '.mov', '.avi')) or update.message.video

    if chat_id not in user_data:
        # Step 1: waiting for the narration audio.
        if is_audio:
            file = await context.bot.get_file(doc.file_id)
            user_data[chat_id] = {'aud': await file.download_as_bytearray()}
            await update.message.reply_text("✅ İndi VİDEO (.mp4) göndər.")
        else:
            await update.message.reply_text("Əvvəlcə SƏS göndər.")
        return

    # Step 2: audio stored, waiting for the video.
    if not is_video:
        await update.message.reply_text("Video göndər.")
        return

    await update.message.reply_text("🚀 Hazırlanır, gözlə kələ...")
    out = f"final_{chat_id}.mp4"
    try:
        file = await context.bot.get_file(doc.file_id)
        vid_bytes = await file.download_as_bytearray()
        res_bytes = await process_via_telegram.remote.aio(bytes(vid_bytes), bytes(user_data[chat_id]['aud']))

        with open(out, "wb") as f:
            f.write(res_bytes)
        # BUG FIX: open the upload inside a context manager so the handle is
        # closed after sending (the old code leaked an open file object).
        with open(out, "rb") as res_f:
            await update.message.reply_document(document=res_f)
        del user_data[chat_id]
    except Exception as e:
        await update.message.reply_text(f"Hata: {str(e)}")
    finally:
        # BUG FIX: remove the output file on the error path too.
        if os.path.exists(out):
            os.remove(out)
188
+
189
# ─────────────────────────────────────────────────────────────
# 5. MAIN (RENDER-COMPATIBLE)
# ─────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Keep-alive web server in the background so Render treats this worker
    # as a live web service. BUG FIX: daemon=True lets the process exit when
    # the bot's polling loop stops instead of hanging on this thread forever.
    Thread(target=run_flask, daemon=True).start()

    print("🚀 Bot başlatılıyor...")
    # app.run() keeps an ephemeral Modal session open for the .remote calls
    # made from handle_docs.
    with app.run():
        application = ApplicationBuilder().token(TOKEN).build()
        application.add_handler(CommandHandler("start", start))
        application.add_handler(MessageHandler(filters.ALL, handle_docs))
        application.run_polling(drop_pending_updates=True)
+ application.run_polling(drop_pending_updates=True)