Luis-Filipe committed on
Commit
88c16c2
·
verified ·
1 Parent(s): 78f0656

Upload 3 files

Browse files
Dubbing_app/app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import gradio as gr
4
+ from moviepy.editor import VideoFileClip, AudioFileClip
5
+ import cv2
6
+ import torch
7
+ from facenet_pytorch import MTCNN
8
+ from transformers import MarianMTModel, MarianTokenizer
9
+
10
+ # Instalar os wheels (só na primeira execução)
11
+ # pip install qwen3_asr_toolkit-1.0.4-py3-none-any.whl
12
+ # pip install cosyvoice-2.0.0-py3-none-any.whl
13
+ # pip install liveportrait-1.0.0-py3-none-any.whl
14
+
15
+ from qwen3_asr_toolkit import transcribe_audio
16
+ from cosyvoice.cli.cosyvoice import CosyVoice2
17
+ from cosyvoice.utils.file_utils import load_wav
18
+ from liveportrait import LivePortraitPipeline
19
+
20
+ # === Configurações ===
21
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
22
+ MODEL_DIR = "models"
23
+
24
+ os.makedirs(MODEL_DIR, exist_ok=True)
25
+
26
+ # === 1. Baixar vídeo do YouTube ===
27
# === 1. Download the video from YouTube ===
def download_youtube_video(url, output_path):
    """Download a YouTube video (best quality up to 720p) to *output_path*.

    Args:
        url: Full YouTube video URL.
        output_path: Destination file path for the downloaded video.

    Returns:
        The same *output_path*, for convenient chaining.
    """
    from yt_dlp import YoutubeDL

    options = {
        'format': 'best[height<=720]',
        'outtmpl': output_path,
        'quiet': True,
    }
    downloader = YoutubeDL(options)
    with downloader:
        downloader.download([url])
    return output_path
37
+
38
+ # === 2. Extrair áudio ===
39
# === 2. Extract audio ===
def extract_audio(video_path, audio_path):
    """Extract the audio track of a video into a 16 kHz wav file.

    Args:
        video_path: Source video file.
        audio_path: Destination .wav path (16 kHz, as expected by the ASR step).
    """
    clip = VideoFileClip(video_path)
    try:
        clip.audio.write_audiofile(audio_path, fps=16000, verbose=False, logger=None)
    finally:
        # Fix: the original never closed the clip, leaking the ffmpeg
        # reader process / file handles held by moviepy.
        clip.close()
42
+
43
+ # === 3. Encontrar frame com rosto ===
44
# === 3. Find a frame containing a face ===
def extract_face_frame(video_path, output_image):
    """Scan the video and save the first frame in which a face is detected.

    Args:
        video_path: Path to the input video.
        output_image: Path where the chosen frame is written.

    Returns:
        *output_image* once a face has been found.

    Raises:
        ValueError: If no frame contains a detectable face.
    """
    mtcnn = MTCNN(keep_all=False, device=DEVICE)
    cap = cv2.VideoCapture(video_path)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Fix: facenet-pytorch's MTCNN expects RGB images, but OpenCV
            # decodes frames as BGR — convert before detection.
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            boxes, _ = mtcnn.detect(rgb_frame)
            if boxes is not None:
                # Write the original BGR frame: cv2.imwrite expects BGR.
                cv2.imwrite(output_image, frame)
                return output_image
    finally:
        # Release the capture on every exit path (found / not found / error).
        cap.release()
    raise ValueError("Nenhum rosto encontrado no vídeo.")
58
+
59
+ # === 4. Traduzir para pt-PT ===
60
# === 4. Translate to pt-PT ===
def translate_to_pt(text):
    """Translate English text to Portuguese with a MarianMT model.

    The tokenizer/model pair is cached on the function object, so repeated
    calls (e.g. one per dubbed video) do not reload — or re-download — the
    checkpoint every time, as the original implementation did.

    Args:
        text: English source text.

    Returns:
        The translated text as a single string.
    """
    model_name = "Helsinki-NLP/opus-mt-en-pt"
    if not hasattr(translate_to_pt, "_cache"):
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name).to(DEVICE)
        translate_to_pt._cache = (tokenizer, model)
    tokenizer, model = translate_to_pt._cache

    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)
68
+
69
+ # === 5. Pipeline principal ===
70
# === 5. Main pipeline ===
def dub_youtube_video(youtube_url, target_lang="pt-PT"):
    """Download a YouTube video and produce a dubbed, lip-synced version.

    Steps: download -> extract audio -> transcribe -> translate ->
    clone voice (CosyVoice) -> find a face frame -> lip sync (LivePortrait).

    Args:
        youtube_url: URL of the source video.
        target_lang: Kept for interface compatibility; the pipeline currently
            always dubs into European Portuguese.

    Returns:
        Path to the generated, dubbed video file.
    """
    import torchaudio  # local import: only needed here to write the dubbed wav

    # Fix: the original used `with tempfile.TemporaryDirectory()` and returned
    # a path inside it — the directory (and the output video) was deleted as
    # soon as the function returned, before Gradio could serve the file.
    # mkdtemp keeps the files alive for the UI.
    tmpdir = tempfile.mkdtemp(prefix="dubbing_")
    video_path = os.path.join(tmpdir, "input.mp4")
    audio_path = os.path.join(tmpdir, "audio.wav")
    face_image = os.path.join(tmpdir, "face.jpg")
    dubbed_audio = os.path.join(tmpdir, "dubbed.wav")
    output_video = os.path.join(tmpdir, "output.mp4")

    # 1. Download the video
    gr.Info("📥 A baixar vídeo do YouTube...")
    download_youtube_video(youtube_url, video_path)

    # 2. Extract the audio track
    gr.Info("🔊 A extrair áudio...")
    extract_audio(video_path, audio_path)

    # 3. Transcribe
    gr.Info("📝 A transcrever...")
    text_en = transcribe_audio(audio_path)

    # 4. Translate
    gr.Info("🔄 A traduzir para português de Portugal...")
    text_pt = translate_to_pt(text_en)

    # 5. Voice cloning with CosyVoice
    gr.Info("🗣️ A clonar voz e gerar áudio dublado...")
    cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=False, fp16=False)
    prompt_audio = load_wav(audio_path, 16000)[:80000]  # first ~5 s at 16 kHz
    for chunk in cosyvoice.inference_zero_shot(
        text=text_pt,
        prompt_text=text_en[:100],  # short excerpt of the original as voice prompt
        prompt_speech_16k=prompt_audio,
        stream=False
    ):
        # Fix: the original did `torch.save(tensor, "dubbed.wav")`, which
        # writes a pickled tensor — not audio — so LivePortrait received an
        # unreadable file. Encode a real wav at the model's sample rate.
        torchaudio.save(dubbed_audio, chunk['tts_speech'], cosyvoice.sample_rate)
        break  # only the first synthesized chunk is used (simplified)

    # 6. Extract a face frame
    gr.Info("👤 A encontrar rosto...")
    extract_face_frame(video_path, face_image)

    # 7. Lip sync with LivePortrait
    gr.Info("🎬 A gerar lip sync realista...")
    lp = LivePortraitPipeline()
    lp.generate(source_image=face_image, driving_audio=dubbed_audio, output_path=output_video)

    return output_video
117
+
118
# === Gradio interface ===
# User-facing strings (Portuguese UI text, reproduced verbatim).
APP_TITLE = "🎥 Dublagem Automática com Voz Clonada + Lip Sync Realista"
APP_DESCRIPTION = """
    Insira um link do YouTube. A app irá:
    - Baixar o vídeo
    - Transcrever e traduzir para **português de Portugal**
    - Clonar a voz original
    - Gerar novo vídeo com **lip sync realista**
    """

url_input = gr.Textbox(
    label="URL do YouTube",
    placeholder="https://www.youtube.com/watch?v=...",
)
video_output = gr.Video(label="Vídeo Dublado (pt-PT)")

# Single-function app: URL in, dubbed video out.
iface = gr.Interface(
    fn=dub_youtube_video,
    inputs=url_input,
    outputs=video_output,
    title=APP_TITLE,
    description=APP_DESCRIPTION,
)
132
+
133
+ if __name__ == "__main__":
134
+ # Baixar modelos na primeira execução
135
+ from modelscope import snapshot_download
136
+ if not os.path.exists("pretrained_models/CosyVoice2-0.5B"):
137
+ snapshot_download('iic/CosyVoice2-0.5B', local_dir='pretrained_models/CosyVoice2-0.5B')
138
+ iface.launch(server_name="0.0.0.0", server_port=7860)
Dubbing_app/requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio==4.40.0
2
+ yt-dlp
3
+ moviepy
4
+ opencv-python
5
+ facenet-pytorch
6
+ torch>=2.0.0
7
+ torchaudio>=2.0.0
8
+ transformers>=4.36.0
9
+ sentencepiece
10
+ pyyaml
11
+ tqdm
12
+ librosa>=0.10.0
13
+ soundfile
14
+ modelscope
Dubbing_app/run.bat ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
REM Install the base Python dependencies, then the three local wheel files
REM (ASR toolkit, voice cloning, lip sync) that are not published on PyPI.
REM NOTE(review): assumes the .whl files sit next to this script — verify.
pip install -r requirements.txt
pip install qwen3_asr_toolkit-1.0.4-py3-none-any.whl
pip install cosyvoice-2.0.0-py3-none-any.whl
pip install liveportrait-1.0.0-py3-none-any.whl