|
|
import os |
|
|
import json |
|
|
import subprocess |
|
|
import re |
|
|
import boto3 |
|
|
from botocore.config import Config |
|
|
from botocore.exceptions import ClientError |
|
|
import tempfile |
|
|
import shutil |
|
|
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed |
|
|
from multiprocessing import Manager, cpu_count |
|
|
from tqdm import tqdm |
|
|
import threading |
|
|
|
|
|
|
|
|
# Which dataset split this worker instance handles (used in paths below).
part_id = 2


# --- S3 / object-storage connection settings ------------------------------
# SECURITY NOTE(review): credentials were hard-coded in this file. They are
# now read from the environment first, with the original literals kept only
# as backward-compatible fallbacks — rotate these keys and delete the
# fallbacks as soon as possible.
S3_ENDPOINT_URL = os.environ.get("S3_ENDPOINT_URL", "https://t3.storage.dev")

AWS_ACCESS_KEY_ID = os.environ.get(
    "AWS_ACCESS_KEY_ID",
    "tid_cqKPLHboixMUUQxq_ImANLFwrehWmWZHlEaPZXzXNbKxf_fugg")

AWS_SECRET_ACCESS_KEY = os.environ.get(
    "AWS_SECRET_ACCESS_KEY",
    "tsec_CXLclBpmOD2blVqdL+smpI52cOxQiXs-pH-INnfU6yfhc1MAajUTpI7xWO+5YAyLwyXjpq")

AWS_REGION = os.environ.get("AWS_REGION", "auto")

# Bucket holding the raw downloads; raw videos live under S3_PREFIX.
S3_BUCKET = "youtube-downloads"

S3_PREFIX = "VideoRaw"


# Local annotation inputs for this split, and where to record videos that
# were missing from S3 or failed to download.
MERGED_ANNO_DIR = f"/share/eason/SpeakerVid/SpeakerVid-5M-Dataset/merged_anno_1_6/split1-6/split_{part_id}/"

MISSING_VIDEOS_FILE = f"/share/eason/SpeakerVid/missing_videos_{part_id}.txt"


# Prefix under S3_BUCKET where the cut clips (mp4 + wav) are uploaded.
S3_OUTPUT_PREFIX = "SenceCutV3"


# The longest side of an output clip is capped at this many pixels.
MAX_VIDEO_RES = 1920


# Process-level parallelism (one process per video) times thread-level
# parallelism (clips within a video) bounds concurrent ffmpeg invocations.
NUM_VIDEO_WORKERS = 32

NUM_CLIP_WORKERS = 4


# Per-thread storage so each worker thread/process lazily builds its own
# boto3 client (boto3 clients are not guaranteed thread-safe to share).
_thread_local = threading.local()
|
|
|
|
|
|
|
|
def get_s3_client():
    """Return a thread/process-local boto3 S3 client, creating it on first use."""
    client = getattr(_thread_local, 's3_client', None)
    if client is None:
        client = boto3.client(
            's3',
            endpoint_url=S3_ENDPOINT_URL,
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_REGION,
            config=Config(signature_version='s3v4'),
        )
        _thread_local.s3_client = client
    return client
|
|
|
|
|
|
|
|
def check_s3_file_exists(bucket, key):
    """Return True when the object exists, False on 404; re-raise other errors."""
    try:
        get_s3_client().head_object(Bucket=bucket, Key=key)
    except ClientError as err:
        if err.response['Error']['Code'] == '404':
            return False
        raise
    return True
|
|
|
|
|
|
|
|
def download_from_s3(bucket, key, local_path):
    """Download one S3 object to *local_path*; return True on success."""
    try:
        get_s3_client().download_file(bucket, key, local_path)
    except ClientError as err:
        print(f"下载失败: {err}")
        return False
    return True
|
|
|
|
|
|
|
|
def upload_to_s3(bucket, local_path, s3_key):
    """Upload the local file to s3://bucket/s3_key; return True on success."""
    try:
        get_s3_client().upload_file(local_path, bucket, s3_key)
    except ClientError as err:
        print(f"上传失败: {err}")
        return False
    return True
|
|
|
|
|
|
|
|
def organize_videos_from_folder(folder_path: str) -> dict:
    """Group annotation JSON paths by their source video id.

    Scans *folder_path* for files named like
    ``<video_id>[_WxH]_full_video_<a>_<b>_<c>....json`` and returns a dict
    mapping ``video_id`` -> list of JSON file paths.  Files whose suffix
    after ``_full_video_`` has fewer than three ``_``-separated parts are
    skipped.  Returns an empty dict when the folder is missing or an
    unexpected error occurs (partial results are discarded in that case,
    matching the original behavior).
    """
    video_data = {}

    try:
        files = os.listdir(folder_path)

        # os.listdir yields bare filenames, so no basename() call is needed.
        for filename in tqdm(files, desc="扫描 JSON 文件"):
            if not filename.endswith(".json"):
                continue

            if '_full_video_' not in filename:
                continue

            parts = filename.split('_full_video_')

            # Strip an optional trailing "_WIDTHxHEIGHT" resolution tag
            # from the id portion.
            video_id = re.sub(r'_\d+x\d+$', '', parts[0])

            # Expect at least three underscore-separated fields after the
            # "_full_video_" marker; otherwise the filename is malformed.
            if len(parts[1].split('_')) < 3:
                continue

            video_data.setdefault(video_id, []).append(
                os.path.join(folder_path, filename))

    except FileNotFoundError:
        print(f"错误: 文件夹 '{folder_path}' 不存在")
        return {}
    except Exception as e:
        print(f"处理文件时发生错误: {e}")
        return {}

    return video_data
|
|
|
|
|
|
|
|
def process_single_clip(args):
    """Cut, crop, and upload a single video clip (runs on a worker thread).

    Args:
        args: tuple ``(org_video_path, json_file, temp_dir, max_video_res)``:
            org_video_path: local path of the downloaded full video.
            json_file: annotation JSON describing one clip (bbox, timing).
            temp_dir: scratch directory for intermediate mp4/wav files.
            max_video_res: cap (pixels) on the longest side of the output.

    Returns:
        ``(True, json_filename)`` on success, or ``(False, message)`` on any
        failure.  Intermediate temp files are always removed before
        returning (fix: the original removed them only on success, leaving
        stale files behind after ffmpeg/upload failures).
    """
    org_video_path, json_file, temp_dir, max_video_res = args
    json_filename = os.path.basename(json_file).split('.')[0]

    # Define temp paths up front so the finally-block can always clean them,
    # even when the JSON fails to parse.
    temp_video_path = os.path.join(temp_dir, f"{json_filename}_{os.getpid()}.mp4")
    temp_audio_path = os.path.join(temp_dir, f"{json_filename}_{os.getpid()}.wav")

    try:
        with open(json_file, 'r') as f:
            clip_info = json.load(f)

        # bbox is stored normalized [x0, y0, x1, y1]; convert to pixels.
        box = clip_info['bbox']
        height = clip_info['raw_video_height']
        width = clip_info['raw_video_width']
        box = [box[0] * width, box[1] * height, box[2] * width, box[3] * height]
        start_time = clip_info['start_seconds']
        duration = clip_info['video_total_duration']

        crop_w = int(box[2] - box[0])
        crop_h = int(box[3] - box[1])
        crop_x = int(box[0])
        crop_y = int(box[1])

        # libx264 requires even frame dimensions; shrink odd values by one.
        # The offsets are forced even as well (kept from the original logic;
        # strictly only width/height need to be even — TODO confirm intent).
        if crop_w % 2 != 0:
            crop_w -= 1
        if crop_h % 2 != 0:
            crop_h -= 1
        if crop_x % 2 != 0:
            crop_x -= 1
        if crop_y % 2 != 0:
            crop_y -= 1

        video_name = os.path.basename(org_video_path).split('.')[0]

        # Destination layout: <prefix>/<video>/<clip>.{mp4,wav}
        s3_video_key = f"{S3_OUTPUT_PREFIX}/{video_name}/{json_filename}.mp4"
        s3_audio_key = f"{S3_OUTPUT_PREFIX}/{video_name}/{json_filename}.wav"

        # Downscale proportionally so the longest side <= max_video_res.
        max_scale = max(crop_w, crop_h)
        if max_scale > max_video_res:
            scale_w = int(crop_w / max_scale * max_video_res)
            scale_h = int(crop_h / max_scale * max_video_res)
        else:
            scale_w = crop_w
            scale_h = crop_h

        # Output dimensions must be even for the encoder too.
        if scale_w % 2 != 0:
            scale_w -= 1
        if scale_h % 2 != 0:
            scale_h -= 1

        # Video pass: -ss before -i seeks by keyframe (fast); audio dropped.
        # NOTE(review): '-q:v' is a no-op with libx264 — presumably left over
        # from an mjpeg pipeline; kept to avoid altering the command.
        video_command = [
            '/usr/bin/ffmpeg',
            '-y',
            '-ss', str(start_time),
            '-i', org_video_path,
            '-t', str(duration),
            '-vf', f'crop={crop_w}:{crop_h}:{crop_x}:{crop_y},scale={scale_w}:{scale_h}',
            '-an',
            '-q:v', '2',
            '-r', '25',
            '-vcodec', 'libx264',
            temp_video_path,
        ]
        subprocess.run(video_command, check=True, capture_output=True, text=True)

        # Audio pass: -ss after -i decodes from the start (slower but
        # sample-accurate); mono 16 kHz PCM.
        audio_command = [
            '/usr/bin/ffmpeg',
            '-y',
            '-i', org_video_path,
            '-ss', str(start_time),
            '-t', str(duration),
            '-vn',
            '-acodec', 'pcm_s16le',
            '-ar', '16000',
            '-ac', '1',
            temp_audio_path
        ]
        subprocess.run(audio_command, check=True, capture_output=True, text=True)

        if not upload_to_s3(S3_BUCKET, temp_video_path, s3_video_key):
            return False, f"{json_filename}: 视频上传失败"

        if not upload_to_s3(S3_BUCKET, temp_audio_path, s3_audio_key):
            return False, f"{json_filename}: 音频上传失败"

        return True, json_filename

    except subprocess.CalledProcessError as e:
        return False, f"{json_filename}: FFmpeg 错误 - {e.stderr[:200] if e.stderr else 'unknown'}"
    except Exception as e:
        return False, f"{json_filename}: 处理错误 - {str(e)}"
    finally:
        # Remove intermediates on every path (success, ffmpeg error, upload
        # failure) so the per-video scratch dir does not accumulate files.
        for path in (temp_video_path, temp_audio_path):
            if os.path.exists(path):
                os.remove(path)
|
|
|
|
|
|
|
|
def process_video(args):
    """Process every clip of one video (runs in a worker process).

    Args:
        args: tuple ``(video_id, json_list, temp_base_dir)``.

    Returns:
        ``(video_name, status, success_count, fail_count)`` where status is
        one of "missing", "download_failed", or "processed".
    """
    video_id, json_list, temp_base_dir = args

    video_name = video_id + '.mp4'
    s3_key = f"{S3_PREFIX}/{video_name}"

    # Skip videos whose raw file was never uploaded to S3.
    if not check_s3_file_exists(S3_BUCKET, s3_key):
        return video_name, "missing", 0, 0

    # Per-video scratch directory; always removed in the finally-block.
    temp_dir = os.path.join(temp_base_dir, f"video_{os.getpid()}_{video_id}")
    os.makedirs(temp_dir, exist_ok=True)

    try:
        local_video_path = os.path.join(temp_dir, video_name)

        if not download_from_s3(S3_BUCKET, s3_key, local_video_path):
            return video_name, "download_failed", 0, 0

        ok = 0
        bad = 0
        tasks = [
            (local_video_path, json_path, temp_dir, MAX_VIDEO_RES)
            for json_path in json_list
        ]

        # Fan the clips of this video out across a small thread pool.
        with ThreadPoolExecutor(max_workers=NUM_CLIP_WORKERS) as pool:
            pending = [pool.submit(process_single_clip, task) for task in tasks]
            for fut in pending:
                try:
                    succeeded, _msg = fut.result()
                except Exception:
                    bad += 1
                else:
                    if succeeded:
                        ok += 1
                    else:
                        bad += 1

        return video_name, "processed", ok, bad

    finally:
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir, ignore_errors=True)
|
|
|
|
|
|
|
|
def main():
    """Run the full pipeline for this split: scan annotations, then for each
    video download the raw file from S3, cut/crop its clips in parallel,
    upload the results, and finally report statistics and record failures."""
    # Report the parallelism configuration up front.
    print(f"CPU 数量: {cpu_count()}")
    print(f"视频并行数: {NUM_VIDEO_WORKERS}")
    print(f"Clip 并行数: {NUM_CLIP_WORKERS}")
    print(f"预计同时处理: {NUM_VIDEO_WORKERS * NUM_CLIP_WORKERS} 个 FFmpeg 进程")
    print()

    # Build {video_id: [annotation json paths]} from the merged annotations.
    print(f"正在扫描 annotation 文件... {MERGED_ANNO_DIR}")
    json_data = organize_videos_from_folder(MERGED_ANNO_DIR)
    print(f"共找到 {len(json_data)} 个视频的 annotation 数据")
    print(f"输出目标: s3://{S3_BUCKET}/{S3_OUTPUT_PREFIX}/")

    # Scratch space for downloads and intermediate clips (removed below).
    os.makedirs("/data/tmp", exist_ok=True)
    temp_base_dir = tempfile.mkdtemp(prefix="s3_video_parallel_", dir="/data/tmp")
    print(f"临时目录: {temp_base_dir}")

    missing_videos = []    # raw videos absent from S3
    download_failed = []   # raw videos present but failed to download
    total_success = 0      # clips cut and uploaded successfully
    total_fail = 0         # clips that errored
    try:
        video_args = [
            (video_id, json_list, temp_base_dir)
            for video_id, json_list in json_data.items()
        ]

        # One worker process per video; each process fans its clips out over
        # its own thread pool (see process_video / process_single_clip).
        with ProcessPoolExecutor(max_workers=NUM_VIDEO_WORKERS) as executor:
            futures = {executor.submit(process_video, arg): arg[0] for arg in video_args}

            with tqdm(total=len(futures), desc="处理视频") as pbar:
                # Consume results as they finish to keep the bar responsive.
                for future in as_completed(futures):
                    video_id = futures[future]
                    try:
                        video_name, status, success, fail = future.result()

                        if status == "missing":
                            missing_videos.append(video_name)
                            pbar.set_postfix_str(f"缺失: {video_name}")
                        elif status == "download_failed":
                            download_failed.append(video_name)
                            pbar.set_postfix_str(f"下载失败: {video_name}")
                        else:
                            total_success += success
                            total_fail += fail
                            pbar.set_postfix_str(f"完成: {video_name} ({success}✓/{fail}✗)")

                    except Exception as e:
                        print(f"\n处理 {video_id} 时发生错误: {e}")

                    pbar.update(1)

    finally:
        # Always remove the scratch directory, even on interrupt/failure.
        if os.path.exists(temp_base_dir):
            shutil.rmtree(temp_base_dir, ignore_errors=True)
            print(f"\n已清理临时目录: {temp_base_dir}")

    # Persist the list of unprocessable videos so they can be retried later.
    all_failed = missing_videos + download_failed
    if all_failed:
        with open(MISSING_VIDEOS_FILE, 'w') as f:
            for video in all_failed:
                f.write(video + '\n')
        print(f"\n缺失/失败视频已保存到: {MISSING_VIDEOS_FILE}")

    # Final summary.
    print("\n" + "=" * 50)
    print("处理完成统计:")
    print(f" 成功处理 clips: {total_success}")
    print(f" 失败 clips: {total_fail}")
    print(f" 缺失视频: {len(missing_videos)}")
    print(f" 下载失败: {len(download_failed)}")
    print("=" * 50)
|
|
|
|
|
|
|
|
# Script entry point: run the full download/cut/upload pipeline.
if __name__ == '__main__':
    main()
|
|
|
|
|
|