| """Ray 并行原始视频拉取工具。 |
| |
| 无需经过 V2VDatasetV1 的图像/音频预处理,直接把云端视频 |
| 下载到本地(或返回字节数据),并输出 data_id 便于后续 YOLO |
| 或其他处理流水线消费。 |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import itertools |
| import os |
| import tempfile |
| from typing import Dict, Iterable, Iterator, List, Optional |
|
|
| import ray |
| import requests |
|
|
|
|
def load_file_names(file_list: str) -> List[str]:
    """Read the video file names from *file_list*, one per line.

    Blank lines and surrounding whitespace are discarded.

    Raises:
        ValueError: if the file yields no usable entries.
    """
    entries: List[str] = []
    with open(file_list, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if stripped:
                entries.append(stripped)
    if not entries:
        raise ValueError(f"file_list {file_list} 为空")
    return entries
|
|
|
|
@ray.remote(num_cpus=1)
class VideoDownloadWorker:
    """Ray actor that downloads one raw video per ``fetch`` call.

    Every actor holds the full file-name list; ``fetch(data_id)`` maps the
    id onto a file name (modulo list length), downloads it over HTTP with
    retries, writes the payload to disk, and returns a result record.
    """

    def __init__(
        self,
        file_names: List[str],
        base_url: str,
        timeout: float = 30.0,
        store_dir: Optional[str] = None,
        return_bytes: bool = False,
        max_retries: int = 3,
    ) -> None:
        self.file_names = file_names
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout
        self.store_dir = store_dir
        self.return_bytes = return_bytes
        self.max_retries = max_retries
        # Make sure the target directory exists before any download lands.
        if self.store_dir:
            os.makedirs(self.store_dir, exist_ok=True)

    def _allocate_path(self, data_id: int, file_name: str) -> str:
        """Choose a destination path: inside store_dir, else a temp file."""
        if self.store_dir:
            # Prefix with data_id so repeated file names cannot collide.
            return os.path.join(
                self.store_dir, f"{data_id}_{os.path.basename(file_name)}"
            )
        ext = os.path.splitext(file_name)[-1] or ".mp4"
        handle = tempfile.NamedTemporaryFile(delete=False, suffix=ext)
        handle.close()
        return handle.name

    def _download(self, url: str):
        """Try the URL up to max_retries times.

        Returns ``(content, None)`` on success, ``(None, last_error)`` when
        every attempt failed (``last_error`` stays ``None`` if no attempt ran).
        """
        failure = None
        for _ in range(self.max_retries):
            try:
                response = requests.get(url, timeout=self.timeout)
                response.raise_for_status()
                return response.content, None
            except Exception as exc:  # keep the actor alive; report instead
                failure = exc
        return None, failure

    def fetch(self, data_id: int) -> Dict:
        """Download the video for *data_id* and describe the outcome.

        Returns a dict with ``data_id``/``file_name``/``url`` plus either
        ``failed``+``error`` on failure, or ``path`` (and ``bytes`` when
        ``return_bytes`` is set) on success.
        """
        file_name = self.file_names[data_id % len(self.file_names)]
        url = f"{self.base_url}/{file_name}"

        payload, failure = self._download(url)
        if payload is None:
            return {
                "data_id": data_id,
                "file_name": file_name,
                "url": url,
                "failed": True,
                "error": str(failure) if failure else "unknown error",
            }

        target_path = self._allocate_path(data_id, file_name)
        with open(target_path, "wb") as sink:
            sink.write(payload)

        record = {
            "data_id": data_id,
            "file_name": file_name,
            "url": url,
            "path": target_path,
        }
        if self.return_bytes:
            record["bytes"] = payload
        return record
|
|
|
|
class RayVideoFetcher:
    """Fan raw-video downloads out across a pool of Ray download actors."""

    def __init__(
        self,
        file_list: str,
        base_url: str,
        num_workers: int = 4,
        timeout: float = 30.0,
        store_dir: Optional[str] = None,
        return_bytes: bool = False,
        max_retries: int = 3,
        ray_address: Optional[str] = None,
    ) -> None:
        self.file_names = load_file_names(file_list)
        self.dataset_size = len(self.file_names)

        # Attach to an existing cluster (or start a local one) exactly once.
        if not ray.is_initialized():
            ray.init(address=ray_address, ignore_reinit_error=True)

        self.workers = [
            VideoDownloadWorker.remote(
                file_names=self.file_names,
                base_url=base_url,
                timeout=timeout,
                store_dir=store_dir,
                return_bytes=return_bytes,
                max_retries=max_retries,
            )
            for _ in range(num_workers)
        ]

    def fetch_batch(self, indices: Iterable[int]) -> List[Dict]:
        """Download the given data_ids in parallel, preserving input order."""
        wanted = list(indices)
        if not wanted:
            return []

        position = {idx: pos for pos, idx in enumerate(wanted)}
        # Round-robin the ids over the actor pool.
        pending = [
            self.workers[slot % len(self.workers)].fetch.remote(idx)
            for slot, idx in enumerate(wanted)
        ]
        records = ray.get(pending)
        records.sort(key=lambda rec: position[rec["data_id"]])
        return records

    def stream(
        self,
        start: int = 0,
        stop: Optional[int] = None,
        batch_size: int = 4,
    ) -> Iterator[List[Dict]]:
        """Yield batches of download records for ids in ``[start, stop)``.

        ``stop=None`` means "to the end of the file list".
        """
        upper = self.dataset_size if stop is None else stop

        cursor = start
        while cursor < upper:
            chunk = list(itertools.islice(range(cursor, upper), batch_size))
            if not chunk:
                break
            cursor = chunk[-1] + 1
            yield self.fetch_batch(chunk)
|
|
|
|
def demo(args: argparse.Namespace) -> None:
    """Drive RayVideoFetcher from parsed CLI args and print each record."""
    fetcher = RayVideoFetcher(
        file_list=args.file_list,
        base_url=args.base_url,
        num_workers=args.num_workers,
        timeout=args.timeout,
        store_dir=args.store_dir,
        return_bytes=args.return_bytes,
        max_retries=args.max_retries,
    )

    succeeded = 0
    batches = fetcher.stream(start=args.start, stop=args.stop, batch_size=args.batch_size)
    for batch in batches:
        for record in batch:
            if record.get("failed"):
                print(
                    f"data_id={record['data_id']}, file={record['file_name']} 下载失败: "
                    f"{record.get('error', 'unknown error')}"
                )
                continue

            succeeded += 1
            print(
                f"data_id={record['data_id']}, file={record['file_name']}, "
                f"saved_to={record['path']}"
            )
            if args.return_bytes:
                print(f" bytes={len(record['bytes'])}")
            # Early exit once the demo has shown enough successful downloads.
            if args.limit and succeeded >= args.limit:
                return
|
|
|
|
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Build the CLI parser and parse arguments.

    Args:
        argv: Explicit argument list to parse. ``None`` (the default) keeps
            the original behavior of reading ``sys.argv[1:]``; passing a
            list makes the function testable and embeddable.

    Returns:
        The parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description="Ray 并行视频下载示例")
    parser.add_argument("--file-list", default="meta_data_training/file_list.txt")
    parser.add_argument("--base-url", default="http://127.0.0.1:8081")
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--batch-size", type=int, default=4)
    parser.add_argument("--start", type=int, default=0)
    parser.add_argument("--stop", type=int, default=None, help="停止的 data_id (不含),默认拉到文件末尾")
    parser.add_argument("--limit", type=int, default=0, help="demo 用,拉多少条后退出 (0=不限制)")
    parser.add_argument("--timeout", type=float, default=30.0)
    parser.add_argument("--store-dir", default=None, help="保存视频的目录;缺省用临时文件")
    parser.add_argument("--return-bytes", action="store_true", help="除保存到文件外,返回二进制内容")
    parser.add_argument("--max-retries", type=int, default=3)
    return parser.parse_args(argv)
|
|
|
|
# Script entry point: parse CLI flags and run the download demo.
if __name__ == "__main__":
    demo(parse_args())
|
|