import json
import sys, os, os.path as osp
import asyncio

import requests
import fire
import pandas as pd
from concurrent.futures import ProcessPoolExecutor


def ytb_download(uid, url, json_info, output_dir="ytb_videos/"):
    os.makedirs(output_dir, exist_ok=True)

    video_path = osp.join(output_dir, f"{uid}.mp4")
    meta_path = osp.join(output_dir, f"{uid}.json")
    if osp.exists(video_path) and osp.exists(meta_path):
        print(f"{uid} already downloaded.")
        return 0

    if osp.exists(video_path):
        print(f"[video] {uid} already downloaded.")
    else:
        # Fetch the video; return -1 so failures can be tallied by the caller.
        with requests.get(url) as resp:
            if resp.status_code != 200:
                print(f"{uid} failed to fetch.")
                return -1
            print(f"downloading {uid}: {url} to {output_dir}")
            with open(video_path, "wb") as f:
                f.write(resp.content)

    # Write the per-video metadata next to the mp4.
    if not osp.exists(meta_path):
        with open(meta_path, "w") as fp:
            json.dump(json_info, fp, indent=2)
    return 0


async def main(csv_path, max_workers=2, shards=0, total=-1, limit=False):
    PPE = ProcessPoolExecutor(max_workers=max_workers)
    loop = asyncio.get_running_loop()

    df = pd.read_csv(csv_path)
    output_dir = osp.splitext(csv_path)[0]

    tasks = []
    data_list = df.iterrows()

    # When total > 0, split the rows into `total` shards and keep only the rows
    # belonging to shard index `shards`; the last shard takes the remainder.
    if total > 0:
        data_list = list(df.iterrows())
        chunk = len(data_list) // total
        begin_idx = shards * chunk
        end_idx = (shards + 1) * chunk
        if shards == total - 1:
            end_idx = len(data_list)
        data_list = data_list[begin_idx:end_idx]
        print(f"Downloading total {len(data_list)} videos")

    for idx, (index, row) in enumerate(data_list):
        uid = row["videoid"]
        url = row["contentUrl"]

        try:
            json_info = {
                "name": row["name"],
                "url": url,
                "duration": row["duration"],
            }
        except KeyError as e:
            print(row)
            print(row.keys())
            print(e)
            sys.exit(1)

        # Run each download in the process pool so blocking requests calls
        # do not stall the event loop.
        tasks.append(
            loop.run_in_executor(PPE, ytb_download, uid, url, json_info, output_dir)
        )
        if idx >= 20 and limit:
            break
    res = await asyncio.gather(*tasks)

    # ytb_download returns 0 on success and -1 on failure.
    num_ok = sum(r == 0 for r in res)
    print(f"[{num_ok} / {len(res)}]")

    output_log = f"log_{output_dir}.json"

    key = "total"
    if total > 0:
        key = f"{shards}-of-{total}"

    json_output = {}
    if osp.exists(output_log):
        with open(output_log, "r") as fp:
            json_output = json.load(fp)

    json_output[key] = f"[{num_ok} / {len(res)}]"

    with open(output_log, "w") as fp:
        json.dump(json_output, fp, indent=2)


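# The shard log written by main() has roughly this shape (values are illustrative):
#   {
#     "0-of-3": "[1180 / 1250]",
#     "total": "[70 / 70]"
#   }

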
def entry(csv="panda70m_testing.csv", shards=0, total=-1, limit=False):
    asyncio.run(main(csv, shards=shards, total=total, limit=limit))


if __name__ == "__main__":
    fire.Fire(entry)
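

# Example invocation via python-fire (the script name "download_panda70m.py" is
# illustrative; the CSV is assumed to contain "videoid", "contentUrl", "name"
# and "duration" columns, as in the Panda-70M metadata files):
#   python download_panda70m.py --csv panda70m_testing.csv --shards 0 --total 8
#   python download_panda70m.py --csv panda70m_testing.csv --limit True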