from huggingface_hub import HfApi
def upload_huge_dataset(dataset_root: str, repo_id: str) -> None:
    """Upload a large local folder to a Hugging Face dataset repository.

    Uses ``HfApi.upload_large_folder``, which automatically splits a huge
    file tree (16k+ files) into multiple commits and resumes from where it
    left off if a previous run failed.

    Args:
        dataset_root: Path to the local folder to upload.
        repo_id: Target repository id, e.g. ``"user/dataset-name"``.
    """
    api = HfApi()
    print(f"Starting resilient upload for {repo_id}...")
    print("This will automatically split your 16k+ files into multiple commits.")
    try:
        api.upload_large_folder(
            folder_path=dataset_root,
            repo_id=repo_id,
            repo_type="dataset",
            # Skip VCS metadata, caches, and notebook checkpoints.
            ignore_patterns=[".git", ".cache", "**/.ipynb_checkpoints"],
            # More workers speed the upload up, but watch your bandwidth.
            num_workers=4,
        )
        print("Upload complete! Check your repo.")
    except Exception as e:
        # Top-level boundary for a CLI script: report and explain how to
        # resume instead of crashing with a traceback.
        print(f"Error: {e}")
        print("If it failed halfway, just run this script again. It will resume where it left off!")
# --- CONFIG ---
# Point this to your "atc_dataset" folder where 'train_audio', 'test_audio' are.
local_folder = "."
repo = "MrEzzat/atc-dataset"

# Guard the upload so importing this module has no side effects.
if __name__ == "__main__":
    upload_huge_dataset(local_folder, repo)