| |
| |
| |
| |
|
|
| from typing import List, Tuple |
| import csv |
| import errno |
| import hashlib |
| import logging |
| import os |
| import sys |
| import tarfile |
| import threading |
| import zipfile |
| from _io import TextIOWrapper |
| from queue import Queue |
| from typing import Any, Iterable, List, Optional, Tuple, Union |
|
|
| import torch |
| import urllib |
| import urllib.request |
| from torch.utils.data import Dataset |
| from torch.utils.model_zoo import tqdm |
|
|
|
|
def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]:
    """Parse a manifest file listing audio files and their sizes.

    The first line of the manifest is the root directory; each subsequent
    line is ``<relative file name>\\t<size>``.

    Args:
        manifest_path (str): Path to the manifest file.

    Returns:
        Tuple[str, List[str], List[int]]: The root directory, the relative
        file names, and the corresponding integer sizes.

    Raises:
        ValueError: If a manifest line does not have exactly two
            tab-separated columns.
    """
    fnames: List[str] = []
    sizes: List[int] = []
    with open(manifest_path, "r") as f:
        root_dir = f.readline().strip()
        for line in f:
            items = line.strip().split("\t")
            # `assert` is stripped when Python runs with -O, so validate
            # with an explicit raise instead.
            if len(items) != 2:
                raise ValueError(
                    f"File must have two columns separated by tab. Got {line}"
                )
            fnames.append(items[0])
            sizes.append(int(items[1]))
    return root_dir, fnames, sizes
|
|
|
|
def download_url(
    url: str,
    download_folder: str,
    filename: Optional[str] = None,
    hash_value: Optional[str] = None,
    hash_type: str = "sha256",
    progress_bar: bool = True,
    resume: bool = False,
) -> None:
    """Download a file from ``url`` into ``download_folder``.

    Args:
        url (str): Url.
        download_folder (str): Destination folder for the download.
        filename (str, optional): Name for the downloaded file; inferred from
            the server response or the url when ``None`` (Default: ``None``).
        hash_value (str, optional): Expected hash of the file (Default: ``None``).
        hash_type (str, optional): Hash type, among "sha256" and "md5"
            (Default: ``"sha256"``).
        progress_bar (bool, optional): Display a progress bar (Default: ``True``).
        resume (bool, optional): Enable resuming a partial download
            (Default: ``False``).
    """
    # Query the server metadata (suggested filename, content length) with a
    # HEAD request so nothing is downloaded yet.
    head_request = urllib.request.Request(url, method="HEAD")
    url_info = urllib.request.urlopen(head_request).info()

    filename = filename or url_info.get_filename() or os.path.basename(url)
    filepath = os.path.join(download_folder, filename)

    mode = "wb"
    local_size: Optional[int] = None
    if os.path.exists(filepath):
        if not resume:
            raise RuntimeError(
                "{} already exists. Delete the file manually and retry.".format(filepath)
            )
        # Resuming: append to the partial file and remember how much we have.
        mode = "ab"
        local_size = os.path.getsize(filepath)

    # If the local file already matches the advertised Content-Length,
    # validate it instead of re-downloading.
    if hash_value and local_size == int(url_info.get("Content-Length", -1)):
        with open(filepath, "rb") as file_obj:
            if validate_file(file_obj, hash_value, hash_type):
                return
        raise RuntimeError(
            "The hash of {} does not match. Delete the file manually and retry.".format(
                filepath
            )
        )

    # Stream the (remaining) bytes to disk.
    with open(filepath, mode) as fpointer:
        for chunk in stream_url(url, start_byte=local_size, progress_bar=progress_bar):
            fpointer.write(chunk)

    # Verify the completed download against the expected hash, if given.
    with open(filepath, "rb") as file_obj:
        if hash_value and not validate_file(file_obj, hash_value, hash_type):
            raise RuntimeError(
                "The hash of {} does not match. Delete the file manually and retry.".format(
                    filepath
                )
            )
|
|
|
|
def validate_file(file_obj: Any, hash_value: str, hash_type: str = "sha256") -> bool:
    """Validate a given file object with its hash.

    Args:
        file_obj: File object to read from (must be opened in binary mode).
        hash_value (str): Expected hex digest.
        hash_type (str, optional): Hash type, among "sha256" and "md5"
            (Default: ``"sha256"``).

    Returns:
        bool: True if the file's digest equals ``hash_value``, else False.

    Raises:
        ValueError: If ``hash_type`` is not a supported algorithm.
    """
    if hash_type == "sha256":
        hash_func = hashlib.sha256()
    elif hash_type == "md5":
        hash_func = hashlib.md5()
    else:
        # Include the offending value so the failure is diagnosable
        # (the original raised a bare ValueError with no message).
        raise ValueError("Unsupported hash type: {}".format(hash_type))

    # Hash in 1 MiB chunks so large files never need to fit in memory.
    while chunk := file_obj.read(1024**2):
        hash_func.update(chunk)

    return hash_func.hexdigest() == hash_value
|
|
|
|
def extract_archive(
    from_path: str, to_path: Optional[str] = None, overwrite: bool = False
) -> List[str]:
    """Extract archive.

    Args:
        from_path (str): the path of the archive.
        to_path (str, optional): the root path of the extracted files
            (directory of from_path) (Default: ``None``)
        overwrite (bool, optional): overwrite existing files (Default: ``False``)

    Returns:
        list: List of paths to extracted files even if not overwritten.

    Examples:
        >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz'
        >>> from_path = './validation.tar.gz'
        >>> to_path = './'
        >>> torchaudio.datasets.utils.download_url(url, from_path)
        >>> torchaudio.datasets.utils.extract_archive(from_path, to_path)
    """
    # Default extraction root: the directory containing the archive.
    if to_path is None:
        to_path = os.path.dirname(from_path)

    # Try tar first; mode "r" lets tarfile auto-detect gzip/bz2/xz
    # compression. A non-tar input raises ReadError and we fall through.
    # NOTE(review): tar.extract/zfile.extract do not sanitize member paths;
    # a malicious archive with "../" entries could write outside to_path.
    # Only use with trusted archives, or add member-path validation.
    try:
        with tarfile.open(from_path, "r") as tar:
            logging.info("Opened tar file {}.".format(from_path))
            files = []
            for file_ in tar:
                file_path = os.path.join(to_path, file_.name)
                if file_.isfile():
                    files.append(file_path)
                    if os.path.exists(file_path):
                        logging.info("{} already extracted.".format(file_path))
                        if not overwrite:
                            continue
                tar.extract(file_, to_path)
            return files
    except tarfile.ReadError:
        pass

    # Then try zip; a non-zip input raises BadZipFile and we fall through.
    try:
        with zipfile.ZipFile(from_path, "r") as zfile:
            logging.info("Opened zip file {}.".format(from_path))
            # Zip branch returns archive-relative names, unlike the tar
            # branch which returns joined paths (preserved behavior).
            files = zfile.namelist()
            for file_ in files:
                file_path = os.path.join(to_path, file_)
                if os.path.exists(file_path):
                    logging.info("{} already extracted.".format(file_path))
                    if not overwrite:
                        continue
                zfile.extract(file_, to_path)
            return files
    except zipfile.BadZipFile:
        pass

    raise NotImplementedError("We currently only support tar.gz, tgz, and zip archives.")
|
|