code
stringlengths
17
6.64M
def wav2vec2_conformer_large_s2st_en_librilight(refresh=False, legacy=False, **kwargs):
    """Conformer-Large wav2vec 2.0 upstream (S2ST fine-tuning, en, LibriLight).

    Uses the original fairseq checkpoint when legacy=True, otherwise the
    converted HuggingFace checkpoint.
    """
    if legacy:
        ckpt = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/en/conformer_L.pt'
    else:
        ckpt = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_conformer_large_s2st_en_librilight.pt'
    kwargs['ckpt'] = ckpt
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
class UpstreamExpert(UpstreamBase):
    """WavLM upstream wrapper that exposes per-layer hidden states via hooks."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        # Rebuild the WavLM model from a converted checkpoint (config + weights).
        checkpoint = torch.load(ckpt)
        self.cfg = WavLMConfig(checkpoint['cfg'])
        self.model = WavLM(self.cfg)
        self.model.load_state_dict(checkpoint['model'])
        # Freeze the conv frontend gradients and disable layerdrop so that
        # feature extraction is deterministic.
        self.model.feature_grad_mult = 0.0
        self.model.encoder.layerdrop = 0.0
        if (len(self.hooks) == 0):
            module_name = 'self.model.encoder.layers'
            # One hook per transformer layer: each transposes the layer input
            # from (seq, batch, dim) to (batch, seq, dim).
            for module_id in range(len(eval(module_name))):
                self.add_hook(f'{module_name}[{module_id}]', (lambda input, output: input[0].transpose(0, 1)))
            self.add_hook('self.model.encoder', (lambda input, output: output[0]))
        # Remember the initial layerdrop so set_layer_drop(None) can restore it.
        self._init_layerdrop = self.model.encoder.layerdrop

    @property
    def layer_drop(self):
        # Current encoder layerdrop probability.
        return self.model.encoder.layerdrop

    def set_layer_drop(self, layerdrop: float=None):
        # Set a new layerdrop value, or restore the initial one when None.
        if isinstance(layerdrop, float):
            self.model.encoder.layerdrop = layerdrop
        elif (layerdrop is None):
            self.model.encoder.layerdrop = self._init_layerdrop
        else:
            raise ValueError('layerdrop can only be float or None')

    def get_downsample_rates(self, key: str) -> int:
        # WavLM's convolutional frontend downsamples the waveform by 320x.
        return 320

    def forward(self, wavs):
        if self.cfg.normalize:
            # Per-utterance layer norm, matching WavLM's pre-training input pipeline.
            wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        # True at padded positions, False at valid samples.
        wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1)))
        padded_wav = pad_sequence(wavs, batch_first=True)
        # No explicit return here: presumably the hooks registered in __init__
        # collect the per-layer hidden states for the caller (UpstreamBase
        # convention) — TODO confirm against UpstreamBase.
        (features, feat_padding_mask) = self.model.extract_features(padded_wav, padding_mask=wav_padding_mask, mask=False)
def wavlm_local(ckpt, *args, **kwargs):
    """Build a WavLM upstream from a checkpoint file on the local filesystem.

    Args:
        ckpt (str): path to the checkpoint file
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def wavlm_url(ckpt, refresh=False, *args, **kwargs):
    """Build a WavLM upstream from a remote checkpoint URL.

    Args:
        ckpt (str): URL of the checkpoint
        refresh (bool): whether to download ckpt/config again if existed
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return wavlm_local(local_path, *args, **kwargs)
def wavlm(refresh=False, *args, **kwargs):
    """The default WavLM entry point — an alias of the Base-Plus model.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    return wavlm_base_plus(*args, refresh=refresh, **kwargs)
def wavlm_base(refresh=False, *args, **kwargs):
    """WavLM Base model from the converted HuggingFace checkpoint.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_base.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def wavlm_base_plus(refresh=False, *args, **kwargs):
    """WavLM Base-Plus model from the converted HuggingFace checkpoint.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_base_plus.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def wavlm_large(refresh=False, *args, **kwargs):
    """WavLM Large model from the converted HuggingFace checkpoint.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_large.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def get_cache_dir():
    """Return the module-level cache directory, creating it (and parents) on demand."""
    _default_cache_dir.mkdir(parents=True, exist_ok=True)
    return _default_cache_dir
def set_cache_dir(cache_dir: str):
    """Point the module-level cache directory at `cache_dir`."""
    global _default_cache_dir
    _default_cache_dir = Path(cache_dir)
def get_audio_info(audio_paths: List[str], audio_ids: List[str], cache_dir: str=None, num_workers: int=6) -> List[dict]:
    """
    Use :code:`torchaudio.info` to retrieve the metadata from audio paths.
    The retrieved metadata is cached in :code:`cache_dir` (one JSON file
    per audio id); cached entries are read back on subsequent calls.

    Args:
        audio_paths: paths of the audio files to probe
        audio_ids: unique ids used as cache keys (parallel to audio_paths)
        cache_dir: cache directory; defaults to get_cache_dir()
        num_workers: number of joblib workers

    Returns:
        One metadata dict per audio file (sample_rate, num_frames,
        num_channels, bits_per_sample, encoding).
    """
    cache_dir = (cache_dir or get_cache_dir())
    cache_dir: Path = Path(cache_dir)

    def _get_info(audio_path: str, audio_id: str):
        # One JSON cache file per audio id.
        cache_file = (cache_dir / f'{audio_id}.json')
        if cache_file.is_file():
            with cache_file.open() as f:
                return json.load(f)
        torchaudio.set_audio_backend('sox_io')
        torchaudio_info = torchaudio.info(audio_path)
        info = {'sample_rate': torchaudio_info.sample_rate, 'num_frames': torchaudio_info.num_frames, 'num_channels': torchaudio_info.num_channels, 'bits_per_sample': torchaudio_info.bits_per_sample, 'encoding': torchaudio_info.encoding}
        # Bug fix: the docstring promises caching, but the original never
        # wrote the cache file, so every call re-probed all audio files.
        with cache_file.open('w') as f:
            json.dump(info, f)
        return info

    infos = Parallel(n_jobs=num_workers)((delayed(_get_info)(path, idx) for (path, idx) in tqdm(zip(audio_paths, audio_ids), desc='Get audio metadata')))
    return infos
class benchmark(ContextDecorator):
    """Context manager / decorator timing a CUDA-synchronized code region.

    Each duration is appended to the module-level `_history` under `name`;
    every `freq` measurements the latest and average times are logged.
    """

    def __init__(self, name: str, freq: int=1) -> None:
        super().__init__()
        self.name = name
        self.freq = freq

    def __enter__(self):
        # Synchronize first so pending GPU work is not charged to this region.
        torch.cuda.synchronize()
        self.start = time()

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # Synchronize again so the measurement covers the GPU work launched
        # inside the region.
        torch.cuda.synchronize()
        seconds = (time() - self.start)
        global _history
        _history[self.name].append(seconds)
        if ((len(_history[self.name]) % self.freq) == 0):
            logger.warning(f'{self.name}: {seconds} secs, avg {np.array(_history[self.name]).mean()} secs')
def get_dir():
    """Return the module-level download directory, creating it (and parents) on demand."""
    _download_dir.mkdir(parents=True, exist_ok=True)
    return _download_dir
def set_dir(d):
    """Redirect the module-level download directory to `d`."""
    global _download_dir
    _download_dir = Path(d)
def _download_url_to_file(url, dst, hash_prefix=None, progress=True):
    '''
    Download `url` into the file path `dst`, optionally verifying that the
    sha256 digest of the content starts with `hash_prefix`.

    This function is not thread-safe. Please ensure only a single
    thread or process can enter this block at the same time
    '''
    file_size = None
    req = Request(url, headers={'User-Agent': 'torch.hub'})
    u = urlopen(req)
    meta = u.info()
    # getheaders() is the legacy urllib API; get_all() the modern one.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders('Content-Length')
    else:
        content_length = meta.get_all('Content-Length')
    if ((content_length is not None) and (len(content_length) > 0)):
        file_size = int(content_length[0])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Stream into a temp file in the destination directory so the final
    # shutil.move stays on one filesystem.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if (hash_prefix is not None):
            sha256 = hashlib.sha256()
        tqdm.write(f'Downloading: {url}', file=sys.stderr)
        tqdm.write(f'Destination: {dst}', file=sys.stderr)
        with tqdm(total=file_size, disable=(not progress), unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            # 8 KiB chunks until EOF.
            while True:
                buffer = u.read(8192)
                if (len(buffer) == 0):
                    break
                f.write(buffer)
                if (hash_prefix is not None):
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if (hash_prefix is not None):
            digest = sha256.hexdigest()
            if (digest[:len(hash_prefix)] != hash_prefix):
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # Always close; remove the temp file if the move did not happen.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def _download_url_to_file_requests(url, dst, hash_prefix=None, progress=True):
    '''
    Alternative download when urllib.Request fails.
    Streams `url` into `dst` via the requests library, with an optional
    sha256-prefix integrity check.
    '''
    req = requests.get(url, stream=True, headers={'User-Agent': 'torch.hub'})
    file_size = int(req.headers['Content-Length'])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Temp file in the destination directory so the final move stays on one filesystem.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if (hash_prefix is not None):
            sha256 = hashlib.sha256()
        tqdm.write(f'urllib.Request method failed. Trying using another method...', file=sys.stderr)
        tqdm.write(f'Downloading: {url}', file=sys.stderr)
        tqdm.write(f'Destination: {dst}', file=sys.stderr)
        with tqdm(total=file_size, disable=(not progress), unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            # 10 MiB chunks.
            for chunk in req.iter_content(chunk_size=((1024 * 1024) * 10)):
                if chunk:
                    f.write(chunk)
                    # Flush to disk so partial progress survives interruptions.
                    f.flush()
                    os.fsync(f.fileno())
                    if (hash_prefix is not None):
                        sha256.update(chunk)
                    pbar.update(len(chunk))
        f.close()
        if (hash_prefix is not None):
            digest = sha256.hexdigest()
            if (digest[:len(hash_prefix)] != hash_prefix):
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # Always close; remove the temp file if the move did not happen.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def _download(filepath: Path, url, refresh: bool, new_enough_secs: float=2.0):
    """
    Download `url` to `filepath` under a file lock.

    If refresh is True, check the latest modified time of the filepath.
    If the file is new enough (no older than `new_enough_secs`), then directly
    use it; otherwise re-download it. This is useful when multiple processes
    are all downloading the same large file.
    """
    Path(filepath).parent.mkdir(exist_ok=True, parents=True)
    lock_file = Path((str(filepath) + '.lock'))
    logger.info(f'Requesting URL: {url}')
    # The lock serializes concurrent downloaders of the same file.
    with FileLock(str(lock_file)):
        if ((not filepath.is_file()) or (refresh and ((time.time() - os.path.getmtime(filepath)) > new_enough_secs))):
            try:
                _download_url_to_file(url, filepath)
            except Exception:
                # Bug fix: a bare `except:` here also swallowed
                # KeyboardInterrupt/SystemExit; catch Exception so those
                # still propagate while the requests fallback handles
                # ordinary download failures.
                _download_url_to_file_requests(url, filepath)
    logger.info(f"Using URL's local file: {filepath}")
def _urls_to_filepaths(*args, refresh=False, download: bool=True):
    '''
    Preprocess the URL specified in *args into local file paths after downloading

    Args:
        Any number of URLs (1 ~ any)

    Return:
        Same number of downloaded file paths
    '''
    def _url_to_filepath(url):
        assert isinstance(url, str)
        # Cache filename: sha256 of the URL followed by the URL's basename,
        # so different URLs never collide but stay human-recognizable.
        m = hashlib.sha256()
        m.update(str.encode(url))
        filepath = (get_dir() / f'{str(m.hexdigest())}.{Path(url).name}')
        if download:
            _download(filepath, url, refresh=refresh)
        return str(filepath.resolve())
    paths = [_url_to_filepath(url) for url in args]
    # A single URL returns a bare path instead of a one-element list.
    return (paths if (len(paths) > 1) else paths[0])
def parse_override(string):
    """Parse a ",,"-separated list of dotted key=value overrides into a nested dict.

    Example usgae:
    -o "optimizer.lr=1.0e-3,,optimizer.name='AdamW',,runner.eval_dataloaders=['dev', 'test']"

    Convert to:
    {
        "optimizer": {"lr": 1.0e-3, "name": "AdamW"},
        "runner": {"eval_dataloaders": ["dev", "test"]}
    }
    """
    config = {}
    for option in string.split(',,'):
        key, value_str = (part.strip() for part in option.strip().split('='))
        try:
            value = eval(value_str)
        except:
            value = value_str
        logger.info(f'{key} = {value}')
        *parents, leaf = key.split('.')
        node = config
        for field_name in parents:
            node = node.setdefault(field_name, {})
        node[leaf] = value
    return config
def parse_overrides(options: list):
    '''
    Example usgae:
    [
        "--optimizer.lr",
        "1.0e-3",
        "--optimizer.name",
        "AdamW",
        "--runner.eval_dataloaders",
        "['dev', 'test']",
    ]

    Convert to:
    {
        "optimizer": {"lr": 1.0e-3, "name": "AdamW"},
        "runner": {"eval_dataloaders": ["dev", "test"]}
    }
    '''
    config = {}
    # Options come in (--dotted.key, value) pairs.
    for position in range(0, len(options), 2):
        key: str = options[position]
        assert key.startswith('--')
        # NOTE(review): str.strip('--') removes every leading/trailing '-',
        # not just the two-character prefix — confirm keys never start with
        # extra dashes.
        key = key.strip('--')
        value_str: str = options[(position + 1)]
        (key, value_str) = (key.strip(), value_str.strip())
        remaining = key.split('.')
        try:
            # Interpret the value as a Python expression when possible.
            value = eval(value_str)
        except Exception as e:
            # Values naming these container types were meant to be evaluated;
            # failing to do so is an error rather than a plain string.
            if (('newdict' in value_str) or ('Container' in value_str)):
                raise
            value = value_str
        logger.debug(f'{key} = {value}')
        # Walk (creating as needed) the nested dicts down to the final field.
        target_config = config
        for (i, field_name) in enumerate(remaining):
            if (i == (len(remaining) - 1)):
                target_config[field_name] = value
            else:
                target_config.setdefault(field_name, {})
                target_config = target_config[field_name]
    return config
class pseudo_audio():
    '''
    This context manager returns filepaths (List[str]) and num_samples (List[int]) on entering
    '''

    def __init__(self, secs: List[float], sample_rate: int=SAMPLE_RATE):
        # Write one random wav per requested duration into a fresh temp directory.
        self.tempdir = Path(tempfile.TemporaryDirectory().name)
        self.tempdir.mkdir(parents=True, exist_ok=True)
        self.num_samples = []
        for (n, sec) in enumerate(secs):
            # Single-channel white noise of the requested length.
            wav = torch.randn(1, round((sample_rate * sec)))
            torchaudio.save(str((self.tempdir / f'audio_{n}.wav')), wav, sample_rate=sample_rate)
            self.num_samples.append(wav.size((- 1)))
        self.filepaths = [str((self.tempdir / f'audio_{i}.wav')) for i in range(len(secs))]

    def __enter__(self):
        return (self.filepaths, self.num_samples)

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # Remove the generated audio files on exit.
        shutil.rmtree(self.tempdir)
def get_pseudo_wavs(seed: int=0, n: int=2, min_secs: int=1, max_secs: int=3, sample_rate: int=SAMPLE_RATE, device: str='cpu', padded: bool=False):
    """Generate `n` random waveforms with lengths in [min_secs, max_secs] seconds.

    Returns a list of 1-D tensors (requires_grad=True), or a tuple of
    (padded batch, LongTensor of lengths) when padded=True.
    """
    random.seed(seed)
    torch.manual_seed(seed)
    wavs, wavs_len = [], []
    for _ in range(n):
        num_samples = random.randint(min_secs * sample_rate, max_secs * sample_rate)
        wavs_len.append(num_samples)
        wavs.append(torch.randn(num_samples, requires_grad=True).to(device))
    if padded:
        return pad_sequence(wavs, batch_first=True), torch.LongTensor(wavs_len)
    return wavs
def fix_random_seeds(seed: int=1337) -> None:
    """Fixes all random seeds, including cuDNN backends.

    Args:
        seed (int, optional): Random seed. Defaults to 1337.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Force deterministic cuDNN kernels.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
def read_file(path, callback=(lambda x: x), sep=' ', default_value=''):
    """Parse a two-column text file into {first_field: callback(rest_of_line)}.

    Each line is split on `sep` at most once; lines without a second field
    map to callback(default_value).

    Args:
        path: file to read
        callback: applied to each value before storing (default: identity)
        sep: field separator (default: single space)
        default_value: value used when a line has only one field

    Returns:
        dict mapping first field to the processed value.
    """
    content = {}
    with open(path, 'r') as file:
        # Iterate the file directly instead of readlines() + an unused
        # enumerate index — same behavior, no full-file materialization.
        for line in file:
            fields = line.strip().split(sep, maxsplit=1)
            if len(fields) > 1:
                filename, value = fields
            else:
                filename = fields[0]
                value = default_value
            content[filename] = callback(value)
    return content
def get_ddp_sampler(dataset: Dataset, epoch: int):
    """Return a DistributedSampler seeded with `epoch` when DDP is active.

    Returns None when torch.distributed is not initialized, so the caller
    can pass the result directly to a DataLoader.
    """
    if not is_initialized():
        return None
    sampler = DistributedSampler(dataset)
    sampler.set_epoch(epoch)
    return sampler
def _download(filename, url, refresh, agent):
    """Download `url` into the s3prl cache as `filename` and return the path.

    Args:
        filename: destination file name inside the cache directory
        url: source URL
        refresh: re-download even if the cached file exists
        agent: 'wget' or 'gdown'

    Returns:
        The local cache file path.
    """
    dirpath = f'{torch.hub.get_dir()}/s3prl_cache'
    os.makedirs(dirpath, exist_ok=True)
    # Bug fix: the destination must use the `filename` argument; the original
    # hard-coded a placeholder name, so every URL overwrote the same cache
    # entry and the parameter was silently ignored.
    filepath = f'{dirpath}/{filename}'
    # Serialize concurrent downloads of the same file.
    with FileLock((filepath + '.lock')):
        if ((not os.path.isfile(filepath)) or refresh):
            if (agent == 'wget'):
                os.system(f'wget {url} -O {filepath}')
            elif (agent == 'gdown'):
                gdown.download(url, filepath, use_cookies=False)
            else:
                print("[Download] - Unknown download agent. Only 'wget' and 'gdown' are supported.")
                raise NotImplementedError
        else:
            print(f'''Using cache found in {filepath} for {url}''')
    return filepath
def _urls_to_filepaths(*args, refresh=False, agent='wget'):
    '''
    Preprocess the URL specified in *args into local file paths after downloading

    Args:
        Any number of URLs (1 ~ any)

    Return:
        Same number of downloaded file paths
    '''
    def _hash_name(url):
        assert (type(url) is str)
        digest = hashlib.sha256()
        digest.update(str.encode(url))
        return str(digest.hexdigest())

    def _fetch(url, refresh):
        # Empty strings and non-strings map to None instead of a path.
        if (type(url) is str) and (len(url) > 0):
            return _download(_hash_name(url), url, refresh, agent=agent)
        return None

    paths = [_fetch(url, refresh) for url in args]
    # A single URL returns a bare path instead of a one-element list.
    return paths if len(paths) > 1 else paths[0]
def _gdriveids_to_filepaths(*args, refresh=False):
    '''
    Preprocess the Google Drive id specified in *args into local file paths after downloading

    Args:
        Any number of Google Drive ids (1 ~ any)

    Return:
        Same number of downloaded file paths
    '''
    def to_url(gdriveid):
        # Invalid/empty ids become None, matching _urls_to_filepaths handling.
        if (type(gdriveid) is str) and (len(gdriveid) > 0):
            return f'https://drive.google.com/uc?id={gdriveid}'
        return None

    urls = [to_url(gid) for gid in args]
    return _urls_to_filepaths(*urls, refresh=refresh, agent='gdown')
def check_model_equiv(model1, model2):
    # Return True when every pair of corresponding parameters is identical.
    # NOTE(review): parameters are paired by iteration order, so both models
    # must share the same architecture; zip silently truncates otherwise.
    for (p1, p2) in zip(model1.parameters(), model2.parameters()):
        if (p1.data.ne(p2.data).sum() > 0):
            return False
        # NOTE(review): the two checks below index into the parameter tensors
        # (rows 0 and 1) and appear redundant given the element-wise check
        # above; they also assume every parameter has at least two rows —
        # confirm what edge case they were meant to guard.
        if (not torch.equal(p1[0], p2[0])):
            return False
        if (not torch.equal(p1[1].data, p2[1].data)):
            return False
    return True
def copyParams(module_src, module_dest):
    """Copy each parameter of module_src into the same-named parameter of module_dest.

    Parameters present only in one of the modules are left untouched.
    """
    dest_params = dict(module_dest.named_parameters())
    for name, src_param in module_src.named_parameters():
        if name in dest_params:
            dest_params[name].data.copy_(src_param.data)
def main():
    # Convert a legacy transformer checkpoint into the new s3prl
    # TransformerForMaskedAcousticModel checkpoint layout.
    input_ckpt = sys.argv[1]
    from transformer.nn_transformer import SPEC_TRANSFORMER
    # Options for loading the legacy wrapper with inference defaults.
    options = {'ckpt_file': input_ckpt, 'load_pretrain': 'True', 'no_grad': 'True', 'dropout': 'default', 'spec_aug': 'False', 'spec_aug_prev': 'True', 'weighted_sum': 'False', 'select_layer': (- 1), 'permute_input': 'False'}
    old_transformer = SPEC_TRANSFORMER(options, inp_dim=(- 1))
    from s3prl.upstream.mockingjay.model import TransformerForMaskedAcousticModel
    model = TransformerForMaskedAcousticModel(old_transformer.model_config, old_transformer.inp_dim, old_transformer.inp_dim).to(torch.device('cuda'))
    # Sanity checks: weights must differ before each copy and match after.
    assert (not check_model_equiv(old_transformer.model, model.Transformer))
    copyParams(old_transformer.model, model.Transformer)
    assert check_model_equiv(old_transformer.model, model.Transformer)
    assert (not check_model_equiv(old_transformer.SpecHead, model.SpecHead))
    copyParams(old_transformer.SpecHead, model.SpecHead)
    assert check_model_equiv(old_transformer.SpecHead, model.SpecHead)
    # Carry the training step and settings over into the new checkpoint.
    global_step = old_transformer.all_states['Global_step']
    settings = old_transformer.all_states['Settings']
    all_states = {'SpecHead': model.SpecHead.state_dict(), 'Transformer': model.Transformer.state_dict(), 'Global_step': global_step, 'Settings': settings}
    new_ckpt_path = input_ckpt.replace('.ckpt', '-new.ckpt')
    torch.save(all_states, new_ckpt_path)
    print('Done fixing ckpt: ', input_ckpt, 'to: ', new_ckpt_path)
def main():
    # Parse a training log: report the best dev score, the step it occurred
    # at, and the test score logged for that same step.
    log_file = str(sys.argv[1])
    # Locate the newest checkpoint next to the log, ordered by step number.
    ckpts = glob.glob((os.path.dirname(log_file) + '/states-*.ckpt'))
    sorted_ckpts = sorted(ckpts, key=(lambda ckpt: int(ckpt.split('.')[0].split('-')[(- 1)])))
    print(f'The last ckpt: {sorted_ckpts[(- 1)]}')
    # Optional second CLI argument: stop scanning past this step.
    if (len(sys.argv) == 3):
        stop_step = int(sys.argv[2])
    else:
        stop_step = 99999999
    (best_dev, best_step, best_test) = (None, None, None)
    with open(log_file) as f:
        lines = f.readlines()
        for line in lines:
            line = line.strip('\n').split(' ')
            # Lines starting with "New" announce a new best dev score;
            # the step and score are the last two space-separated tokens.
            if (line[0].lower() == 'new'):
                best_dev = line[(- 1)]
                best_step = line[(- 2)].strip(':')
            # "Test" lines carry the test score at a given step.
            if (line[0].lower() == 'test'):
                if (line[(- 2)].strip(':') == best_step):
                    best_test = line[(- 1)]
                if (int(line[(- 2)].strip(':')) > stop_step):
                    break
    print(f'The best dev score {best_dev} at step {best_step}, accoupanied by this test score {best_test}')
def main(): log_file = str(sys.argv[1]) if (len(sys.argv) == 4): rank_by = str(sys.argv[2]) target = str(sys.argv[3]) large_or_small = str(sys.argv[4]) else: rank_by = 'dev' target = 'test' large_or_small = '+' best_record = [(- 99999), 0, None] if (large_or_small == '-'): best_record[0] *= (- 1) with open(log_file) as f: lines = f.readlines() for line in lines: line = line.strip('\n').split('/')[(- 1)].split('|') if (len(line) < 3): continue prefix = str(line[0].split(':')[(- 1)]) step = int(line[1].split(':')[(- 1)]) score = float(line[2].split(':')[(- 1)]) if (rank_by in prefix): if compare(score, best_record[0], large_or_small): best_record[0] = score best_record[1] = step elif ((step == best_record[1]) and (target in prefix)): best_record[2] = score print(f'The best {rank_by} score {best_record[0]} at step {best_record[1]}, accoupanied by this {target} score {best_record[2]}')
def compare(a, b, large_or_small):
    """Return whether `a` beats `b`: '+' means larger wins, '-' means smaller wins."""
    if large_or_small == '+':
        return a > b
    if large_or_small == '-':
        return a < b
    raise ValueError(large_or_small)
def is_leader_process():
    """True when DDP is not initialized or this process has rank 0."""
    if not is_initialized():
        return True
    return get_rank() == 0
def count_parameters(model):
    """Total number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def count_used_parameters(model):
    """Number of parameters that currently hold a gradient (p.grad is set)."""
    used = 0
    for p in model.parameters():
        if p.grad is not None:
            used += p.numel()
    return used
def get_time_tag():
    """Return the current local time formatted as 'YYYY-MM-DD-HH-MM-SS'."""
    now = datetime.fromtimestamp(time())
    return now.strftime('%Y-%m-%d-%H-%M-%S')
def backup(src_path, tgt_dir):
    """Copy src_path into tgt_dir, appending a timestamp to the file stem."""
    src = Path(src_path)
    stamped_name = f'{src.stem}_{get_time_tag()}{src.suffix}'
    shutil.copyfile(src_path, os.path.join(tgt_dir, stamped_name))
def get_model_state(model):
    """Return the state_dict, unwrapping DistributedDataParallel if present."""
    target = model.module if isinstance(model, DDP) else model
    return target.state_dict()
def show(*args, **kwargs):
    """A print() that is a no-op on non-leader DDP processes."""
    if not is_leader_process():
        return
    print(*args, **kwargs)
def hack_isinstance():
    # Monkey-patch builtins.isinstance so a defaultdict instance only counts
    # as an instance of defaultdict (sub)classes.
    # NOTE(review): this makes isinstance(defaultdict(), dict) return False
    # process-wide — presumably to force code paths that must treat
    # defaultdict specially; confirm before reusing elsewhere.
    _isinstance = builtins.isinstance

    def isinstance(obj, cls):
        if _isinstance(obj, defaultdict):
            return (_isinstance(obj, cls) and issubclass(cls, defaultdict))
        return _isinstance(obj, cls)
    builtins.isinstance = isinstance
def override(string, args, config):
    """Apply ",,"-separated dotted overrides onto `args` (Namespace) and `config` (dict).

    Example usgae:
    -o "config.optimizer.lr=1.0e-3,,config.optimizer.name='AdamW',,config.runner.eval_dataloaders=['dev', 'test']"
    """
    for option in string.split(',,'):
        key, value_str = (part.strip() for part in option.strip().split('='))
        first_field, *remaining = key.split('.')
        try:
            value = eval(value_str)
        except:
            value = value_str
        print(f'[Override] - {key} = {value}', file=sys.stderr)
        if first_field == 'args':
            assert len(remaining) == 1
            setattr(args, remaining[0], value)
        elif first_field == 'config':
            if remaining:
                node = config
                for field_name in remaining[:-1]:
                    node = node.setdefault(field_name, {})
                node[remaining[-1]] = value
def zero_mean_unit_var_norm(input_values: List[np.ndarray]) -> List[np.ndarray]:
    """Normalize each array in the list to zero mean and (near) unit variance.

    Taken from huggingface to ensure the same behavior across s3prl and huggingface
    Reference: https://github.com/huggingface/transformers/blob/a26f4d620874b32d898a5b712006a4c856d07de1/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L81-L86
    """
    normalized = []
    for x in input_values:
        # The 1e-5 epsilon matches HuggingFace and avoids division by zero.
        normalized.append((x - np.mean(x)) / np.sqrt(np.var(x) + 1e-05))
    return normalized
def parse_prune_heads(config):
    """Normalize config['transformer']['prune_headids'] from a spec string to a list.

    The spec is comma-separated; each item is a single head id ("3") or a
    half-open range ("2-5" -> [2, 3, 4]). A missing key or the literal
    string 'None' becomes None.
    """
    transformer_config = config['transformer']
    if ('prune_headids' not in transformer_config) or (transformer_config['prune_headids'] == 'None'):
        transformer_config['prune_headids'] = None
        return
    heads_int = []
    for span in transformer_config['prune_headids'].split(','):
        endpoints = span.split('-')
        if len(endpoints) == 1:
            heads_int.append(int(endpoints[0]))
        elif len(endpoints) == 2:
            heads_int += torch.arange(int(endpoints[0]), int(endpoints[1])).tolist()
        else:
            raise ValueError
    print(f'[PRUNING] - heads {heads_int} will be pruned')
    transformer_config['prune_headids'] = heads_int
def get_transformer_tester(from_path='result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/model-1000000.ckpt', display_settings=False):
    ' Wrapper that loads the transformer model from checkpoint path '
    # Load the checkpoint on CPU; config and paras live under 'Settings'.
    all_states = torch.load(from_path, map_location='cpu')
    config = all_states['Settings']['Config']
    paras = all_states['Settings']['Paras']
    # Backward compatibility for checkpoints saved before these fields existed.
    if (not hasattr(paras, 'multi_gpu')):
        setattr(paras, 'multi_gpu', False)
    if ('prune_headids' not in config['transformer']):
        config['transformer']['prune_headids'] = None
    if display_settings:
        # Dump the stored config sections and paras for inspection.
        for cluster in config:
            print((cluster + ':'))
            for item in config[cluster]:
                print((('\t' + str(item)) + ': '), config[cluster][item])
        print('paras:')
        v_paras = vars(paras)
        for item in v_paras:
            print((('\t' + str(item)) + ': '), v_paras[item])
    from transformer.solver import Tester
    tester = Tester(config, paras)
    # Inference mode, without the spectrogram reconstruction head.
    tester.set_model(inference=True, with_head=False, from_path=from_path)
    return tester
def compute_lnsr(real, adve, norm_L2=True):
    # Distance between a real and an adversarial batch, flattened per sample.
    real = real.reshape(real.shape[0], (- 1))
    adve = adve.reshape(adve.shape[0], (- 1))
    # NOTE(review): with 2-D inputs, np.linalg.norm(..., ord=2) computes the
    # spectral norm (largest singular value), not the element-wise L2 norm —
    # confirm this is the intended metric.
    l2 = np.linalg.norm((real - adve), ord=2)
    if norm_L2:
        # Normalize by the norm of the clean batch.
        l2 /= np.linalg.norm(real, ord=2)
    return l2
def run_over_layer(layer, real_todo, adve_todo):
    # Mean LNSR between real and adversarial examples, measured either on raw
    # acoustic features (layer == 'feature') or on a given transformer layer.
    if (layer != 'feature'):
        # Load the pretrained mockingjay model, selecting the requested layer.
        options = {'ckpt_file': 'result/result_transformer/mockingjay/LinearLarge-libri/model-500000.ckpt', 'load_pretrain': 'True', 'no_grad': 'True', 'dropout': 'default', 'spec_aug': 'False', 'spec_aug_prev': 'True', 'weighted_sum': 'False', 'select_layer': layer, 'permute_input': 'True'}
        mockingjay = TRANSFORMER(options=options, inp_dim=160)
        mockingjay.eval()
    episode = []
    for i in range(len(real_todo)):
        r = torch.FloatTensor(np.load(real_todo[i])).unsqueeze(1)
        a = torch.FloatTensor(np.load(adve_todo[i])).unsqueeze(1)
        # Truncate both to the common length before comparing.
        min_len = min(r.shape[0], a.shape[0])
        if (layer == 'feature'):
            l2 = compute_lnsr(r[:min_len].data.cpu().numpy(), a[:min_len].data.cpu().numpy(), norm_L2=True)
        else:
            r_hidd = mockingjay(r[:min_len])
            a_hidd = mockingjay(a[:min_len])
            l2 = compute_lnsr(r_hidd.data.cpu().numpy(), a_hidd.data.cpu().numpy(), norm_L2=True)
        episode.append(l2)
    return np.mean(episode)
def main():
    # Measure distances between original and adversarial examples, first on
    # the acoustic features and then at every transformer layer.
    num_layers = 12
    data_path = 'data/adversarial/'
    data_type = ['fgsm_8.0', 'fgsm_16.0', 'pgd_8.0', 'pgd_16.0']
    # Only the first attack type is evaluated in this script.
    data_type = data_type[0]
    real_data = os.path.join(data_path, 'original')
    adve_data = os.path.join(data_path, data_type)
    # Sorted so real/adversarial files pair up index-by-index.
    real_todo = sorted(list(Path(real_data).rglob('*.npy')))
    adve_todo = sorted(list(Path(adve_data).rglob('*.npy')))
    assert (len(real_todo) == len(adve_todo))
    print('Number of data: ', len(real_todo))
    dist = run_over_layer('feature', real_todo, adve_todo)
    print((('[Original v.s. ' + data_type) + '] Acoustic distance: '), dist)
    dist = []
    for i in range(num_layers):
        m = run_over_layer(i, real_todo, adve_todo)
        dist.append(m)
        print('Layer: ', (i + 1), 'Mse: ', m)
    print((('[Original v.s. ' + data_type) + '] Hidden rep distance over all layers (1->12): '), dist)
def get_speaker_from_path(x):
    """Extract the speaker id from a LibriSpeech-style path.

    e.g. '/data/19-198-0001.flac' -> '19'
    """
    basename = x.split('/')[-1]
    return basename.split('.')[0].split('-')[0]
def get_all_speakers(X):
    """Map speaker id -> count for the paths in X.

    Note: the count starts at 0 for the first occurrence (i.e. it equals
    occurrences - 1), preserving the original bookkeeping consumed by
    compute_speaker2idx's threshold comparison.
    """
    speaker_set = {}
    for x in X:
        speaker = get_speaker_from_path(x)
        if speaker in speaker_set:
            speaker_set[speaker] += 1
        else:
            speaker_set[speaker] = 0
    return speaker_set
def compute_speaker2idx(speakers):
    """Assign consecutive indices (sorted order) to speakers whose count
    exceeds the module-level SPEAKER_THRESHOLD."""
    speaker2idx = {}
    for speaker in sorted(speakers):
        if speaker in speaker2idx:
            continue
        if speakers[speaker] > SPEAKER_THRESHOLD:
            speaker2idx[speaker] = len(speaker2idx)
    return speaker2idx
def main():
    # Compute speaker statistics for the LibriSpeech train-clean-100 split.
    # NOTE(review): `root`, `drop`, `max_timestep` and `max_label_len` are not
    # defined in this snippet — presumably module-level globals or CLI
    # options elsewhere in the original file; confirm before running.
    tables = pd.read_csv(os.path.join(root, ('train-clean-100' + '.csv')))
    print('[Dataset] - Computing speaker class...')
    O = tables['file_path'].tolist()
    speakers = get_all_speakers(O)
    speaker2idx = compute_speaker2idx(speakers)
    class_num = len(speaker2idx)
    print('[Dataset] - Possible speaker classes: ', class_num)
    # Fixed-seed 90/10 train/test split.
    train = tables.sample(frac=0.9, random_state=20190929)
    test = tables.drop(train.index)
    # Sort by utterance length, longest first.
    table = train.sort_values(by=['length'], ascending=False)
    X = table['file_path'].tolist()
    X_lens = table['length'].tolist()
    # Optionally drop utterances that are too long or have too many labels.
    if (drop and (max_timestep > 0)):
        table = table[(table.length < max_timestep)]
    if (drop and (max_label_len > 0)):
        table = table[((table.label.str.count('_') + 1) < max_label_len)]
    num_utt = []
    for speaker in speakers:
        if (speaker in speaker2idx):
            num_utt.append(speakers[speaker])
    print('Average utterance per speaker: ', np.mean(num_utt))
def print_cache_path(url: str, refresh: bool):
    """Print the local cache path corresponding to `url`, downloading per `refresh`."""
    cache_path = _urls_to_filepaths(url, refresh=refresh)
    print(cache_path)
def get_ttest_args():
    # Build the CLI for comparing two trained checkpoints with a statistical
    # significance test, and load each checkpoint's stored args/config.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['ttest', 'fisher', 'mcnemar'], default='ttest')
    parser.add_argument('-em', '--evaluate_metric', default='acc')
    parser.add_argument('-t', '--evaluate_split', default='test')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('-e1', '--past_exp1', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from a checkpoint')
    parser.add_argument('-e2', '--past_exp2', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from another checkpoint')
    parser.add_argument('-u1', '--upstream1', default='default', type=str, help='used to override the upstream string for checkpoint e1')
    parser.add_argument('-u2', '--upstream2', default='default', type=str, help='used to override the upstream string for checkpoint e2')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--verbose', action='store_true', help='Print model infomation')
    parser.add_argument('--ckpt_name', default='best-states-dev', help='The string used for searching the checkpoint, example choices: `states-*`, `best-states-dev`, `best-states-test`.')
    args = parser.parse_args()
    # Recover the args/config stored inside each checkpoint.
    (args1, config1) = get_past_exp(args, args.past_exp1, args.ckpt_name)
    (args2, config2) = get_past_exp(args, args.past_exp2, args.ckpt_name)
    # Optionally override the upstream recorded in each checkpoint.
    if (args.upstream1 != 'default'):
        args1.upstream = args.upstream1
    if (args.upstream2 != 'default'):
        args2.upstream = args.upstream2
    return (args.mode, args1, config1, args2, config2)
def get_past_exp(args, past_exp, name):
    # Resolve `past_exp` (a checkpoint file or a directory) to a checkpoint
    # path, then merge its stored Args/Config with the current CLI args.
    if os.path.isdir(past_exp):
        ckpt_pths = glob.glob(os.path.join(past_exp, f'{name}.ckpt'))
        assert (len(ckpt_pths) > 0)
        if (len(ckpt_pths) == 1):
            ckpt_pth = ckpt_pths[0]
        else:
            # Multiple matches: pick the one with the largest step number.
            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
            ckpt_pth = ckpt_pths[(- 1)]
    else:
        ckpt_pth = past_exp
    print(f'[Runner] - Loading from {ckpt_pth}')
    ckpt = torch.load(ckpt_pth, map_location='cpu')

    def update_args(old, new, preserve_list=None):
        # Overwrite `old`'s fields with `new`'s, keeping keys in preserve_list.
        # NOTE(review): with the default preserve_list=None, `key in None`
        # would raise TypeError — it is always called with a list below;
        # confirm the default is intentional.
        out_dict = vars(old)
        new_dict = vars(new)
        for key in list(new_dict.keys()):
            if (key in preserve_list):
                new_dict.pop(key)
        out_dict.update(new_dict)
        return Namespace(**out_dict)
    # These CLI options must win over the values stored in the checkpoint.
    cannot_overwrite_args = ['mode', 'evaluate_split', 'override', 'backend', 'local_rank', 'past_exp']
    args = update_args(args, ckpt['Args'], preserve_list=cannot_overwrite_args)
    args.init_ckpt = ckpt_pth
    args.mode = 'evaluate'
    config = ckpt['Config']
    # CLI overrides take the highest priority.
    if args.override:
        override(args.override, args, config)
    return (args, config)
class Tester(Runner):
    '''
    Used to handle the evaluation loop and return the testing records for Paired Sample T-test.
    '''

    def __init__(self, args, config):
        super(Tester, self).__init__(args, config)

    def evaluate(self):
        'evaluate function will always be called on a single process even during distributed training'
        split = self.args.evaluate_split
        # Fix all seeds so the evaluation run is reproducible.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(self.args.seed)
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()
        self.downstream.eval()
        self.upstream.eval()
        dataloader = self.downstream.get_dataloader(split)
        records = defaultdict(list)
        for (batch_id, (wavs, *others)) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split)):
            wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
            with torch.no_grad():
                # Upstream features feed the downstream head, which appends
                # its metrics into `records`.
                features = self.upstream(wavs)
                self.downstream(split, features, *others, records=records)
        return records
def process_records(records, metric):
    """Return (mean of records[metric], per-sample metric list) for the t-test."""
    assert ('sample_wise_metric' in records), 'Utterance-wise / sample-wise metric is necessary for proceeding the Paired Sample T-test.'
    scores = torch.FloatTensor(records[metric])
    return (scores.mean().item(), records['sample_wise_metric'])
def main():
    # Compare two trained checkpoints with a paired statistical significance
    # test (t-test, Fisher's exact, or McNemar's).
    torch.multiprocessing.set_sharing_strategy('file_system')
    torchaudio.set_audio_backend('sox_io')
    hack_isinstance()
    (mode, args1, config1, args2, config2) = get_ttest_args()
    # Seed everything with the first checkpoint's seed for reproducibility.
    random.seed(args1.seed)
    np.random.seed(args1.seed)
    torch.manual_seed(args1.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args1.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Evaluate both checkpoints and collect per-sample metrics.
    tester1 = Tester(args1, config1)
    records1 = eval(f'tester1.{args1.mode}')()
    (average1, sample_metric1) = process_records(records1, args1.evaluate_metric)
    tester2 = Tester(args2, config2)
    records2 = eval(f'tester2.{args2.mode}')()
    (average2, sample_metric2) = process_records(records2, args2.evaluate_metric)
    if (mode == 'ttest'):
        # Paired-sample t-test over the per-utterance metrics.
        (statistic, p_value) = stats.ttest_rel(sample_metric1, sample_metric2)
    elif (mode == 'fisher'):
        # Fisher's exact test on the correct/incorrect contingency table.
        correct1 = sample_metric1.count(True)
        correct2 = sample_metric2.count(True)
        contingency_table = [[correct1, correct2], [(len(sample_metric1) - correct1), (len(sample_metric2) - correct2)]]
        (statistic, p_value) = stats.fisher_exact(contingency_table)
    elif (mode == 'mcnemar'):
        # McNemar's exact test on the same contingency table.
        correct1 = sample_metric1.count(True)
        correct2 = sample_metric2.count(True)
        contingency_table = [[correct1, correct2], [(len(sample_metric1) - correct1), (len(sample_metric2) - correct2)]]
        b = mcnemar(contingency_table, exact=True)
        (statistic, p_value) = (b.statistic, b.pvalue)
    else:
        raise NotImplementedError
    print(f'[Runner] - The testing scores of the two ckpts are {average1} and {average2}, respectively.')
    print(f'[Runner] - The statistic of the significant test of the two ckpts is {statistic}')
    print(f'[Runner] - The P value of significant test of the two ckpts is {p_value}')
class Timer():
    """Accumulates wall-clock timings keyed by the call site of end().

    Usage: call start() before the timed region and end() after; report()
    prints the mean duration per call site.
    """

    def __init__(self):
        # marker ("<file>:<line>") -> list of elapsed seconds
        self.timings = {}
        self.start_time = 0

    def start(self):
        """Mark the beginning of a timed region."""
        self.start_time = time.time()

    def end(self):
        """Record the elapsed time since start(), keyed by the caller's location."""
        frameinfo = inspect.getouterframes(inspect.currentframe())[1]
        filename = frameinfo.filename
        # Keep only the last two path components for a compact marker.
        filename = '/'.join(filename.split('/')[(- 2):])
        # Bug fix: the marker must include the computed caller filename
        # (the original used a fixed placeholder, merging all files).
        marker = f'{filename}:{frameinfo.lineno}'
        # Bug fix: the original only appended inside the else-branch, so the
        # first measurement at every call site was silently dropped.
        self.timings.setdefault(marker, []).append(float((time.time() - self.start_time)))

    def report(self):
        """Print the mean recorded duration for each call site."""
        n_points = len(self.timings.keys())
        if (n_points > 0):
            print('[TIMER]:')
            for marker in self.timings:
                print(f'{marker}: {torch.FloatTensor(self.timings[marker]).mean().item()}')
        else:
            print('[TIMER]: No record')
def get_runner_args():
    """Parse command-line arguments for the deprecated runner.

    Returns:
        tuple: ``(config, args)`` where ``config`` is the YAML experiment
        config dict (after ``parse_prune_heads``) and ``args`` the parsed
        namespace, extended with ``gpu`` and ``verbose`` convenience flags.
    """
    parser = argparse.ArgumentParser(description='Argument Parser for the S3PLR project.')

    # Setting and checkpoint paths.
    # Fix: the default config path ended in ",yaml" (comma typo) instead of ".yaml".
    parser.add_argument('--config', default='../config/deprecated_runner/tera_libri_fmllrBase_pretrain.yaml', type=str, help='Path to experiment config.', required=False)
    parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducable results.', required=False)
    parser.add_argument('--logdir', default='../log/log_transformer/', type=str, help='Logging path.', required=False)
    parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)
    parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')
    parser.add_argument('--ckpdir', default='../result/result_transformer/', type=str, help='path to store experiment result.', required=False)
    parser.add_argument('--ckpt', default='fmllrBase960-F-N-K-libri/states-1000000.ckpt', type=str, help='path to transformer model checkpoint.', required=False)
    parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)
    parser.add_argument('--apc_path', default='../result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)

    # Mode selection flags.
    parser.add_argument('--train', action='store_true', help='Train the model.')
    parser.add_argument('--run_transformer', action='store_true', help='train and test the downstream tasks using speech representations.')
    parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')
    parser.add_argument('--fine_tune', action='store_true', help='fine tune the transformer model with downstream task.')
    parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')

    # Downstream task flags.
    parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or speech representations.')
    parser.add_argument('--test_phone', action='store_true', help='Test mel or speech representations using the trained phone classifier.')
    parser.add_argument('--train_cpc_phone', action='store_true', help='Train the phone classifier on mel or speech representations with the alignments in CPC paper.')
    parser.add_argument('--test_cpc_phone', action='store_true', help='Test mel or speech representations using the trained phone classifier with the alignments in CPC paper.')
    parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or speech representations.')
    parser.add_argument('--test_sentiment', action='store_true', help='Test mel or speech representations using the trained sentiment classifier.')
    parser.add_argument('--train_speaker', action='store_true', help='Train the speaker classifier on mel or speech representations.')
    parser.add_argument('--test_speaker', action='store_true', help='Test mel or speech representations using the trained speaker classifier.')

    # Inference / plotting options.
    parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')
    parser.add_argument('--plot_attention', action='store_true', help='plot attention')
    parser.add_argument('--load_ws', default='result/result_transformer_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')

    # Environment options.
    parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
    parser.add_argument('--multi_gpu', action='store_true', help='Enable Multi-GPU training.')
    parser.add_argument('--no_msg', action='store_true', help='Hide all messages.')
    parser.add_argument('--test_reconstruct', action='store_true', help='Test reconstruction capability')

    args = parser.parse_args()
    # Derived convenience flags.
    setattr(args, 'gpu', (not args.cpu))
    setattr(args, 'verbose', (not args.no_msg))

    # Fix: close the config file handle via a context manager (the original
    # leaked the handle returned by open()).
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    parse_prune_heads(config)
    return (config, args)
def main():
    """Entry point of the deprecated runner.

    Seeds all RNGs, then dispatches to exactly one trainer/tester depending
    on the mode flag parsed by ``get_runner_args``. Each branch lazily
    imports its solver module so unused dependencies are never loaded.
    """
    (config, args) = get_runner_args()

    # Fix RNG seeds for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Upstream transformer pre-training.
    if args.train:
        from transformer.solver import Trainer
        trainer = Trainer(config, args)
        trainer.load_data(split='train')
        trainer.set_model(inference=False)
        trainer.exec()
    # Note: this second `if` starts a new chain, so --test_reconstruct can
    # run after --train in the same invocation; the remaining modes are
    # mutually exclusive via elif.
    if args.test_reconstruct:
        from transformer.solver import Trainer
        trainer = Trainer(config, args)
        trainer.load_data(split='test')
        trainer.set_model(inference=True, with_head=True)
        trainer.test_reconstruct()
    # Phone classification (Montreal alignments).
    elif args.train_phone:
        from downstream.solver import Downstream_Trainer
        # Task name encodes which representation feeds the classifier.
        task = ('transformer_phone' if args.run_transformer else ('apc_phone' if args.run_apc else 'baseline_phone'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='montreal_phone')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_phone:
        from downstream.solver import Downstream_Tester
        task = ('transformer_phone' if args.run_transformer else ('apc_phone' if args.run_apc else 'baseline_phone'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='montreal_phone')
        tester.set_model(inference=True)
        tester.exec()
    # Phone classification (alignments from the CPC paper).
    elif args.train_cpc_phone:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_cpc_phone' if args.run_transformer else ('apc_cpc_phone' if args.run_apc else 'baseline_cpc_phone'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='cpc_phone')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_cpc_phone:
        from downstream.solver import Downstream_Tester
        task = ('transformer_cpc_phone' if args.run_transformer else ('apc_cpc_phone' if args.run_apc else 'baseline_cpc_phone'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='cpc_phone')
        tester.set_model(inference=True)
        tester.exec()
    # Sentiment classification.
    elif args.train_sentiment:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_sentiment' if args.run_transformer else ('apc_sentiment' if args.run_apc else 'baseline_sentiment'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='sentiment')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_sentiment:
        from downstream.solver import Downstream_Tester
        task = ('transformer_sentiment' if args.run_transformer else ('apc_sentiment' if args.run_apc else 'baseline_sentiment'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='sentiment')
        tester.set_model(inference=True)
        tester.exec()
    # Speaker classification.
    elif args.train_speaker:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_speaker' if args.run_transformer else ('apc_speaker' if args.run_apc else 'baseline_speaker'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='speaker')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_speaker:
        from downstream.solver import Downstream_Tester
        task = ('transformer_speaker' if args.run_transformer else ('apc_speaker' if args.run_apc else 'baseline_speaker'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='speaker')
        tester.set_model(inference=True)
        tester.exec()
    # Visualization-only modes (no training).
    elif args.plot:
        from transformer.solver import Tester
        tester = Tester(config, args)
        tester.load_data(split='test', load_mel_only=True)
        tester.set_model(inference=True, with_head=args.with_head)
        tester.plot(with_head=args.with_head)
    elif args.plot_attention:
        from transformer.solver import Tester
        tester = Tester(config, args)
        tester.load_data(split='test', load_mel_only=True)
        tester.set_model(inference=True, output_attention=True)
        tester.plot_attention()
def pytest_addoption(parser):
    """Register the command-line flags that opt in to optional test groups."""
    boolean_flags = [
        ('--runupstream', 'run upstream tests'),
        ('--runslow', 'run slow tests'),
        ('--runcorpus', 'run tests with corpus path dependency'),
        ('--practice', 'for test scripts only for practice and not real test cases.'),
        ('--runextra', 'run tests with extra dependencies'),
        ('--fairseq', 'run tests with fairseq dependencies'),
    ]
    for flag, message in boolean_flags:
        parser.addoption(flag, action='store_true', help=message)
    # Free-form value: which upstream names to parametrize tests with.
    parser.addoption('--upstream_names', action='store')
def pytest_generate_tests(metafunc): option_value = metafunc.config.option.upstream_names if ('upstream_names' in metafunc.fixturenames): metafunc.parametrize('upstream_names', [option_value])
def pytest_configure(config):
    """Declare the project's custom markers so pytest does not warn on them."""
    marker_lines = (
        'upstream: mark test as a upstream test case',
        'slow: mark test as slow to run',
        'corpus: mark test as required corpus path dependency',
        'extra_dependency: mask test requiring extra dependencies to run',
        'practice: mark test as a practice',
        'fairseq: mark test as a fairseq',
    )
    for line in marker_lines:
        config.addinivalue_line('markers', line)
def pytest_collection_modifyitems(config, items):
    """Mark opt-in test groups as skipped when their enabling flag is absent."""
    guarded_groups = [
        ('--runupstream', 'upstream', 'need --runupstream option to run'),
        ('--runslow', 'slow', 'need --runslow option to run'),
        ('--runcorpus', 'corpus', 'need --runcorpus option to run'),
        ('--practice', 'practice', 'need --practice option to run'),
        ('--runextra', 'extra_dependency', 'need --runextra option to run'),
        ('--fairseq', 'fairseq', 'need --fairseq option to run'),
    ]
    for flag, keyword, reason in guarded_groups:
        if config.getoption(flag):
            continue
        skip_marker = pytest.mark.skip(reason=reason)
        for item in items:
            if keyword in item.keywords:
                item.add_marker(skip_marker)
class Helper():
    """Empty namespace class exposed to tests via the ``helpers`` fixture."""
@pytest.fixture
def helpers():
    """Fixture returning the :class:`Helper` class (not an instance)."""
    return Helper
@pytest.mark.parametrize('vocab_type', ['subword', 'character'])
def test_superb_asr(vocab_type):
    # End-to-end smoke test of the SUPERB ASR recipe on pseudo audio with a
    # tiny fbank upstream, covering both tokenizer vocab types.
    if (vocab_type == 'subword'):
        # Tiny subword vocab so training on 5 fake sentences is feasible.
        vocab_args = {'vocab_size': 18}
    else:
        vocab_args = {}
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestASR(SuperbASR):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Fake 5-utterance corpus split 3 train / 1 valid / 1 test.
                    all_wav_paths = wav_paths
                    all_text = ['hello how are you today', 'fine', 'oh', 'I think is good', 'maybe okay']
                    ids = list(range(len(all_wav_paths)))
                    df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': all_text})
                    train_path = (Path(target_dir) / 'train.csv')
                    valid_path = (Path(target_dir) / 'valid.csv')
                    test_path = (Path(target_dir) / 'test.csv')
                    df.iloc[:3].to_csv(train_path, index=False)
                    df.iloc[3:4].to_csv(valid_path, index=False)
                    df.iloc[4:].to_csv(test_path, index=False)
                    return (train_path, valid_path, [test_path])

            problem = TestASR()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_tokenizer'] = {'vocab_type': vocab_type, 'vocab_args': vocab_args}
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_er():
    """End-to-end smoke test of the SUPERB emotion-recognition recipe on
    pseudo audio, with a tiny fbank upstream and only a few training steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestER(SuperbER):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    ids = [Path(path).stem for path in wav_paths]
                    labels = ['a', 'b', 'a', 'c', 'd']
                    # None start/end means "use the whole utterance".
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': labels, 'start_sec': start_secs, 'end_sec': end_secs})
                    # Fix: target_dir is a str, so it must be wrapped in Path
                    # before using the "/" operator (as the sibling
                    # test_superb_* helpers in this file already do).
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestER()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_ks():
    """End-to-end smoke test of the SUPERB keyword-spotting recipe on
    pseudo audio, with a tiny fbank upstream and only a few training steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestKS(SuperbKS):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    ids = [Path(path).stem for path in wav_paths]
                    labels = ['a', 'b', 'a', 'c', 'd']
                    # None start/end means "use the whole utterance".
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': labels, 'start_sec': start_secs, 'end_sec': end_secs})
                    # Fix: target_dir is a str, so it must be wrapped in Path
                    # before using the "/" operator (as the sibling
                    # test_superb_* helpers in this file already do).
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestKS()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_pr():
    # End-to-end smoke test of the SUPERB phoneme-recognition recipe on
    # pseudo audio: transcriptions are converted to phonemes with G2P first.
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestPR(SuperbPR):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Lazy import: G2P is only needed by this recipe.
                    from s3prl.dataio.encoder.g2p import G2P
                    all_wav_paths = wav_paths
                    all_text = ['hello how are you today', 'fine', 'oh', 'I think is good', 'maybe okay']
                    g2p = G2P()
                    # Convert character transcriptions into phoneme strings.
                    all_text = [g2p.encode(text.strip()) for text in all_text]
                    ids = list(range(len(all_wav_paths)))
                    df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': all_text})
                    train_path = (Path(target_dir) / 'train.csv')
                    valid_path = (Path(target_dir) / 'valid.csv')
                    test_path = (Path(target_dir) / 'test.csv')
                    # 3 train / 1 valid / 1 test utterances.
                    df.iloc[:3].to_csv(train_path, index=False)
                    df.iloc[3:4].to_csv(valid_path, index=False)
                    df.iloc[4:].to_csv(test_path, index=False)
                    return (train_path, valid_path, [test_path])

            problem = TestPR()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_ic():
    """End-to-end smoke test of the SUPERB intent-classification recipe on
    pseudo audio, with a tiny fbank upstream and only a few training steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestIC(SuperbIC):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    ids = [Path(path).stem for path in wav_paths]
                    labels1 = ['a', 'b', 'a', 'c', 'd']
                    labels2 = ['1', '2', '3', '4', '5']
                    # Multi-label intent column: two labels joined by " ; ".
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'labels': [f'{label1} ; {label2}' for (label1, label2) in zip(labels1, labels2)]})
                    # Fix: target_dir is a str, so it must be wrapped in Path
                    # before using the "/" operator (as the sibling
                    # test_superb_* helpers in this file already do).
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestIC()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_sid():
    """End-to-end smoke test of the SUPERB speaker-identification recipe on
    pseudo audio, with a tiny fbank upstream and only a few training steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestSID(SuperbSID):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    ids = [Path(path).stem for path in wav_paths]
                    label = ['a', 'b', 'a', 'c', 'd']
                    # None start/end means "use the whole utterance".
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': label, 'start_sec': start_secs, 'end_sec': end_secs})
                    # Fix: target_dir is a str, so it must be wrapped in Path
                    # before using the "/" operator (as the sibling
                    # test_superb_* helpers in this file already do).
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestSID()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_sd():
    # End-to-end smoke test of the SUPERB speaker-diarization recipe on
    # pseudo audio with per-segment speaker annotations.
    with tempfile.TemporaryDirectory() as tempdir:
        secs = [10, 2, 1, 8, 5]
        with pseudo_audio(secs) as (wav_paths, num_samples):

            class TestSD(SuperbSD):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
                    # One annotated segment per recording; utt_id mirrors
                    # record_id since each recording has a single utterance.
                    record_id = [Path(path).stem for path in wav_paths]
                    durations = secs
                    speaker = ['a', 'b', 'a', 'a', 'b']
                    utt_id = record_id
                    start_secs = [0.0, 0.1, 0.2, 0.3, 0.0]
                    end_secs = [5.2, 1.0, 0.3, 5.4, 4.9]
                    df = pd.DataFrame(data={'record_id': record_id, 'wav_path': wav_paths, 'duration': durations, 'utt_id': utt_id, 'speaker': speaker, 'start_sec': start_secs, 'end_sec': end_secs})
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestSD()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_asv():
    # End-to-end smoke test of the SUPERB speaker-verification recipe:
    # train on (id, wav, spk) rows, evaluate on same/different-speaker trials.
    with tempfile.TemporaryDirectory() as tempdir:
        secs = [10, 2, 1, 8, 5]
        with pseudo_audio(secs) as (wav_paths, num_samples):

            class TestASV(SuperbASV):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    train_csv = (Path(target_dir) / 'train.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    ids = [Path(path).stem for path in wav_paths]
                    spk = ['a', 'b', 'c', 'a', 'b']
                    train_df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'spk': spk})
                    train_df.to_csv(train_csv)
                    # Three verification trials; label 1 = same speaker.
                    id1 = [ids[0], ids[1], ids[2]]
                    id2 = [ids[1], ids[1], ids[2]]
                    wav_path1 = [wav_paths[0], wav_paths[1], wav_paths[2]]
                    wav_path2 = [wav_paths[1], wav_paths[1], wav_paths[2]]
                    labels = [0, 1, 1]
                    test_df = pd.DataFrame(data={'id1': id1, 'id2': id2, 'wav_path1': wav_path1, 'wav_path2': wav_path2, 'label': labels})
                    test_df.to_csv(test_csv)
                    # ASV has no validation split: (train, [tests]).
                    return (train_csv, [test_csv])

            problem = TestASV()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            # No mid-training evaluation; save every step instead.
            config['train']['eval_step'] = math.inf
            config['train']['save_step'] = 1
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
@pytest.mark.parametrize('vocab_type', ['subword', 'character'])
def test_superb_sf(vocab_type):
    # End-to-end smoke test of the SUPERB slot-filling recipe on pseudo
    # audio: each transcription carries a parallel IOB slot-tag sequence.
    if (vocab_type == 'subword'):
        # Tiny subword vocab so training on 5 fake sentences is feasible.
        vocab_args = {'vocab_size': 22}
    else:
        vocab_args = {}
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestSF(SuperbSF):
                def default_config(self) -> dict:
                    # Disable the real corpus preparation step.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    all_wav_paths = wav_paths
                    # (transcription, IOB tags) pairs; tag count matches words.
                    all_text_with_iob = [('hello how are you today', 'O O O O timeRange'), ('fine thank you', 'condition O O'), ('oh nice', 'O condition'), ('I think is good', 'O O O genre'), ('maybe okay', 'O genre')]
                    (text, iob) = zip(*all_text_with_iob)
                    ids = list(range(len(all_wav_paths)))
                    df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': text, 'iob': iob})
                    train_path = (Path(target_dir) / 'train.csv')
                    valid_path = (Path(target_dir) / 'valid.csv')
                    test_path = (Path(target_dir) / 'test.csv')
                    # 3 train / 1 valid / 1 test utterances.
                    df.iloc[:3].to_csv(train_path, index=False)
                    df.iloc[3:4].to_csv(valid_path, index=False)
                    df.iloc[4:].to_csv(test_path, index=False)
                    return (train_path, valid_path, [test_path])

            problem = TestSF()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            # Keep the run tiny: 4 steps, eval/save every 2 steps.
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_tokenizer'] = {'vocab_type': vocab_type, 'vocab_args': vocab_args}
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_audio_info():
    """get_audio_info should report a frame count matching the audio length."""
    with pseudo_audio([3.0, 4.1, 1.1]) as (paths, num_samples):
        names = [Path(path).stem for path in paths]
        infos = get_audio_info(paths, names)
        # 3 seconds of audio at a 16 kHz sampling rate.
        assert infos[0]['num_frames'] == 3 * 16000
@pytest.mark.parametrize('duplicate', [10000, 100000])
def test_balanced_weighted_sampler(duplicate: int):
    """BalancedWeightedSampler should draw the two classes nearly equally
    often, with the imbalance ratio shrinking as ``duplicate`` grows."""
    labels = ['a', 'a', 'b', 'a']
    batch_size = 5
    prev_diff_ratio = 1.0
    sampler = BalancedWeightedSampler(labels, batch_size=batch_size, duplicate=duplicate, seed=0)
    indices = list(sampler)
    assert (len(indices[0]) == batch_size)
    counter = Counter()
    for batch_indices in indices:
        for idx in batch_indices:
            # labels are single characters, so Counter.update over the
            # string counts exactly one label occurrence.
            counter.update(labels[idx])
    diff_ratio = ((abs((counter['a'] - counter['b'])) / duplicate) * len(labels))
    assert (diff_ratio < prev_diff_ratio)
    prev_diff_ratio = diff_ratio
    # Fix: this was a bare expression, so the balance bound was
    # silently never enforced.
    assert (diff_ratio < 0.05)
@pytest.mark.extra_dependency
def test_beam_decoder():
    """Smoke test: BeamDecoder should decode a batch of log-probability emissions."""
    batch, frames, vocab = 4, 100, 31
    log_probs = torch.log_softmax(torch.randn((batch, frames, vocab)), dim=2)
    decoder = BeamDecoder()
    hyps = decoder.decode(log_probs)
def _download_with_timeout(timeout: float, num_process: int):
    """Spawn ``num_process`` workers that each force-refresh the download of
    URL, join them with ``timeout`` seconds, and require every worker to end
    with the same exit code; on a non-zero code, terminate all workers."""
    workers = [
        Process(target=_urls_to_filepaths, args=(URL,), kwargs=dict(refresh=True))
        for _ in range(num_process)
    ]
    for worker in workers:
        worker.start()
    exitcodes = []
    for worker in workers:
        worker.join(timeout=timeout)
        exitcodes.append(worker.exitcode)
    # All workers must agree (all succeeded, or all still running/failed).
    assert len(set(exitcodes)) == 1
    if exitcodes[0] != 0:
        for worker in workers:
            worker.terminate()
def test_download():
    """A download killed by a tiny timeout must leave no partial file behind,
    while an untimed download must produce a loadable checkpoint."""
    target = Path(_urls_to_filepaths(URL, download=False))
    # Start from a clean slate.
    if target.is_file():
        target.unlink()
    logger.info('This should timeout')
    _download_with_timeout(0.1, 2)
    assert (not target.is_file()), 'The download should failed due to the too short timeout second: 0.1 sec, and hence there should not be any corrupted (incomplete) file'
    logger.info('This should success')
    _download_with_timeout(None, 2)
    # The completed file must be a valid torch checkpoint.
    torch.load(target, map_location='cpu')
@pytest.mark.corpus
@pytest.mark.parametrize('fold_id', [0, 1, 2, 3, 4])
def test_er_dataset(fold_id):
    # Cross-check that the v4 SuperbER data pipeline reproduces exactly the
    # same (utterance, label) pairs as the legacy v3 downstream expert, for
    # every IEMOCAP fold.
    v3_er_folder = (((Path(__file__).parent.parent / 's3prl') / 'downstream') / 'emotion')
    IEMOCAP = dotenv_values()['IEMOCAP']
    with (v3_er_folder / 'config.yaml').open() as file:
        config = yaml.load(file, Loader=yaml.FullLoader)['downstream_expert']
    config['datarc']['root'] = IEMOCAP
    config['datarc']['meta_data'] = (v3_er_folder / 'meta_data')
    # v3 fold names are 1-based.
    config['datarc']['test_fold'] = f'fold{(fold_id + 1)}'
    with tempfile.TemporaryDirectory() as tempdir:
        # Build the legacy (v3) datasets.
        expert = DownstreamExpert(320, config, tempdir)
        train_dataset_v3 = expert.get_dataloader('train').dataset
        valid_dataset_v3 = expert.get_dataloader('dev').dataset
        test_dataset_v3 = expert.get_dataloader('test').dataset
    with tempfile.TemporaryDirectory() as tempdir:
        # Build the new (v4) datasets through the SuperbER problem API.
        default_config = SuperbER().default_config()
        (train_csv, valid_csv, test_csvs) = SuperbER().prepare_data({'iemocap': IEMOCAP, 'test_fold': fold_id}, tempdir, tempdir)
        encoder_path = SuperbER().build_encoder(default_config['build_encoder'], tempdir, tempdir, train_csv, valid_csv, test_csvs)
        train_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'train', train_csv, encoder_path, None)
        valid_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'valid', valid_csv, encoder_path, None)
        test_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'test', test_csvs[0], encoder_path, None)

        def compare_dataset(v3, v4):
            # Collect name -> label mappings from both pipelines and require
            # identical keys and values.
            data_v3 = {}
            for (wav, label, name) in tqdm(v3, desc='v3'):
                # A v3 split may be a Subset wrapper; unwrap it once to reach
                # class_dict (v3 is deliberately rebound here).
                if isinstance(v3, Subset):
                    v3 = v3.dataset
                # Reverse-lookup the label index back to its class name.
                label_name = [k for (k, v) in v3.class_dict.items() if (v == label)][0]
                data_v3[name] = label_name
            data_v4 = {}
            for batch in tqdm(v4, desc='v4'):
                data_v4[batch['unique_name']] = batch['label']
            assert (sorted(data_v3.keys()) == sorted(data_v4.keys()))
            for key in data_v3:
                value_v3 = data_v3[key]
                value_v4 = data_v4[key]
                assert (value_v3 == value_v4)

        compare_dataset(train_dataset_v3, train_dataset_v4)
        compare_dataset(valid_dataset_v3, valid_dataset_v4)
        compare_dataset(test_dataset_v3, test_dataset_v4)
@pytest.mark.corpus
def test_fluent_commands():
    """FluentSpeechCommands should expose its split properties without error."""
    dataset_root = dotenv_values()['FluentSpeechCommands']
    corpus = FluentSpeechCommands(dataset_root)
    # Touch each property to make sure none of them raises.
    corpus.data_split_ids
    corpus.data_split
    corpus.all_data
def test_chunking():
    """chunking() should yield the expected number of windows for both
    values of its trailing boolean flag."""
    cases = [
        ((0.0, 8.5, 2.0, 1.0, False), 7),
        ((1.1, 8.5, 2.0, 1.0, True), 8),
    ]
    for args, expected_count in cases:
        assert len(list(chunking(*args))) == expected_count
def test_frame_tensor_label():
    """chunk_labels_to_frame_tensor_label should activate each class on the
    frames its (class, start, end) segment covers."""
    segments = [(0, 3.0, 4.1), (1, 1.2, 3.2)]
    frame_label = chunk_labels_to_frame_tensor_label(1.5, 4.0, segments, 3, 160)
    # Class 0 is active at the end of the chunk, class 1 at the start.
    assert frame_label[-1, 0] == 1
    assert frame_label[0, 1] == 1
def test_g2p():
    """G2P should convert a character sentence into phonemes without error."""
    converter = G2P()
    phonemes = converter.encode('HELLO WORLD')
    logging.info(phonemes)
@pytest.mark.corpus
def test_librispeech_dataset():
    """Loading every split except train-other-500 should yield the full
    corpus size minus that split's utterance count."""
    dataset_root = dotenv_values()['LibriSpeech']
    dataset = LibriSpeech(
        dataset_root,
        train_split=['train-clean-100', 'train-clean-360'],
        valid_split=['dev-clean', 'dev-other'],
        test_split=['test-clean', 'test-other'],
    )
    expected = 292367 - libri_stats['train-other-500']
    assert len(dataset.all_data) == expected
@pytest.mark.corpus
def test_librilight():
    """The LibriLight training subset should contain exactly 48 utterances."""
    env = dotenv_values()
    train_corpus = LibriLight(env['LibriLight'])
    eval_corpus = LibriSpeech(env['LibriSpeech'], 4, [])
    train_data = train_corpus.all_data
    (_, valid_data, test_data) = eval_corpus.data_split
    assert len(train_data) == 48
def test_FrameLevel(helpers):
    """FrameLevel should accept a padded batch with per-sample lengths."""
    model = FrameLevel(3, 4, [5, 6])
    inputs = torch.randn(32, 10, 3)
    lengths = torch.full((32,), 3, dtype=torch.long)
    (hidden, hidden_len) = model(inputs, lengths)
def test_load_audio():
    """LoadAudio should yield items whose 'wav' entry is a torch tensor."""
    with pseudo_audio([3.0, 4.0, 5.2]) as (paths, num_frames):
        dataset = LoadAudio(paths, [None, 1.0, 3.1], [None, 3.2, None], max_secs=4.2)
        assert all(isinstance(item['wav'], torch.Tensor) for item in dataset)
def isclose(x: float, y: float) -> bool:
    """Return True when ``x`` and ``y`` differ by less than 1e-9.

    Fix: the return annotation previously said ``float`` although the
    comparison yields a ``bool``.
    """
    return abs(x - y) < 1e-09
def test_metric():
    """cer/wer/per should match hand-computed error rates on one pair."""
    hyps = ['a ac abb d']
    refs = ['a ab abc d']
    expectations = [(cer, 0.2), (wer, 0.5), (per, 0.5)]
    for metric_fn, expected in expectations:
        assert isclose(metric_fn(hyps, refs), expected)
@pytest.mark.parametrize('pooling_type', ['MeanPooling', 'TemporalStatisticsPooling', 'AttentiveStatisticsPooling', 'SelfAttentivePooling'])
def test_utterance_level_with_pooling(pooling_type: str):
    """UtteranceLevel should reduce (batch, time, dim) to (batch, out_dim)
    for every supported pooling type."""
    batch, time_steps, in_dim, out_dim = 32, 100, 256, 64
    model = UtteranceLevel(in_dim, out_dim, [128], 'ReLU', None, pooling_type, None)
    lengths = torch.arange(batch) + 1
    output = model(torch.randn(batch, time_steps, in_dim), lengths)
    assert output.shape == (batch, out_dim)
@pytest.mark.corpus
def test_quesst14_for_qbe():
    # Build the query-by-example view of Quesst14 and sanity-check that the
    # valid/test queries plus documents cover all 2714 audio files.

    def quesst14_for_qbe(dataset_root: str):
        corpus = Quesst14(dataset_root)

        def path_to_dict(path: str):
            return dict(wav_path=path)
        return dict(all_data={Path(path).stem: path_to_dict(path) for path in ((corpus.valid_queries + corpus.test_queries) + corpus.docs)}, valid_keys=[Path(path).stem for path in corpus.valid_queries], test_keys=[Path(path).stem for path in corpus.test_queries], doc_keys=[Path(path).stem for path in corpus.docs])
    quesst_root = dotenv_values()['Quesst14']
    # NOTE(review): unpacking .values() relies on dict insertion order
    # (all_data, valid_keys, test_keys, doc_keys); guaranteed on Python 3.7+.
    (all_data, valid_keys, test_keys, doc_keys) = quesst14_for_qbe(quesst_root).values()
    assert (len(all_data) == 2714)
    assert (((len(valid_keys) + len(test_keys)) + len(doc_keys)) == 2714)
def test_rnn(helpers):
    """RNNEncoder with a rate-2 middle layer should halve the time axis for
    both the 'drop' and 'concat' subsampling styles."""
    shared_kwargs = dict(
        input_size=8,
        output_size=6,
        module='LSTM',
        hidden_size=[10, 10, 10],
        dropout=[0.1, 0.1, 0.1],
        layer_norm=[True, True, True],
        proj=[True, True, True],
        sample_rate=[1, 2, 1],
        bidirectional=True,
    )
    for sample_style in ('drop', 'concat'):
        encoder = RNNEncoder(sample_style=sample_style, **shared_kwargs)
        xs = torch.randn(32, 50, encoder.input_size)
        xs_len = torch.arange(32) + (50 - 32) + 1
        (out, out_len) = encoder(xs, xs_len)
        # 50 input frames subsampled by 2 -> 25 output frames.
        assert out.shape[1] == 25
        assert out.shape[2] == encoder.output_size
        assert out_len.max() == 25
def _merge_batch_indices(batch_indices): all_indices = [] for indices in batch_indices: all_indices += indices return all_indices
@pytest.mark.parametrize('world_size', [1, 2, 3, 4, 5, 6, 7, 8])
def test_distributed_sampler(world_size):
    """Sharding a batch sampler across ranks must cover exactly the original
    indices — nothing lost, nothing duplicated."""
    sampler = [[1, 2, 3], [4, 5, 6, 7], [8], [9, 10]]
    sharded_indices = []
    for rank in range(world_size):
        rank_sampler = DistributedBatchSamplerWrapper(sampler, world_size, rank)
        sharded_indices.extend(_merge_batch_indices(rank_sampler))
    assert sorted(sharded_indices) == sorted(_merge_batch_indices(sampler))
@pytest.mark.parametrize('batch_size', [1, 2, 3, len(data)])
def test_FixedBatchSizeBatchSampler(batch_size):
    # Shuffling must not change WHICH indices are emitted, only their order.
    dataset = data
    iter1 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=False)))
    iter2 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=True)))
    indices1 = sorted(_merge_batch_indices(iter1))
    indices2 = sorted(_merge_batch_indices(iter2))
    # NOTE(review): the expectation uses len(timestamps) although the sampler
    # iterates `data` -- presumably the two module-level sequences are the
    # same length; confirm, or use len(data) here for clarity.
    assert (indices1 == indices2 == list(range(len(timestamps))))
@pytest.mark.corpus
def test_snips():
    """Each SNIPS split should contain the documented number of utterances."""
    dataset_root = dotenv_values()['SNIPS']
    train_speakers = ['Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Matthew', 'Salli']
    valid_speakers = ['Aditi', 'Amy', 'Geraint', 'Nicole']
    test_speakers = ['Brian', 'Emma', 'Raveena', 'Russell']
    dataset = SNIPS(dataset_root, train_speakers, valid_speakers, test_speakers)
    (train_data, valid_data, test_data) = dataset.data_split
    expected_sizes = (104672, 2800, 2800)
    actual_sizes = (len(train_data), len(valid_data), len(test_data))
    assert actual_sizes == expected_sizes