# NOTE(review): the four tokens below are dataset-export residue
# ("code stringlengths 17 6.64M"), not Python source — kept commented out.
# code
# stringlengths
# 17
# 6.64M
def load_converted_model(ckpt: str):
    """Instantiate a Wav2VecModel from a fairseq-independent checkpoint.

    Args:
        ckpt: path to a converted checkpoint file holding the entries
            'task_cfg', 'model_cfg' and 'model_weight'.

    Returns:
        A (model, task_cfg) tuple.

    Raises:
        ValueError: when any required entry is absent from the checkpoint.
    """
    ckpt_state = torch.load(ckpt, map_location="cpu")

    # Validate the checkpoint layout before touching any of its contents.
    for required_key in ("task_cfg", "model_cfg", "model_weight"):
        if required_key in ckpt_state:
            continue
        raise ValueError(
            f"{ckpt} is not a valid checkpoint since the required key: {required_key} is missing"
        )

    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state["task_cfg"])
    model_cfg = merge_with_parent(Wav2VecConfig, ckpt_state["model_cfg"])
    model = Wav2VecModel(model_cfg)
    model.load_state_dict(ckpt_state["model_weight"])
    return model, task_cfg
class UpstreamExpert(UpstreamBase):
    """Fairseq-independent wav2vec upstream expert.

    Loads a converted checkpoint and exposes per-layer representations via
    UpstreamBase hooks on the feature extractor / aggregator modules.
    """

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        (self.model, task_cfg) = load_converted_model(ckpt)
        # Only install the default hooks when the caller did not pass any.
        if (len(self.hooks) == 0):
            self.add_hook('self.model.feature_extractor', (lambda input, output: output.transpose(1, 2)))
            self.add_hook('self.model.feature_aggregator', (lambda input, output: output.transpose(1, 2)))
            # Hook the *input* of every aggregator conv layer after the first,
            # i.e. the output of each preceding layer.
            module_name = 'self.model.feature_aggregator.conv_layers'
            for conv_id in range((len(eval(module_name)) - 1)):
                self.add_hook(f'{module_name}[{(conv_id + 1)}]', (lambda input, output: input[0].transpose(1, 2)))

    def get_downsample_rates(self, key: str) -> int:
        # wav2vec downsamples 16kHz audio by a factor of 160 (10ms frames).
        return 160

    def forward(self, wavs):
        """Run the extractor (and optional quantizer) plus aggregator.

        Code snippet modified from fairseq.

        Returns a dict with keys 'z' (extractor features), 'c' (aggregator
        features, also 'default'), and when a vector quantizer is present,
        'codewords' and 'codeids'. All features are (batch, time, dim).
        """
        result = {}
        padded_wav = pad_sequence(wavs, batch_first=True)
        features = self.model.feature_extractor(padded_wav)
        result['z'] = features.transpose(1, 2).contiguous()
        # Truthiness check: vector_quantizer is None when quantization is off.
        if self.model.vector_quantizer:
            q_res = self.model.vector_quantizer(features, produce_targets=True)
            result['codewords'] = q_res['x'].transpose(1, 2).contiguous()
            result['codeids'] = q_res['targets']
            # Downstream aggregation consumes the quantized features.
            features = q_res['x']
        x = self.model.dropout_feats(features)
        x = self.model.feature_aggregator(x)
        result['c'] = x.transpose(1, 2).contiguous()
        result['default'] = result['c']
        return result
class LegacyUpstreamExpert(UpstreamBase):
    """Fairseq-dependent wav2vec expert kept for backward compatibility.

    Loads the original fairseq checkpoint directly through fairseq's
    Wav2VecModel instead of the converted, fairseq-independent format.
    """

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): the message mentions HuBERT but this is the wav2vec
        # expert — looks like a copy-paste; confirm before changing the text.
        logger.warning('Use the legacy expert for HuBERT which depends on fairseq')
        # Imported lazily so fairseq is only required on the legacy path.
        import fairseq
        from fairseq.models.wav2vec import Wav2VecModel
        from packaging import version
        if (version.parse(fairseq.__version__) > version.parse('0.10.2')):
            cp = torch.load(ckpt)
            args = cp['args']
            # Newer fairseq no longer fills the architecture defaults for us.
            base_wav2vec_architecture(args)
            self.model = Wav2VecModel.build_model(args, task=None)
            self.model.load_state_dict(cp['model'])
        elif (version.parse(fairseq.__version__) == version.parse('0.10.2')):
            cp = torch.load(ckpt)
            self.model = Wav2VecModel.build_model(cp['args'], task=None)
            self.model.load_state_dict(cp['model'])
        else:
            raise NotImplementedError
        # Only install the default hooks when the caller did not pass any.
        if (len(self.hooks) == 0):
            self.add_hook('self.model.feature_extractor', (lambda input, output: output.transpose(1, 2)))
            self.add_hook('self.model.feature_aggregator', (lambda input, output: output.transpose(1, 2)))
            # Hook the *input* of every aggregator conv layer after the first,
            # i.e. the output of each preceding layer.
            module_name = 'self.model.feature_aggregator.conv_layers'
            for conv_id in range((len(eval(module_name)) - 1)):
                self.add_hook(f'{module_name}[{(conv_id + 1)}]', (lambda input, output: input[0].transpose(1, 2)))

    def get_downsample_rates(self, key: str) -> int:
        # wav2vec downsamples 16kHz audio by a factor of 160 (10ms frames).
        return 160

    def forward(self, wavs):
        """Run the extractor (and optional quantizer) plus aggregator.

        Code snippet modified from fairseq.

        Returns a dict with keys 'z' (extractor features), 'c' (aggregator
        features, also 'default'), and when a vector quantizer is present,
        'codewords' and 'codeids'. All features are (batch, time, dim).
        """
        result = {}
        padded_wav = pad_sequence(wavs, batch_first=True)
        features = self.model.feature_extractor(padded_wav)
        result['z'] = features.transpose(1, 2).contiguous()
        if self.model.vector_quantizer:
            q_res = self.model.vector_quantizer(features, produce_targets=True)
            result['codewords'] = q_res['x'].transpose(1, 2).contiguous()
            result['codeids'] = q_res['targets']
            features = q_res['x']
        x = self.model.dropout_feats(features)
        x = self.model.feature_aggregator(x)
        result['c'] = x.transpose(1, 2).contiguous()
        result['default'] = result['c']
        return result
def base_wav2vec_architecture(args):
    """Fill in default hyper-parameters for the base wav2vec architecture.

    Mirrors fairseq's base architecture function: every attribute that is
    not already set on ``args`` receives its published default; attributes
    the caller already set are left untouched.
    """
    conv_feature_layers = "[(512, 10, 5)]"
    conv_feature_layers += " + [(512, 8, 4)]"
    conv_feature_layers += " + [(512, 4, 2)] * 3"

    # One place for every default, applied uniformly below.
    defaults = {
        "conv_feature_layers": conv_feature_layers,
        "conv_aggregator_layers": "[(512, 3, 1)] * 9",
        "prediction_steps": 12,
        "num_negatives": 1,
        "sample_distance": None,
        "cross_sample_negatives": 0,
        "dropout": 0.0,
        "dropout_features": 0.0,
        "dropout_agg": 0.0,
        "encoder": "cnn",
        "aggregator": "cnn",
        "skip_connections_feat": False,
        "skip_connections_agg": False,
        "residual_scale": 0.5,
        "gru_dim": 512,
        "no_conv_bias": False,
        "agg_zero_pad": False,
        "log_compression": False,
        "balanced_classes": False,
        "infonce": False,
        "project_features": "none",
        "non_affine_group_norm": False,
        "offset": "auto",
        "activation": "relu",
        "vq_type": "none",
        "vq_vars": 320,
        "vq_groups": 2,
        "vq_dim": 0,
        "vq_depth": 1,
        "combine_groups": False,
        "vq_temp": "(2.0, 0.5, 0.999995)",
        "vq_gamma": 0.25,
    }
    for name, value in defaults.items():
        setattr(args, name, getattr(args, name, value))
def wav2vec_custom(ckpt: str, *args, legacy: bool=False, refresh: bool=False, **kwargs):
    """Build a wav2vec upstream expert from a checkpoint path or URL.

    Args:
        ckpt: local checkpoint path, or a URL which is downloaded first.
        legacy: load the original fairseq checkpoint through the
            fairseq-dependent legacy expert instead of the converted one.
        refresh: whether to re-download the checkpoint even when cached.

    Returns:
        An upstream expert instance wrapping the loaded model.
    """
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    # BUG FIX: the function previously resolved the checkpoint path but never
    # instantiated an expert, implicitly returning None to every caller.
    # Mirrors the tail of wav2vec2_custom in this file.
    if legacy:
        return _LegacyUpstreamExpert(ckpt, *args, **kwargs)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def wav2vec_local(*args, **kwargs):
    """Load a wav2vec model from a local checkpoint path."""
    return wav2vec_custom(*args, **kwargs)
def wav2vec_url(*args, **kwargs):
    """Load a wav2vec model from a checkpoint URL."""
    return wav2vec_custom(*args, **kwargs)
def wav2vec(refresh=False, *args, **kwargs):
    """
    The default model - Large model

    refresh (bool): whether to download ckpt/config again if existed
    """
    return wav2vec_large(*args, refresh=refresh, **kwargs)
def wav2vec_large(refresh=False, legacy=False, **kwargs):
    """
    The Large model

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_large.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_large.pt'
    return wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str=None):
    """Convert a fairseq checkpoint into the fairseq-independent layout.

    The converted state stores the task config, model config and model
    weights under fixed keys; when `output_path` is given the state is
    saved there (parent directories are created as needed).
    """
    state, cfg = load_fairseq_ckpt(fairseq_source)
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
    }
    if output_path is None:
        return
    Path(output_path).parent.mkdir(exist_ok=True, parents=True)
    torch.save(output_state, output_path)
def load_converted_model(ckpt: str):
    """Instantiate a Wav2Vec2Model from a fairseq-independent checkpoint.

    Args:
        ckpt: path to a converted checkpoint file holding the entries
            'task_cfg', 'model_cfg' and 'model_weight'.

    Returns:
        A (model, task_cfg) tuple.

    Raises:
        ValueError: when any required entry is absent from the checkpoint.
    """
    ckpt_state = torch.load(ckpt, map_location="cpu")

    # Validate the checkpoint layout before touching any of its contents.
    for required_key in ("task_cfg", "model_cfg", "model_weight"):
        if required_key in ckpt_state:
            continue
        raise ValueError(
            f"{ckpt} is not a valid checkpoint since the required key: {required_key} is missing"
        )

    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state["task_cfg"])
    model_cfg = merge_with_parent(Wav2Vec2Config, ckpt_state["model_cfg"])
    model = Wav2Vec2Model(model_cfg)
    model.load_state_dict(ckpt_state["model_weight"])
    return model, task_cfg
def wav2vec2_custom(ckpt: str, legacy: bool=False, fairseq: bool=False, refresh: bool=False, **kwargs):
    """Build a wav2vec 2.0 upstream expert from a checkpoint path or URL.

    Args:
        ckpt: local checkpoint path, or a URL which is downloaded first.
        legacy: load the fairseq checkpoint directly via the legacy expert.
        fairseq: convert the fairseq checkpoint to the fairseq-independent
            format first, then load the converted file.
        refresh: whether to re-download / re-convert even when cached.

    Returns:
        An upstream expert instance wrapping the loaded model.
    """
    assert (not (legacy and fairseq)), "The option 'legacy' will directly load a fairseq checkpoint, while the option 'fairseq' will first convert the fairseq checkpoint to be fairseq indenpendent and then load the checkpoint. These two options cannot be used jointly."
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    if fairseq:
        ckpt: Path = Path(ckpt)
        # The converted file lives next to the original checkpoint.
        converted_ckpt = (ckpt.parent / f'{ckpt.stem}.converted.pt')
        lock_file = Path((str(converted_ckpt) + '.lock'))
        logger.info(f'Converting a fairseq checkpoint: {ckpt}')
        logger.info(f'To: {converted_ckpt}')
        # FileLock guards against concurrent processes converting the same
        # checkpoint; the mtime check treats a recently-touched source file
        # as a request to reconvert when refresh is set.
        with FileLock(str(lock_file)):
            if ((not converted_ckpt.is_file()) or (refresh and ((time.time() - os.path.getmtime(ckpt)) > NEW_ENOUGH_SECS))):
                load_and_convert_fairseq_ckpt(ckpt, converted_ckpt)
        ckpt = converted_ckpt
    assert os.path.isfile(ckpt)
    if legacy:
        return _LegacyUpstreamExpert(ckpt, **kwargs)
    else:
        return _UpstreamExpert(ckpt, **kwargs)
def wav2vec2_local(*args, **kwargs):
    """Load a wav2vec 2.0 model from a local checkpoint path."""
    return wav2vec2_custom(*args, **kwargs)
def wav2vec2_url(*args, **kwargs):
    """Load a wav2vec 2.0 model from a checkpoint URL."""
    return wav2vec2_custom(*args, **kwargs)
def wav2vec2(refresh=False, *args, **kwargs):
    """
    The default model - Base

    refresh (bool): whether to download ckpt/config again if existed
    """
    return wav2vec2_base_960(*args, refresh=refresh, **kwargs)
def wav2vec2_base_960(refresh=False, legacy=False, **kwargs):
    """
    The Base model

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_small.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_960(refresh=False, legacy=False, **kwargs):
    """
    The Large model trained on LibriSpeech 960 hours of data

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/libri960_big.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/libri960_big.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_ll60k(refresh=False, legacy=False, **kwargs):
    """
    The Large model trained on Libri-light 60k hours of data

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_vox_new.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_lv60_cv_swbd_fsh(refresh=False, legacy=False, **kwargs):
    """
    The Large model trained on Libri-Light 60k hours + CommonVoice + Switchboard + Fisher

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/w2v_large_lv_fsh_swbd_cv.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xlsr_53(refresh=False, legacy=False, **kwargs):
    """
    The wav2vec 2.0 model trained on multilingual presented in https://arxiv.org/abs/2006.13979

    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr_53_56k.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_300m(refresh=False, legacy=False, **kwargs):
    """
    XLS-R, this smallest size has the same parameters as the Largs model of wav2vec 2.0 and HuBERT
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_300m.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_300m.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_1b(refresh=False, legacy=False, **kwargs):
    """The XLS-R 1B model."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_960m_1000k.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_960m_1000k.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_2b(refresh=False, legacy=False, **kwargs):
    """The XLS-R 2B model."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_2B_1000k.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_2B_1000k.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_relpos(refresh=False, legacy=False, **kwargs):
    """Conformer wav2vec 2.0 with relative positional encoding, Libri-Light pretrained."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_relpos_PT_no_FT'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/LL_relpos_PT_no_FT.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_rope(refresh=False, legacy=False, **kwargs):
    """Conformer wav2vec 2.0 with rotary positional encoding, Libri-Light pretrained."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_rope_PT_no_FT'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/LL_rope_PT_no_FT.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_voxpopuli_100k(refresh=False, legacy=False, **kwargs):
    """Large wav2vec 2.0 pretrained on the VoxPopuli 100k-hour corpus."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/voxpopuli/models/wav2vec2_large_100k.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_large_100k.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_base_s2st_es_voxpopuli(refresh=False, legacy=False, **kwargs):
    """Base wav2vec 2.0 fine-tuned for speech-to-speech translation (Spanish, VoxPopuli)."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/es/transformer_B.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_base_s2st_es_voxpopuli.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_large_s2st_es_voxpopuli(refresh=False, legacy=False, **kwargs):
    """Conformer Large wav2vec 2.0 fine-tuned for speech-to-speech translation (Spanish, VoxPopuli)."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/es/conformer_L.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_conformer_large_s2st_es_voxpopuli.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_base_s2st_en_librilight(refresh=False, legacy=False, **kwargs):
    """Base wav2vec 2.0 fine-tuned for speech-to-speech translation (English, Libri-Light)."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/en/transformer_B.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_base_s2st_en_librilight.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_large_s2st_en_librilight(refresh=False, legacy=False, **kwargs):
    """Conformer Large wav2vec 2.0 fine-tuned for speech-to-speech translation (English, Libri-Light)."""
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/en/conformer_L.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_conformer_large_s2st_en_librilight.pt'
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
class UpstreamExpert(UpstreamBase):
    """WavLM upstream expert exposing per-layer encoder representations via hooks."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        checkpoint = torch.load(ckpt)
        self.cfg = WavLMConfig(checkpoint['cfg'])
        self.model = WavLM(self.cfg)
        self.model.load_state_dict(checkpoint['model'])
        # Freeze feature-extractor gradients and disable stochastic layer drop
        # so feature extraction is deterministic.
        self.model.feature_grad_mult = 0.0
        self.model.encoder.layerdrop = 0.0
        # Only install the default hooks when the caller did not pass any.
        if (len(self.hooks) == 0):
            module_name = 'self.model.encoder.layers'
            for module_id in range(len(eval(module_name))):
                # Each layer's hook captures its *input* (the previous layer's
                # output), transposed from (seq, batch, dim) to (batch, seq, dim).
                self.add_hook(f'{module_name}[{module_id}]', (lambda input, output: input[0].transpose(0, 1)))
            self.add_hook('self.model.encoder', (lambda input, output: output[0]))
        # Saved after the line above set layerdrop to 0.0, so restoring via
        # set_layer_drop(None) restores 0.0, not the checkpoint's value.
        self._init_layerdrop = self.model.encoder.layerdrop

    @property
    def layer_drop(self):
        # Current stochastic layer-drop probability of the encoder.
        return self.model.encoder.layerdrop

    def set_layer_drop(self, layerdrop: float=None):
        """Set the encoder layerdrop; None restores the value saved at init."""
        if isinstance(layerdrop, float):
            self.model.encoder.layerdrop = layerdrop
        elif (layerdrop is None):
            self.model.encoder.layerdrop = self._init_layerdrop
        else:
            raise ValueError('layerdrop can only be float or None')

    def get_downsample_rates(self, key: str) -> int:
        # WavLM downsamples 16kHz audio by a factor of 320 (20ms frames).
        return 320

    def forward(self, wavs):
        """Run the WavLM encoder over a batch of variable-length waveforms.

        NOTE(review): this method has no return statement in the visible
        source — presumably the representations are collected by the hooks
        registered in __init__ (UpstreamBase machinery); confirm against the
        full file that no trailing code was truncated.
        """
        if self.cfg.normalize:
            # Per-utterance layer norm, as done during WavLM pretraining when
            # cfg.normalize is set.
            wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        # True at padded positions: index >= utterance length.
        wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1)))
        padded_wav = pad_sequence(wavs, batch_first=True)
        (features, feat_padding_mask) = self.model.extract_features(padded_wav, padding_mask=wav_padding_mask, mask=False)
def wavlm_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt

    ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def wavlm_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from a checkpoint URL

    ckpt (str): URL
    refresh (bool): whether to download ckpt/config again if existed
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return wavlm_local(local_ckpt, *args, **kwargs)
def wavlm(refresh=False, *args, **kwargs):
    """
    The default model - Base-Plus

    refresh (bool): whether to download ckpt/config again if existed
    """
    return wavlm_base_plus(*args, refresh=refresh, **kwargs)
def wavlm_base(refresh=False, *args, **kwargs):
    """
    The Base model

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_base.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def wavlm_base_plus(refresh=False, *args, **kwargs):
    """
    The Base-Plus model

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_base_plus.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def wavlm_large(refresh=False, *args, **kwargs):
    """
    The Large model

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wavlm_large.pt'
    return wavlm_url(*args, refresh=refresh, **kwargs)
def get_cache_dir():
    """Return the module-level cache directory, creating it on first use."""
    _default_cache_dir.mkdir(exist_ok=True, parents=True)
    return _default_cache_dir
def set_cache_dir(cache_dir: str):
    """Override the module-level default cache directory (global side effect)."""
    global _default_cache_dir
    _default_cache_dir = Path(cache_dir)
def get_audio_info(audio_paths: List[str], audio_ids: List[str], cache_dir: str=None, num_workers: int=6) -> List[dict]:
    """
    Use :code:`torchaudio.info` to retrieve the metadata from audio paths.
    The retrieved metadata is cached in :code:`cache_dir`

    Args:
        audio_paths: audio file paths to inspect.
        audio_ids: one unique id per path, used as the cache filename.
        cache_dir: where to store per-file JSON metadata; defaults to
            the module cache directory from get_cache_dir().
        num_workers: parallel worker count for joblib.

    Returns:
        One metadata dict per input path (sample_rate, num_frames,
        num_channels, bits_per_sample, encoding).
    """
    cache_dir = (cache_dir or get_cache_dir())
    cache_dir: Path = Path(cache_dir)

    def _get_info(audio_path: str, audio_id: str):
        cache_file = (cache_dir / f'{audio_id}.json')
        if cache_file.is_file():
            with cache_file.open() as f:
                info = json.load(f)
            return info
        torchaudio.set_audio_backend('sox_io')
        torchaudio_info = torchaudio.info(audio_path)
        info = {
            'sample_rate': torchaudio_info.sample_rate,
            'num_frames': torchaudio_info.num_frames,
            'num_channels': torchaudio_info.num_channels,
            'bits_per_sample': torchaudio_info.bits_per_sample,
            'encoding': torchaudio_info.encoding,
        }
        # BUG FIX: the docstring promises caching, but the metadata was never
        # written back, so every call re-read the audio headers.
        cache_file.parent.mkdir(exist_ok=True, parents=True)
        with cache_file.open('w') as f:
            json.dump(info, f)
        return info

    infos = Parallel(n_jobs=num_workers)(
        (delayed(_get_info)(path, idx)
         for (path, idx) in tqdm(zip(audio_paths, audio_ids), desc='Get audio metadata'))
    )
    return infos
class benchmark(ContextDecorator):
    """Context manager / decorator that logs wall-clock timings of a region.

    Timings are accumulated per `name` in the module-level `_history` dict;
    a summary is logged every `freq` invocations. Calls
    torch.cuda.synchronize() at both edges so the measured interval covers
    pending CUDA work.
    """

    def __init__(self, name: str, freq: int=1) -> None:
        super().__init__()
        self.name = name   # key under which timings accumulate in _history
        self.freq = freq   # log a summary every `freq` completed runs

    def __enter__(self):
        # Drain queued CUDA kernels so the start time is accurate.
        torch.cuda.synchronize()
        self.start = time()

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        torch.cuda.synchronize()
        seconds = (time() - self.start)
        # Shared module-level accumulator keyed by benchmark name.
        global _history
        _history[self.name].append(seconds)
        if ((len(_history[self.name]) % self.freq) == 0):
            logger.warning(f'{self.name}: {seconds} secs, avg {np.array(_history[self.name]).mean()} secs')
def get_dir():
    """Return the module-level download directory, creating it on first use."""
    _download_dir.mkdir(exist_ok=True, parents=True)
    return _download_dir
def set_dir(d):
    """Override the module-level download directory (global side effect)."""
    global _download_dir
    _download_dir = Path(d)
def _download_url_to_file(url, dst, hash_prefix=None, progress=True):
    """Stream `url` into `dst` with an optional SHA-256 prefix check.

    This function is not thread-safe. Please ensure only a single
    thread or process can enter this block at the same time.

    Args:
        url: source URL.
        dst: destination file path; written atomically via a temp file.
        hash_prefix: when given, the download's sha256 hex digest must start
            with this prefix, otherwise RuntimeError is raised.
        progress: show a tqdm progress bar.
    """
    file_size = None
    req = Request(url, headers={'User-Agent': 'torch.hub'})
    u = urlopen(req)
    meta = u.info()
    # getheaders exists on legacy http message objects; get_all is the
    # email.message.Message API used by modern urllib responses.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders('Content-Length')
    else:
        content_length = meta.get_all('Content-Length')
    if ((content_length is not None) and (len(content_length) > 0)):
        file_size = int(content_length[0])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Temp file in the destination directory so the final move is a cheap
    # same-filesystem rename.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if (hash_prefix is not None):
            sha256 = hashlib.sha256()
        tqdm.write(f'Downloading: {url}', file=sys.stderr)
        tqdm.write(f'Destination: {dst}', file=sys.stderr)
        with tqdm(total=file_size, disable=(not progress), unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            while True:
                buffer = u.read(8192)
                if (len(buffer) == 0):
                    break
                f.write(buffer)
                if (hash_prefix is not None):
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if (hash_prefix is not None):
            digest = sha256.hexdigest()
            if (digest[:len(hash_prefix)] != hash_prefix):
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # Second close is a harmless no-op on success; the existence check
        # removes the temp file only when the move did not happen.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def _download_url_to_file_requests(url, dst, hash_prefix=None, progress=True):
    """Fallback downloader using `requests` when urllib.Request fails.

    Same contract as _download_url_to_file: atomic write via a temp file and
    an optional SHA-256 prefix check.
    """
    req = requests.get(url, stream=True, headers={'User-Agent': 'torch.hub'})
    # NOTE(review): raises KeyError when the server omits Content-Length —
    # confirm whether that is acceptable for the URLs used here.
    file_size = int(req.headers['Content-Length'])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Temp file in the destination directory so the final move is a cheap
    # same-filesystem rename.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if (hash_prefix is not None):
            sha256 = hashlib.sha256()
        tqdm.write(f'urllib.Request method failed. Trying using another method...', file=sys.stderr)
        tqdm.write(f'Downloading: {url}', file=sys.stderr)
        tqdm.write(f'Destination: {dst}', file=sys.stderr)
        with tqdm(total=file_size, disable=(not progress), unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            # 10 MiB chunks.
            for chunk in req.iter_content(chunk_size=((1024 * 1024) * 10)):
                if chunk:
                    f.write(chunk)
                    f.flush()
                    os.fsync(f.fileno())
                    if (hash_prefix is not None):
                        sha256.update(chunk)
                    pbar.update(len(chunk))
        f.close()
        if (hash_prefix is not None):
            digest = sha256.hexdigest()
            if (digest[:len(hash_prefix)] != hash_prefix):
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # Second close is a harmless no-op on success; the existence check
        # removes the temp file only when the move did not happen.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def _download(filepath: Path, url, refresh: bool, new_enough_secs: float=2.0):
    """
    If refresh is True, check the latest modfieid time of the filepath.
    If the file is new enough (no older than `new_enough_secs`), than directly use it.
    If the file is older than `new_enough_secs`, than re-download the file.
    This function is useful when multi-processes are all downloading the same large file
    """
    Path(filepath).parent.mkdir(exist_ok=True, parents=True)
    lock_file = Path((str(filepath) + '.lock'))
    logger.info(f'Requesting URL: {url}')
    # FileLock serializes concurrent processes downloading the same file; a
    # process that waited on the lock will then see a fresh mtime and skip.
    with FileLock(str(lock_file)):
        if ((not filepath.is_file()) or (refresh and ((time.time() - os.path.getmtime(filepath)) > new_enough_secs))):
            try:
                _download_url_to_file(url, filepath)
            # NOTE(review): bare except also swallows KeyboardInterrupt
            # before falling back to the requests-based downloader.
            except:
                _download_url_to_file_requests(url, filepath)
    logger.info(f"Using URL's local file: {filepath}")
def _urls_to_filepaths(*args, refresh=False, download: bool=True):
    """
    Preprocess the URL specified in *args into local file paths after downloading

    Args:
        Any number of URLs (1 ~ any)

    Return:
        Same number of downloaded file paths
    """

    def _url_to_filepath(url):
        assert isinstance(url, str)
        # Hash the URL so distinct URLs with identical basenames cannot collide.
        digest = hashlib.sha256(str.encode(url)).hexdigest()
        filepath = get_dir() / f'{digest}.{Path(url).name}'
        if download:
            _download(filepath, url, refresh=refresh)
        return str(filepath.resolve())

    paths = [_url_to_filepath(url) for url in args]
    if len(paths) > 1:
        return paths
    return paths[0]
def parse_override(string):
    """
    Example usgae:
        -o "optimizer.lr=1.0e-3,,optimizer.name='AdamW',,runner.eval_dataloaders=['dev', 'test']"

    Convert to:
        {
            "optimizer": {"lr": 1.0e-3, "name": "AdamW"},
            "runner": {"eval_dataloaders": ["dev", "test"]}
        }
    """
    # ',,' separates key=value pairs so values may themselves contain commas.
    options = string.split(',,')
    config = {}
    for option in options:
        option = option.strip()
        (key, value_str) = option.split('=')
        (key, value_str) = (key.strip(), value_str.strip())
        # Dotted key path, e.g. 'optimizer.lr' -> ['optimizer', 'lr'].
        remaining = key.split('.')
        # SECURITY NOTE: eval on the override string — only use with trusted
        # command-line input.
        try:
            value = eval(value_str)
        except:
            # Unparseable values fall back to the raw string.
            value = value_str
        logger.info(f'{key} = {value}')
        # Walk/create the nested dicts down to the final field.
        target_config = config
        for (i, field_name) in enumerate(remaining):
            if (i == (len(remaining) - 1)):
                target_config[field_name] = value
            else:
                target_config.setdefault(field_name, {})
                target_config = target_config[field_name]
    return config
def parse_overrides(options: list):
    """
    Example usgae:
        [
            "--optimizer.lr",
            "1.0e-3",
            "--optimizer.name",
            "AdamW",
            "--runner.eval_dataloaders",
            "['dev', 'test']",
        ]

    Convert to:
        {
            "optimizer": {"lr": 1.0e-3, "name": "AdamW"},
            "runner": {"eval_dataloaders": ["dev", "test"]}
        }
    """
    config = {}
    # Options come in (--key, value) pairs.
    for position in range(0, len(options), 2):
        key: str = options[position]
        assert key.startswith('--')
        # BUG FIX: the previous `key.strip('--')` removed ALL leading and
        # trailing '-' characters, mangling keys that end with a dash
        # (e.g. '--my-key-'); slice off exactly the two leading dashes.
        key = key[2:]
        value_str: str = options[(position + 1)]
        (key, value_str) = (key.strip(), value_str.strip())
        # Dotted key path, e.g. 'optimizer.lr' -> ['optimizer', 'lr'].
        remaining = key.split('.')
        # SECURITY NOTE: eval on the override string — only use with trusted
        # command-line input.
        try:
            value = eval(value_str)
        except Exception:
            # These names indicate a value that was meant to be parsed;
            # surface the error instead of silently keeping the string.
            if (('newdict' in value_str) or ('Container' in value_str)):
                raise
            value = value_str
        logger.debug(f'{key} = {value}')
        # Walk/create the nested dicts down to the final field.
        target_config = config
        for (i, field_name) in enumerate(remaining):
            if (i == (len(remaining) - 1)):
                target_config[field_name] = value
            else:
                target_config.setdefault(field_name, {})
                target_config = target_config[field_name]
    return config
class pseudo_audio():
    """
    This context manager returns filepaths (List[str]) and num_samples (List[int]) on entering

    Random mono wav files of the requested durations are written into a
    private temporary directory, which is removed on exit.
    """

    def __init__(self, secs: List[float], sample_rate: int=SAMPLE_RATE):
        # BUG FIX: the previous code used Path(tempfile.TemporaryDirectory().name),
        # immediately dropping the TemporaryDirectory object; its finalizer can
        # delete the directory at any garbage-collection point while the audio
        # files are still in use. mkdtemp() creates the directory with no
        # finalizer, and __exit__ removes it explicitly.
        self.tempdir = Path(tempfile.mkdtemp())
        self.num_samples = []
        for (n, sec) in enumerate(secs):
            wav = torch.randn(1, round((sample_rate * sec)))
            torchaudio.save(str((self.tempdir / f'audio_{n}.wav')), wav, sample_rate=sample_rate)
            self.num_samples.append(wav.size((- 1)))
        self.filepaths = [str((self.tempdir / f'audio_{i}.wav')) for i in range(len(secs))]

    def __enter__(self):
        return (self.filepaths, self.num_samples)

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # Remove the directory and all generated files.
        shutil.rmtree(self.tempdir)
def get_pseudo_wavs(seed: int=0, n: int=2, min_secs: int=1, max_secs: int=3, sample_rate: int=SAMPLE_RATE, device: str='cpu', padded: bool=False):
    """Generate `n` random waveforms with reproducible lengths and values.

    Returns a list of 1-D tensors, or when `padded` is True a
    (padded_batch, lengths) tuple.
    """
    random.seed(seed)
    torch.manual_seed(seed)

    wavs, wavs_len = [], []
    for _ in range(n):
        num_samples = random.randint(min_secs * sample_rate, max_secs * sample_rate)
        wavs_len.append(num_samples)
        wavs.append(torch.randn(num_samples, requires_grad=True).to(device))

    if padded:
        return pad_sequence(wavs, batch_first=True), torch.LongTensor(wavs_len)
    return wavs
def fix_random_seeds(seed: int=1337) -> None:
    """Fixes all random seeds, including cuDNN backends.

    Args:
        seed (int, optional): Random seed. Defaults to 1337.
    """
    # Seed every RNG the project draws from.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning for bitwise reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def read_file(path, callback=(lambda x: x), sep=' ', default_value=''):
    """Parse a two-column text file into a dict.

    Each line is split once on `sep`; the first field becomes the key and
    the remainder (or `default_value` when absent) is passed through
    `callback` to produce the value.
    """
    content = {}
    with open(path, 'r') as file:
        for line in file.readlines():
            fields = line.strip().split(sep, maxsplit=1)
            if len(fields) > 1:
                filename, value = fields
            else:
                filename, value = fields[0], default_value
            content[filename] = callback(value)
    return content
def get_ddp_sampler(dataset: Dataset, epoch: int):
    """
    This function will create a DistributedSampler if DDP is initialized,
    and will just return None if DDP is not initialized.
    """
    if not is_initialized():
        return None
    sampler = DistributedSampler(dataset)
    # Reseed shuffling per epoch so every epoch sees a different ordering.
    sampler.set_epoch(epoch)
    return sampler
def _download(filename, url, refresh, agent):
    """Download `url` into the s3prl cache directory as `filename`.

    Args:
        filename: basename to store the file under in the cache directory.
        url: source URL (or Google Drive URL when agent='gdown').
        refresh: re-download even when a cached copy exists.
        agent: 'wget' or 'gdown'.

    Returns:
        The cached file path.
    """
    dirpath = f'{torch.hub.get_dir()}/s3prl_cache'
    os.makedirs(dirpath, exist_ok=True)
    # BUG FIX: the cache path previously ignored `filename` (a literal
    # placeholder had leaked into the f-string), so every download clobbered
    # the same cache entry.
    filepath = f'{dirpath}/{filename}'
    # Serialize concurrent processes fetching the same file.
    with FileLock((filepath + '.lock')):
        if ((not os.path.isfile(filepath)) or refresh):
            if (agent == 'wget'):
                os.system(f'wget {url} -O {filepath}')
            elif (agent == 'gdown'):
                gdown.download(url, filepath, use_cookies=False)
            else:
                print("[Download] - Unknown download agent. Only 'wget' and 'gdown' are supported.")
                raise NotImplementedError
        else:
            print(f'''Using cache found in {filepath} for {url}''')
    return filepath
def _urls_to_filepaths(*args, refresh=False, agent='wget'):
    """
    Preprocess the URL specified in *args into local file paths after downloading

    Args:
        Any number of URLs (1 ~ any)

    Return:
        Same number of downloaded file paths
    """

    def url_to_filename(url):
        # Hash the full URL so distinct URLs never collide in the cache.
        assert (type(url) is str)
        return hashlib.sha256(str.encode(url)).hexdigest()

    def url_to_path(url, refresh):
        if ((type(url) is str) and (len(url) > 0)):
            return _download(url_to_filename(url), url, refresh, agent=agent)
        return None

    paths = [url_to_path(url, refresh) for url in args]
    if len(paths) > 1:
        return paths
    return paths[0]
def _gdriveids_to_filepaths(*args, refresh=False):
    """
    Preprocess the Google Drive id specified in *args into local file paths after downloading

    Args:
        Any number of Google Drive ids (1 ~ any)

    Return:
        Same number of downloaded file paths
    """

    def gdriveid_to_url(gdriveid):
        if ((type(gdriveid) is str) and (len(gdriveid) > 0)):
            return f'https://drive.google.com/uc?id={gdriveid}'
        return None

    urls = [gdriveid_to_url(gid) for gid in args]
    return _urls_to_filepaths(*urls, refresh=refresh, agent='gdown')
def check_model_equiv(model1, model2):
    """Return True when corresponding parameters of both models are equal."""
    for (p1, p2) in zip(model1.parameters(), model2.parameters()):
        # Any differing element means the models diverge.
        if (p1.data.ne(p2.data).sum() > 0):
            return False
        # NOTE(review): the two checks below index *into* the parameter
        # tensors (rows 0 and 1); they are redundant with the full elementwise
        # comparison above and would raise IndexError for parameters with
        # fewer than two rows — confirm whether they are intentional.
        if (not torch.equal(p1[0], p2[0])):
            return False
        if (not torch.equal(p1[1].data, p2[1].data)):
            return False
    return True
def copyParams(module_src, module_dest):
    """Copy each parameter of module_src into the same-named parameter of module_dest, in place."""
    dest_params = dict(module_dest.named_parameters())
    for name, src_param in module_src.named_parameters():
        if name in dest_params:
            dest_params[name].data.copy_(src_param.data)
def main():
    """Convert an old SPEC_TRANSFORMER checkpoint into the new
    TransformerForMaskedAcousticModel layout and save it as '<name>-new.ckpt'."""
    input_ckpt = sys.argv[1]
    from transformer.nn_transformer import SPEC_TRANSFORMER
    options = {'ckpt_file': input_ckpt, 'load_pretrain': 'True', 'no_grad': 'True', 'dropout': 'default', 'spec_aug': 'False', 'spec_aug_prev': 'True', 'weighted_sum': 'False', 'select_layer': (- 1), 'permute_input': 'False'}
    old_transformer = SPEC_TRANSFORMER(options, inp_dim=(- 1))
    from s3prl.upstream.mockingjay.model import TransformerForMaskedAcousticModel
    model = TransformerForMaskedAcousticModel(old_transformer.model_config, old_transformer.inp_dim, old_transformer.inp_dim).to(torch.device('cuda'))
    # The asserts sandwich each copy: weights must differ before and match after.
    assert (not check_model_equiv(old_transformer.model, model.Transformer))
    copyParams(old_transformer.model, model.Transformer)
    assert check_model_equiv(old_transformer.model, model.Transformer)
    assert (not check_model_equiv(old_transformer.SpecHead, model.SpecHead))
    copyParams(old_transformer.SpecHead, model.SpecHead)
    assert check_model_equiv(old_transformer.SpecHead, model.SpecHead)
    # Carry over training bookkeeping unchanged.
    global_step = old_transformer.all_states['Global_step']
    settings = old_transformer.all_states['Settings']
    all_states = {'SpecHead': model.SpecHead.state_dict(), 'Transformer': model.Transformer.state_dict(), 'Global_step': global_step, 'Settings': settings}
    new_ckpt_path = input_ckpt.replace('.ckpt', '-new.ckpt')
    torch.save(all_states, new_ckpt_path)
    print('Done fixing ckpt: ', input_ckpt, 'to: ', new_ckpt_path)
def main():
    """Scan a training log for the best dev score and its matching test score.

    argv[1] is the log file; optional argv[2] is a step after which parsing
    stops. Also reports the latest 'states-*.ckpt' next to the log.
    """
    log_file = str(sys.argv[1])
    ckpts = glob.glob((os.path.dirname(log_file) + '/states-*.ckpt'))
    # Sort checkpoints numerically by the step embedded in 'states-<step>.ckpt'.
    sorted_ckpts = sorted(ckpts, key=(lambda ckpt: int(ckpt.split('.')[0].split('-')[(- 1)])))
    print(f'The last ckpt: {sorted_ckpts[(- 1)]}')
    if (len(sys.argv) == 3):
        stop_step = int(sys.argv[2])
    else:
        stop_step = 99999999
    (best_dev, best_step, best_test) = (None, None, None)
    with open(log_file) as f:
        lines = f.readlines()
        for line in lines:
            line = line.strip('\n').split(' ')
            # Lines beginning with 'new' announce a new best dev score.
            if (line[0].lower() == 'new'):
                best_dev = line[(- 1)]
                best_step = line[(- 2)].strip(':')
            # 'test' lines carry the test score of a given step.
            if (line[0].lower() == 'test'):
                if (line[(- 2)].strip(':') == best_step):
                    best_test = line[(- 1)]
                if (int(line[(- 2)].strip(':')) > stop_step):
                    break
    print(f'The best dev score {best_dev} at step {best_step}, accoupanied by this test score {best_test}')
def main():
    """Pick the best `rank_by` score in a log and report the paired `target`
    score recorded at the same step.

    Usage: python <script> <log_file> [<rank_by> <target> <+|->]
    '+' means larger is better, '-' means smaller is better.
    Expected log lines contain ``...:<prefix>|...:<step>|...:<score>``.
    """
    log_file = str(sys.argv[1])
    # BUGFIX: the full form has FIVE argv entries (script, log_file, rank_by,
    # target, sign) — the old check `== 4` made the branch unreachable and
    # would have crashed on sys.argv[4] anyway.
    if len(sys.argv) == 5:
        rank_by = str(sys.argv[2])
        target = str(sys.argv[3])
        large_or_small = str(sys.argv[4])
    else:
        rank_by = 'dev'
        target = 'test'
        large_or_small = '+'
    # best_record = [best score, step of best score, paired target score]
    best_record = [-99999, 0, None]
    if large_or_small == '-':
        best_record[0] *= -1
    with open(log_file) as f:
        lines = f.readlines()
        for line in lines:
            line = line.strip('\n').split('/')[-1].split('|')
            if len(line) < 3:
                continue
            prefix = str(line[0].split(':')[-1])
            step = int(line[1].split(':')[-1])
            score = float(line[2].split(':')[-1])
            if rank_by in prefix:
                if compare(score, best_record[0], large_or_small):
                    best_record[0] = score
                    best_record[1] = step
            elif (step == best_record[1]) and (target in prefix):
                best_record[2] = score
    # BUGFIX: message previously misspelled "accompanied" as "accoupanied".
    print(f'The best {rank_by} score {best_record[0]} at step {best_record[1]}, accompanied by this {target} score {best_record[2]}')
def compare(a, b, large_or_small):
    """Return True when `a` beats `b` under the given direction.

    '+' means larger is better, '-' means smaller is better; anything
    else raises ValueError.
    """
    if large_or_small == '+':
        return a > b
    if large_or_small == '-':
        return a < b
    raise ValueError(large_or_small)
def is_leader_process():
    """True on the rank-0 process, or when torch.distributed is not initialized."""
    if not is_initialized():
        return True
    return get_rank() == 0
def count_parameters(model):
    """Total number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def count_used_parameters(model):
    """Number of parameters that currently hold a gradient, i.e. that were
    actually used in the last backward pass."""
    used = [p.numel() for p in model.parameters() if p.grad is not None]
    return sum(used)
def get_time_tag():
    """Current local time formatted as YYYY-MM-DD-HH-MM-SS (filename-safe)."""
    now = datetime.fromtimestamp(time())
    return now.strftime('%Y-%m-%d-%H-%M-%S')
def backup(src_path, tgt_dir):
    """Copy `src_path` into `tgt_dir`, appending a timestamp to the file stem
    so successive backups never overwrite each other."""
    src = Path(src_path)
    dst_name = f'{src.stem}_{get_time_tag()}{src.suffix}'
    shutil.copyfile(src_path, os.path.join(tgt_dir, dst_name))
def get_model_state(model):
    """State dict of `model`, unwrapping a DistributedDataParallel shell if present."""
    target = model.module if isinstance(model, DDP) else model
    return target.state_dict()
def show(*args, **kwargs):
    """`print` that stays silent on non-leader (non-rank-0) processes."""
    if not is_leader_process():
        return
    print(*args, **kwargs)
def hack_isinstance():
    """Monkey-patch builtins.isinstance so a defaultdict only matches classes
    that are themselves defaultdict subclasses (normally it also matches dict).

    NOTE(review): this patch is process-global and affects all later
    isinstance calls.
    """
    real_isinstance = builtins.isinstance

    def strict_isinstance(obj, cls):
        if real_isinstance(obj, defaultdict):
            return real_isinstance(obj, cls) and issubclass(cls, defaultdict)
        return real_isinstance(obj, cls)

    builtins.isinstance = strict_isinstance
def override(string, args, config):
    """Apply command-line overrides of the form
    ``config.optimizer.lr=1.0e-3,,args.upstream='fbank'``.

    Each ``,,``-separated item is ``<key>=<value>``; the value is evaluated
    as a Python expression when possible, otherwise kept as the raw string.
    ``args.*`` keys set attributes on the Namespace; ``config.*`` keys walk
    (and create) nested config dicts.
    """
    for option in string.split(',,'):
        option = option.strip()
        key, value_str = option.split('=')
        key, value_str = key.strip(), value_str.strip()
        first_field, *remaining = key.split('.')
        try:
            # eval() on user-provided override strings — trusted CLI input only.
            value = eval(value_str)
        except:
            value = value_str
        print(f'[Override] - {key} = {value}', file=sys.stderr)
        if first_field == 'args':
            assert len(remaining) == 1
            setattr(args, remaining[0], value)
        elif first_field == 'config':
            node = config
            last = len(remaining) - 1
            for i, field_name in enumerate(remaining):
                if i == last:
                    node[field_name] = value
                else:
                    node = node.setdefault(field_name, {})
def zero_mean_unit_var_norm(input_values: List[np.ndarray]) -> List[np.ndarray]:
    """Normalize every array in the list to zero mean and unit variance.

    Matches the huggingface Wav2Vec2 feature extractor so s3prl and
    huggingface behave identically.
    Reference: https://github.com/huggingface/transformers/blob/a26f4d620874b32d898a5b712006a4c856d07de1/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L81-L86
    """
    normalized = []
    for x in input_values:
        # The 1e-05 floor keeps the division stable for near-constant inputs.
        normalized.append((x - np.mean(x)) / np.sqrt(np.var(x) + 1e-05))
    return normalized
def parse_prune_heads(config):
    """Expand config['transformer']['prune_headids'] in place.

    A span string like '0,2-5,7' becomes a list of ints (an ``a-b`` span is
    half-open: ``b`` is excluded).  When the key is absent or the string
    'None', the entry is set to None.
    """
    transformer_cfg = config['transformer']
    if ('prune_headids' not in transformer_cfg) or (transformer_cfg['prune_headids'] == 'None'):
        transformer_cfg['prune_headids'] = None
        return
    heads_int = []
    for span in transformer_cfg['prune_headids'].split(','):
        endpoints = span.split('-')
        if len(endpoints) == 1:
            heads_int.append(int(endpoints[0]))
        elif len(endpoints) == 2:
            # 'a-b' expands to [a, b) — the right endpoint is excluded.
            heads_int += torch.arange(int(endpoints[0]), int(endpoints[1])).tolist()
        else:
            raise ValueError
    print(f'[PRUNING] - heads {heads_int} will be pruned')
    transformer_cfg['prune_headids'] = heads_int
def get_transformer_tester(from_path='result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/model-1000000.ckpt', display_settings=False):
    """Load the transformer checkpoint at `from_path` and wrap it in a
    headless inference Tester."""
    all_states = torch.load(from_path, map_location='cpu')
    config = all_states['Settings']['Config']
    paras = all_states['Settings']['Paras']

    # Backfill fields that older checkpoints may lack.
    if not hasattr(paras, 'multi_gpu'):
        setattr(paras, 'multi_gpu', False)
    if 'prune_headids' not in config['transformer']:
        config['transformer']['prune_headids'] = None

    if display_settings:
        for cluster in config:
            print(cluster + ':')
            for item in config[cluster]:
                print('\t' + str(item) + ': ', config[cluster][item])
        print('paras:')
        for item, value in vars(paras).items():
            print('\t' + str(item) + ': ', value)

    from transformer.solver import Tester
    tester = Tester(config, paras)
    tester.set_model(inference=True, with_head=False, from_path=from_path)
    return tester
def compute_lnsr(real, adve, norm_L2=True):
    """L2 distance between flattened `real` and `adve`, optionally normalized
    by the norm of `real`.

    NOTE(review): after reshaping to (N, -1) the inputs are still 2-D, so
    ``np.linalg.norm(..., ord=2)`` is the matrix spectral norm rather than
    the Frobenius norm — presumably intentional; confirm with the authors.
    """
    flat_real = real.reshape(real.shape[0], -1)
    flat_adve = adve.reshape(adve.shape[0], -1)
    dist = np.linalg.norm(flat_real - flat_adve, ord=2)
    if not norm_L2:
        return dist
    return dist / np.linalg.norm(flat_real, ord=2)
def run_over_layer(layer, real_todo, adve_todo):
    """Average LNSR between paired real/adversarial feature files at one layer.

    `layer` is a transformer layer index, or the string 'feature' to compare
    raw acoustic features without running the model.  `real_todo` and
    `adve_todo` are parallel lists of .npy paths (asserted equal length by
    the caller).
    """
    extractor = None
    if layer != 'feature':
        options = {
            'ckpt_file': 'result/result_transformer/mockingjay/LinearLarge-libri/model-500000.ckpt',
            'load_pretrain': 'True',
            'no_grad': 'True',
            'dropout': 'default',
            'spec_aug': 'False',
            'spec_aug_prev': 'True',
            'weighted_sum': 'False',
            'select_layer': layer,
            'permute_input': 'True',
        }
        extractor = TRANSFORMER(options=options, inp_dim=160)
        extractor.eval()
    distances = []
    for real_path, adve_path in zip(real_todo, adve_todo):
        real = torch.FloatTensor(np.load(real_path)).unsqueeze(1)
        adve = torch.FloatTensor(np.load(adve_path)).unsqueeze(1)
        # Truncate both to the shorter sequence so they can be compared.
        min_len = min(real.shape[0], adve.shape[0])
        if layer == 'feature':
            dist = compute_lnsr(real[:min_len].data.cpu().numpy(), adve[:min_len].data.cpu().numpy(), norm_L2=True)
        else:
            real_hidden = extractor(real[:min_len])
            adve_hidden = extractor(adve[:min_len])
            dist = compute_lnsr(real_hidden.data.cpu().numpy(), adve_hidden.data.cpu().numpy(), norm_L2=True)
        distances.append(dist)
    return np.mean(distances)
def main():
    """Measure acoustic and hidden-representation distances between original
    and adversarial examples, layer by layer."""
    num_layers = 12
    data_path = 'data/adversarial/'
    data_type = ['fgsm_8.0', 'fgsm_16.0', 'pgd_8.0', 'pgd_16.0']
    data_type = data_type[0]  # attack variant under comparison

    real_data = os.path.join(data_path, 'original')
    adve_data = os.path.join(data_path, data_type)
    real_todo = sorted(list(Path(real_data).rglob('*.npy')))
    adve_todo = sorted(list(Path(adve_data).rglob('*.npy')))
    assert len(real_todo) == len(adve_todo)
    print('Number of data: ', len(real_todo))

    # Raw acoustic feature distance first.
    dist = run_over_layer('feature', real_todo, adve_todo)
    print('[Original v.s. ' + data_type + '] Acoustic distance: ', dist)

    # Then one distance per transformer layer.
    layer_dists = []
    for layer in range(num_layers):
        m = run_over_layer(layer, real_todo, adve_todo)
        layer_dists.append(m)
        print('Layer: ', layer + 1, 'Mse: ', m)
    print('[Original v.s. ' + data_type + '] Hidden rep distance over all layers (1->12): ', layer_dists)
def get_speaker_from_path(x):
    """Extract the speaker id from a LibriSpeech-style path: the part of the
    file name before the first '-' (e.g. '.../103-1240-0000.flac' -> '103')."""
    filename = x.split('/')[-1]
    stem = filename.split('.')[0]
    return stem.split('-')[0]
def get_all_speakers(X):
    """Map each speaker id found in the paths `X` to a count.

    NOTE(review): the first occurrence stores 0 and each later occurrence
    increments, so the value is (#occurrences - 1), not the occurrence count.
    Downstream thresholds rely on this exact behavior, so it is preserved.
    """
    speaker_set = {}
    for path in X:
        speaker = get_speaker_from_path(path)
        if speaker in speaker_set:
            speaker_set[speaker] += 1
        else:
            speaker_set[speaker] = 0
    return speaker_set
def compute_speaker2idx(speakers):
    """Assign consecutive indices (in sorted speaker order) to every speaker
    whose recorded count exceeds the module-level SPEAKER_THRESHOLD."""
    speaker2idx = {}
    for speaker in sorted(speakers):
        if speaker in speaker2idx:
            continue
        if speakers[speaker] > SPEAKER_THRESHOLD:
            # len() equals the next free index since we only ever append.
            speaker2idx[speaker] = len(speaker2idx)
    return speaker2idx
def main():
    """Inspect speaker statistics of the train-clean-100 split.

    NOTE(review): `root`, `drop`, `max_timestep` and `max_label_len` are read
    from module-level globals — confirm they are defined before calling.
    """
    tables = pd.read_csv(os.path.join(root, 'train-clean-100' + '.csv'))
    print('[Dataset] - Computing speaker class...')
    all_paths = tables['file_path'].tolist()
    speakers = get_all_speakers(all_paths)
    speaker2idx = compute_speaker2idx(speakers)
    class_num = len(speaker2idx)
    print('[Dataset] - Possible speaker classes: ', class_num)

    # 90/10 train/test split with a fixed seed for reproducibility.
    train = tables.sample(frac=0.9, random_state=20190929)
    test = tables.drop(train.index)
    table = train.sort_values(by=['length'], ascending=False)
    X = table['file_path'].tolist()
    X_lens = table['length'].tolist()

    # Optional length filters (applied after X/X_lens were extracted,
    # mirroring the original dataset code).
    if drop and (max_timestep > 0):
        table = table[table.length < max_timestep]
    if drop and (max_label_len > 0):
        table = table[(table.label.str.count('_') + 1) < max_label_len]

    # Average stored count per kept speaker (see get_all_speakers for the
    # off-by-one meaning of the stored counts).
    num_utt = [speakers[speaker] for speaker in speakers if speaker in speaker2idx]
    print('Average utterance per speaker: ', np.mean(num_utt))
def print_cache_path(url: str, refresh: bool):
    """Print the local cache file path that `url` maps to (optionally
    refreshing the cached copy first)."""
    path = _urls_to_filepaths(url, refresh=refresh)
    print(path)
def get_ttest_args():
    """Parse CLI arguments for comparing two finished runs with a statistical
    significance test, and restore both checkpoints' saved args/configs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['ttest', 'fisher', 'mcnemar'], default='ttest')
    parser.add_argument('-em', '--evaluate_metric', default='acc')
    parser.add_argument('-t', '--evaluate_split', default='test')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('-e1', '--past_exp1', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from a checkpoint')
    parser.add_argument('-e2', '--past_exp2', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from another checkpoint')
    parser.add_argument('-u1', '--upstream1', default='default', type=str, help='used to override the upstream string for checkpoint e1')
    parser.add_argument('-u2', '--upstream2', default='default', type=str, help='used to override the upstream string for checkpoint e2')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--verbose', action='store_true', help='Print model infomation')
    parser.add_argument('--ckpt_name', default='best-states-dev', help='The string used for searching the checkpoint, example choices: `states-*`, `best-states-dev`, `best-states-test`.')
    args = parser.parse_args()

    # Restore each experiment's saved args/config from its checkpoint.
    exp_args1, exp_config1 = get_past_exp(args, args.past_exp1, args.ckpt_name)
    exp_args2, exp_config2 = get_past_exp(args, args.past_exp2, args.ckpt_name)

    # Optional per-checkpoint upstream override.
    if args.upstream1 != 'default':
        exp_args1.upstream = args.upstream1
    if args.upstream2 != 'default':
        exp_args2.upstream = args.upstream2
    return args.mode, exp_args1, exp_config1, exp_args2, exp_config2
def get_past_exp(args, past_exp, name):
    """Locate and load a checkpoint (file path or experiment directory), then
    merge its saved Args/Config into `args`.

    Returns (args, config) where `args.init_ckpt` points at the checkpoint
    and `args.mode` is forced to 'evaluate'.  A fixed list of CLI-controlled
    fields is never overwritten by the checkpoint's saved args.
    """
    if os.path.isdir(past_exp):
        ckpt_pths = glob.glob(os.path.join(past_exp, f'{name}.ckpt'))
        assert len(ckpt_pths) > 0
        if len(ckpt_pths) == 1:
            ckpt_pth = ckpt_pths[0]
        else:
            # Multiple matches (e.g. `states-*`): take the highest step number.
            ckpt_pths = sorted(ckpt_pths, key=lambda pth: int(pth.split('-')[-1].split('.')[0]))
            ckpt_pth = ckpt_pths[-1]
    else:
        ckpt_pth = past_exp
    print(f'[Runner] - Loading from {ckpt_pth}')
    ckpt = torch.load(ckpt_pth, map_location='cpu')

    def update_args(old, new, preserve_list=()):
        # Merge `new` into `old`, skipping preserved keys.
        # BUGFIX: the default used to be None, which crashed on
        # `key in preserve_list`; an empty tuple keeps the same
        # "preserve nothing" intent safely.
        out_dict = vars(old)
        new_dict = vars(new)
        for key in list(new_dict.keys()):
            if key in preserve_list:
                new_dict.pop(key)
        out_dict.update(new_dict)
        return Namespace(**out_dict)

    cannot_overwrite_args = ['mode', 'evaluate_split', 'override', 'backend', 'local_rank', 'past_exp']
    args = update_args(args, ckpt['Args'], preserve_list=cannot_overwrite_args)
    args.init_ckpt = ckpt_pth
    args.mode = 'evaluate'
    config = ckpt['Config']
    if args.override:
        override(args.override, args, config)
    return (args, config)
class Tester(Runner):
    """Runner specialization that only runs the evaluation loop and returns
    the raw per-sample records (used for paired significance tests)."""

    def __init__(self, args, config):
        super().__init__(args, config)

    def evaluate(self):
        """Evaluate on `args.evaluate_split`; always called on a single
        process, even when training was distributed."""
        split = self.args.evaluate_split

        # Fix every RNG so the evaluation pass is reproducible.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(self.args.seed)
            with torch.cuda.device(self.args.device):
                torch.cuda.empty_cache()

        self.downstream.eval()
        self.upstream.eval()

        dataloader = self.downstream.get_dataloader(split)
        records = defaultdict(list)
        for _batch_id, (wavs, *others) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split)):
            wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
            with torch.no_grad():
                features = self.upstream(wavs)
                # The downstream expert appends its metrics into `records`.
                self.downstream(split, features, *others, records=records)
        return records
def process_records(records, metric):
    """Return (mean of records[metric], per-sample metric list)."""
    assert 'sample_wise_metric' in records, 'Utterance-wise / sample-wise metric is necessary for proceeding the Paired Sample T-test.'
    values = records[metric]
    average = torch.FloatTensor(values).mean().item()
    return average, records['sample_wise_metric']
def main():
    """Compare two evaluated checkpoints with a statistical significance test
    (paired t-test, Fisher exact, or McNemar)."""
    torch.multiprocessing.set_sharing_strategy('file_system')
    torchaudio.set_audio_backend('sox_io')
    hack_isinstance()

    mode, args1, config1, args2, config2 = get_ttest_args()

    # Seed every RNG for deterministic evaluation.
    random.seed(args1.seed)
    np.random.seed(args1.seed)
    torch.manual_seed(args1.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args1.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Evaluate both checkpoints and collect per-sample metrics.
    tester1 = Tester(args1, config1)
    records1 = eval(f'tester1.{args1.mode}')()
    average1, sample_metric1 = process_records(records1, args1.evaluate_metric)

    tester2 = Tester(args2, config2)
    records2 = eval(f'tester2.{args2.mode}')()
    average2, sample_metric2 = process_records(records2, args2.evaluate_metric)

    if mode == 'ttest':
        statistic, p_value = stats.ttest_rel(sample_metric1, sample_metric2)
    elif mode in ('fisher', 'mcnemar'):
        # Both tests share the same 2x2 correct/incorrect contingency table.
        correct1 = sample_metric1.count(True)
        correct2 = sample_metric2.count(True)
        contingency_table = [
            [correct1, correct2],
            [len(sample_metric1) - correct1, len(sample_metric2) - correct2],
        ]
        if mode == 'fisher':
            statistic, p_value = stats.fisher_exact(contingency_table)
        else:
            result = mcnemar(contingency_table, exact=True)
            statistic, p_value = result.statistic, result.pvalue
    else:
        raise NotImplementedError

    print(f'[Runner] - The testing scores of the two ckpts are {average1} and {average2}, respectively.')
    print(f'[Runner] - The statistic of the significant test of the two ckpts is {statistic}')
    print(f'[Runner] - The P value of significant test of the two ckpts is {p_value}')
class Timer():
    """Accumulates wall-clock timings keyed by the call site of `end()`."""

    def __init__(self):
        self.timings = {}    # call-site marker -> list of elapsed seconds
        self.start_time = 0  # timestamp set by start()

    def start(self):
        """Mark the beginning of a timed region."""
        self.start_time = time.time()

    def end(self):
        """Record the elapsed time since `start()`, keyed by the caller's
        file and line so repeated call sites accumulate into one series."""
        frameinfo = inspect.getouterframes(inspect.currentframe())[1]
        filename = frameinfo.filename
        filename = '/'.join(filename.split('/')[-2:])
        # BUGFIX: the marker previously ignored the computed `filename`
        # (dead code) and, worse, the FIRST measurement for every marker was
        # dropped — the list was created empty and the timing only appended
        # on later calls.  setdefault + append records every measurement.
        marker = f'{filename}:{frameinfo.lineno}'
        self.timings.setdefault(marker, []).append(float(time.time() - self.start_time))

    def report(self):
        """Print the mean elapsed time of every recorded call site."""
        if self.timings:
            print('[TIMER]:')
            for marker, values in self.timings.items():
                print(f'{marker}: {torch.FloatTensor(values).mean().item()}')
        else:
            print('[TIMER]: No record')
def get_runner_args():
    """Parse the (deprecated) transformer-runner CLI arguments and load the
    YAML experiment config.

    Returns (config, args); also derives args.gpu / args.verbose from the
    --cpu / --no_msg flags and expands the prune-head spec in the config.
    """
    parser = argparse.ArgumentParser(description='Argument Parser for the S3PLR project.')

    # Setting and experiment bookkeeping.
    # BUGFIX: the default config path used a comma instead of a dot before the
    # extension ('...pretrain,yaml'), so the default could never be opened.
    parser.add_argument('--config', default='../config/deprecated_runner/tera_libri_fmllrBase_pretrain.yaml', type=str, help='Path to experiment config.', required=False)
    parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducable results.', required=False)
    parser.add_argument('--logdir', default='../log/log_transformer/', type=str, help='Logging path.', required=False)
    parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)
    parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')

    # Checkpoint paths.
    parser.add_argument('--ckpdir', default='../result/result_transformer/', type=str, help='path to store experiment result.', required=False)
    parser.add_argument('--ckpt', default='fmllrBase960-F-N-K-libri/states-1000000.ckpt', type=str, help='path to transformer model checkpoint.', required=False)
    parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)
    parser.add_argument('--apc_path', default='../result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)

    # Upstream training / representation selection.
    parser.add_argument('--train', action='store_true', help='Train the model.')
    parser.add_argument('--run_transformer', action='store_true', help='train and test the downstream tasks using speech representations.')
    parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')
    parser.add_argument('--fine_tune', action='store_true', help='fine tune the transformer model with downstream task.')
    parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')

    # Downstream task switches.
    parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or speech representations.')
    parser.add_argument('--test_phone', action='store_true', help='Test mel or speech representations using the trained phone classifier.')
    parser.add_argument('--train_cpc_phone', action='store_true', help='Train the phone classifier on mel or speech representations with the alignments in CPC paper.')
    parser.add_argument('--test_cpc_phone', action='store_true', help='Test mel or speech representations using the trained phone classifier with the alignments in CPC paper.')
    parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or speech representations.')
    parser.add_argument('--test_sentiment', action='store_true', help='Test mel or speech representations using the trained sentiment classifier.')
    parser.add_argument('--train_speaker', action='store_true', help='Train the speaker classifier on mel or speech representations.')
    parser.add_argument('--test_speaker', action='store_true', help='Test mel or speech representations using the trained speaker classifier.')

    # Inference / plotting options.
    parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')
    parser.add_argument('--plot_attention', action='store_true', help='plot attention')
    parser.add_argument('--load_ws', default='result/result_transformer_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')
    parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
    parser.add_argument('--multi_gpu', action='store_true', help='Enable Multi-GPU training.')
    parser.add_argument('--no_msg', action='store_true', help='Hide all messages.')
    parser.add_argument('--test_reconstruct', action='store_true', help='Test reconstruction capability')

    args = parser.parse_args()
    setattr(args, 'gpu', (not args.cpu))
    setattr(args, 'verbose', (not args.no_msg))
    # BUGFIX: the config file handle was previously leaked (bare open()).
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    parse_prune_heads(config)
    return (config, args)
def main():
    """Entry point of the deprecated transformer runner: dispatch to one
    pretraining / downstream train-test routine based on the CLI flags.

    NOTE(review): `--train` is an independent `if`; the `--test_reconstruct`
    check starts the elif chain, so training can be followed by another task
    in the same invocation — presumably intentional, confirm before changing.
    """
    (config, args) = get_runner_args()
    # Seed every RNG and force deterministic cuDNN for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Solvers are imported lazily inside each branch so only the needed
    # dependencies are loaded.
    if args.train:
        from transformer.solver import Trainer
        trainer = Trainer(config, args)
        trainer.load_data(split='train')
        trainer.set_model(inference=False)
        trainer.exec()
    if args.test_reconstruct:
        from transformer.solver import Trainer
        trainer = Trainer(config, args)
        trainer.load_data(split='test')
        trainer.set_model(inference=True, with_head=True)
        trainer.test_reconstruct()
    elif args.train_phone:
        from downstream.solver import Downstream_Trainer
        # The task prefix selects which representation feeds the classifier.
        task = ('transformer_phone' if args.run_transformer else ('apc_phone' if args.run_apc else 'baseline_phone'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='montreal_phone')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_phone:
        from downstream.solver import Downstream_Tester
        task = ('transformer_phone' if args.run_transformer else ('apc_phone' if args.run_apc else 'baseline_phone'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='montreal_phone')
        tester.set_model(inference=True)
        tester.exec()
    elif args.train_cpc_phone:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_cpc_phone' if args.run_transformer else ('apc_cpc_phone' if args.run_apc else 'baseline_cpc_phone'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='cpc_phone')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_cpc_phone:
        from downstream.solver import Downstream_Tester
        task = ('transformer_cpc_phone' if args.run_transformer else ('apc_cpc_phone' if args.run_apc else 'baseline_cpc_phone'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='cpc_phone')
        tester.set_model(inference=True)
        tester.exec()
    elif args.train_sentiment:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_sentiment' if args.run_transformer else ('apc_sentiment' if args.run_apc else 'baseline_sentiment'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='sentiment')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_sentiment:
        from downstream.solver import Downstream_Tester
        task = ('transformer_sentiment' if args.run_transformer else ('apc_sentiment' if args.run_apc else 'baseline_sentiment'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='sentiment')
        tester.set_model(inference=True)
        tester.exec()
    elif args.train_speaker:
        from downstream.solver import Downstream_Trainer
        task = ('transformer_speaker' if args.run_transformer else ('apc_speaker' if args.run_apc else 'baseline_speaker'))
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='speaker')
        trainer.set_model(inference=False)
        trainer.exec()
    elif args.test_speaker:
        from downstream.solver import Downstream_Tester
        task = ('transformer_speaker' if args.run_transformer else ('apc_speaker' if args.run_apc else 'baseline_speaker'))
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='speaker')
        tester.set_model(inference=True)
        tester.exec()
    elif args.plot:
        from transformer.solver import Tester
        tester = Tester(config, args)
        tester.load_data(split='test', load_mel_only=True)
        tester.set_model(inference=True, with_head=args.with_head)
        tester.plot(with_head=args.with_head)
    elif args.plot_attention:
        from transformer.solver import Tester
        tester = Tester(config, args)
        tester.load_data(split='test', load_mel_only=True)
        tester.set_model(inference=True, output_attention=True)
        tester.plot_attention()
def pytest_addoption(parser):
    """Register the custom command-line flags used to opt into slow or
    dependency-heavy test groups."""
    boolean_flags = (
        ('--runupstream', 'run upstream tests'),
        ('--runslow', 'run slow tests'),
        ('--runcorpus', 'run tests with corpus path dependency'),
        ('--practice', 'for test scripts only for practice and not real test cases.'),
        ('--runextra', 'run tests with extra dependencies'),
        ('--fairseq', 'run tests with fairseq dependencies'),
    )
    for flag, help_text in boolean_flags:
        parser.addoption(flag, action='store_true', help=help_text)
    parser.addoption('--upstream_names', action='store')
def pytest_generate_tests(metafunc):
    """Parametrize any test that declares an `upstream_names` fixture with
    the value given on the command line."""
    if 'upstream_names' not in metafunc.fixturenames:
        return
    option_value = metafunc.config.option.upstream_names
    metafunc.parametrize('upstream_names', [option_value])
def pytest_configure(config):
    """Declare the custom markers so pytest does not warn about them."""
    markers = (
        'upstream: mark test as a upstream test case',
        'slow: mark test as slow to run',
        'corpus: mark test as required corpus path dependency',
        'extra_dependency: mask test requiring extra dependencies to run',
        'practice: mark test as a practice',
        'fairseq: mark test as a fairseq',
    )
    for marker in markers:
        config.addinivalue_line('markers', marker)
def pytest_collection_modifyitems(config, items):
    """Skip marker-gated tests unless their enabling CLI flag was given."""
    gates = (
        ('--runupstream', 'upstream'),
        ('--runslow', 'slow'),
        ('--runcorpus', 'corpus'),
        ('--practice', 'practice'),
        ('--runextra', 'extra_dependency'),
        ('--fairseq', 'fairseq'),
    )
    for flag, keyword in gates:
        if config.getoption(flag):
            continue
        skip_marker = pytest.mark.skip(reason=f'need {flag} option to run')
        for item in items:
            if keyword in item.keywords:
                item.add_marker(skip_marker)
class Helper:
    """Empty namespace class handed to tests through the `helpers` fixture."""
@pytest.fixture
def helpers():
    """Expose the shared Helper class to tests as a fixture."""
    return Helper
@pytest.mark.parametrize('vocab_type', ['subword', 'character'])
def test_superb_asr(vocab_type):
    """End-to-end smoke test of the SuperbASR recipe on 5 tiny pseudo audios,
    exercising both subword and character tokenizers."""
    if (vocab_type == 'subword'):
        # Tiny subword vocab so tokenizer training succeeds on 5 sentences.
        vocab_args = {'vocab_size': 18}
    else:
        vocab_args = {}
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestASR(SuperbASR):
                def default_config(self) -> dict:
                    # Drop the corpus-specific prepare_data options; this
                    # test supplies its own prepare_data below.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Pair each pseudo wav with a fixed transcription and
                    # write 3/1/1 train/valid/test CSV splits.
                    all_wav_paths = wav_paths
                    all_text = ['hello how are you today', 'fine', 'oh', 'I think is good', 'maybe okay']
                    ids = list(range(len(all_wav_paths)))
                    df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': all_text})
                    train_path = (Path(target_dir) / 'train.csv')
                    valid_path = (Path(target_dir) / 'valid.csv')
                    test_path = (Path(target_dir) / 'test.csv')
                    df.iloc[:3].to_csv(train_path, index=False)
                    df.iloc[3:4].to_csv(valid_path, index=False)
                    df.iloc[4:].to_csv(test_path, index=False)
                    return (train_path, valid_path, [test_path])

            problem = TestASR()
            config = problem.default_config()
            # Shrink the run so the full train/eval loop finishes in seconds
            # on CPU with the lightweight fbank upstream.
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_tokenizer'] = {'vocab_type': vocab_type, 'vocab_args': vocab_args}
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_er():
    """End-to-end smoke test of the SuperbER (emotion recognition) recipe
    on 5 tiny pseudo audios."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestER(SuperbER):
                def default_config(self) -> dict:
                    # Drop the corpus-specific prepare_data options; this
                    # test supplies its own prepare_data below.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Same 5 utterances in every split; start/end seconds
                    # exercise segment cropping (None = use the whole file).
                    ids = [Path(path).stem for path in wav_paths]
                    labels = ['a', 'b', 'a', 'c', 'd']
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': labels, 'start_sec': start_secs, 'end_sec': end_secs})
                    # NOTE(review): target_dir is used with the `/` operator,
                    # so it is presumably a Path here despite the str hint.
                    train_csv = (target_dir / 'train.csv')
                    valid_csv = (target_dir / 'valid.csv')
                    test_csv = (target_dir / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestER()
            config = problem.default_config()
            # Shrink the run so the full train/eval loop finishes in seconds
            # on CPU with the lightweight fbank upstream.
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_superb_ks():
    """End-to-end smoke test of the SuperbKS (keyword spotting) recipe
    on 5 tiny pseudo audios."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestKS(SuperbKS):
                def default_config(self) -> dict:
                    # Drop the corpus-specific prepare_data options; this
                    # test supplies its own prepare_data below.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Same 5 utterances in every split; start/end seconds
                    # exercise segment cropping (None = use the whole file).
                    ids = [Path(path).stem for path in wav_paths]
                    labels = ['a', 'b', 'a', 'c', 'd']
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': labels, 'start_sec': start_secs, 'end_sec': end_secs})
                    # NOTE(review): target_dir is used with the `/` operator,
                    # so it is presumably a Path here despite the str hint.
                    train_csv = (target_dir / 'train.csv')
                    valid_csv = (target_dir / 'valid.csv')
                    test_csv = (target_dir / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestKS()
            config = problem.default_config()
            # Shrink the run so the full train/eval loop finishes in seconds
            # on CPU with the lightweight fbank upstream.
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)