code stringlengths 17 6.64M |
|---|
def gsc1_for_classification(target_dir: str, cache_dir: str, gsc1: str, gsc1_test: str, get_path_only: bool = False):
    """
    Prepare Google Speech Command V1 for the classification task,
    following :obj:`SuperbKS.prepare_data` format.

    Args:
        gsc1 (str): The root path of the Google Speech Command V1 training set
        gsc1_test (str): The root path of the Google Speech Command V1 test set
        **others: refer to :obj:`SuperbKS.prepare_data`

    Returns:
        tuple: (train_path, valid_path, test_paths) csv file paths
    """
    target_dir = Path(target_dir)

    train_path = target_dir / "train.csv"
    valid_path = target_dir / "valid.csv"
    test_paths = [target_dir / "test.csv"]

    if get_path_only:
        return train_path, valid_path, test_paths

    def gsc_v1_for_superb(gsc1: str, gsc1_test: str):
        # Parse the corpus and normalize each data point into the SUPERB csv schema.
        corpus = SpeechCommandsV1(gsc1, gsc1_test)

        def format_fields(data: dict):
            # Lazy import: torchaudio is only needed to inspect _silence_ recordings.
            import torchaudio

            formated_data = OrderedDict()
            for key, value in data.items():
                data_point = {
                    "wav_path": value["wav_path"],
                    "label": value["class_name"],
                    "start_sec": None,
                    "end_sec": None,
                }
                if value["class_name"] == "_silence_":
                    # _silence_ recordings are long background noises: chop them
                    # into 1-second segments so each segment is one data point.
                    info = torchaudio.info(value["wav_path"])
                    # NOTE: was list(range(info.num_frames))[::info.sample_rate],
                    # which materialized one int per audio frame; range with a
                    # step yields the exact same start positions in O(1) memory.
                    for start in range(0, info.num_frames, info.sample_rate):
                        seg = data_point.copy()
                        end = min(start + 1 * info.sample_rate, info.num_frames)
                        seg["start_sec"] = start / info.sample_rate
                        seg["end_sec"] = end / info.sample_rate
                        formated_data[f"{key}_{start}_{end}"] = seg
                else:
                    formated_data[key] = data_point
            return formated_data

        train_data, valid_data, test_data = corpus.data_split
        return (
            format_fields(train_data),
            format_fields(valid_data),
            format_fields(test_data),
        )

    train_data, valid_data, test_data = gsc_v1_for_superb(gsc1, gsc1_test)

    def dict_to_csv(data_dict, csv_path):
        # Flatten {id: {field: value}} into a csv with one column per field
        # (sorted) plus a trailing "id" column, rows sorted by id.
        keys = sorted(list(data_dict.keys()))
        fields = sorted(data_dict[keys[0]].keys())
        data = dict()
        for field in fields:
            data[field] = []
            for key in keys:
                data[field].append(data_dict[key][field])
        data["id"] = keys
        df = pd.DataFrame(data)
        df.to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])

    return train_path, valid_path, test_paths
|
class SuperbKS(SuperbSID):
    """
    The standard SUPERB KS (keyword spotting) task.

    Re-uses the :obj:`SuperbSID` pipeline, overriding data preparation
    (Google Speech Commands V1) and the batch samplers (class-balanced
    sampling for train/valid).
    """

    def default_config(self) -> dict:
        """Return the default arguments for every stage of the recipe."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(gsc1=MISSING, gsc1_test=MISSING),
            build_encoder=dict(),
            build_dataset=dict(
                train=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
                valid=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
                test=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32),
                valid=dict(batch_size=32),
                test=dict(batch_size=32),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=5000,
                save_step=1000,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='accuracy',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """
        Prepare the task-specific data metadata (path, labels...).
        By default call :obj:`gsc1_for_classification` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                support arguments in :obj:`gsc1_for_classification`
            target_dir (str): Parse your corpus and save the csv file into this directory
            cache_dir (str): Directory for slow-to-build temporary files, shared
                across training sessions
            get_path_only (bool): Directly return the filepaths no matter they exist or not.

        Returns:
            tuple: (train_path, valid_path, test_paths). Each csv contains the
            columns: id, wav_path, label, and optional start_sec / end_sec.
        """
        return gsc1_for_classification(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool = False):
        """
        Build the label encoder and return the saved encoder path.
        Generates a :obj:`s3prl.dataio.encoder.CategoryEncoder` from the
        :code:`label` column of all the csv files and pickles it.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Save your encoder into this directory
            cache_dir (str): Directory for slow-to-build temporary files
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): Directly return the filepath no matter it exists or not.

        Returns:
            str: encoder_path, the pickled encoder file
        """
        encoder_path = Path(target_dir) / 'encoder.pkl'
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        labels = all_csv['label'].tolist()
        encoder = CategoryEncoder(labels)
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)

        # BUGFIX: previously returned the encoder object; the documented
        # contract (and SuperbSID.build_encoder) returns the saved path.
        return encoder_path

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset: Dataset):
        """
        Return the batch sampler for torch DataLoader.
        For train and valid use :obj:`BalancedWeightedSampler` (class-balanced);
        for test use :obj:`FixedBatchSizeBatchSampler`.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`, with
                :code:`train` / :code:`valid` keys passing arguments to
                :obj:`BalancedWeightedSampler` and :code:`test` passing
                arguments to :obj:`FixedBatchSizeBatchSampler`
            target_dir (str): Current experiment directory
            cache_dir (str): Directory for slow-to-build temporary files
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader

        Raises:
            ValueError: if :code:`mode` is not one of train/valid/test
        """
        def _build_batch_sampler(train: dict = None, valid: dict = None, test: dict = None):
            train = train or {}
            valid = valid or {}
            test = test or {}

            csv = pd.read_csv(data_csv)
            labels = csv['label'].tolist()

            if mode == 'train':
                return BalancedWeightedSampler(labels, **train)
            elif mode == 'valid':
                return BalancedWeightedSampler(labels, **valid)
            elif mode == 'test':
                return FixedBatchSizeBatchSampler(csv, **test)
            # Previously fell through and silently returned None.
            raise ValueError(f"Unsupported mode: {mode}")

        return _build_batch_sampler(**build_batch_sampler)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_downsample_rate: int):
        """
        Return the task-specific downstream model.
        By default build the :obj:`MeanPoolingLinear` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_downsample_rate (int): the input feature's downsample rate (from 16 KHz)

        Returns:
            :obj:`AbsUtteranceModel`
        """
        model = MeanPoolingLinear(downstream_input_size, downstream_output_size, **build_downstream)
        return model
|
def voxceleb1_for_sid(target_dir: str, cache_dir: str, dataset_root: str, n_jobs: int = 6, get_path_only: bool = False):
    """
    Prepare VoxCeleb1 for SID following :obj:`SuperbSID.prepare_data` format.

    Args:
        dataset_root (str): The root path of VoxCeleb1
        n_jobs (int): to speed up the corpus parsing procedure
        **others: refer to :obj:`SuperbSID.prepare_data`

    Returns:
        tuple: (train_path, valid_path, test_paths) csv file paths
    """
    target_dir = Path(target_dir)
    train_path = target_dir / "train.csv"
    valid_path = target_dir / "valid.csv"
    test_paths = [target_dir / "test.csv"]

    if get_path_only:
        return train_path, valid_path, test_paths

    corpus = VoxCeleb1SID(dataset_root, n_jobs)
    train_data, valid_data, test_data = corpus.data_split

    def dict_to_csv(data_dict, csv_path):
        # One row per id, sorted by id; columns are the sorted field names
        # followed by a trailing "id" column.
        ids = sorted(data_dict)
        columns = sorted(data_dict[ids[0]].keys())
        table = {col: [data_dict[uid][col] for uid in ids] for col in columns}
        table["id"] = ids
        pd.DataFrame(table).to_csv(csv_path, index=False)

    for split_data, split_path in (
        (train_data, train_path),
        (valid_data, valid_path),
        (test_data, test_paths[0]),
    ):
        dict_to_csv(split_data, split_path)

    return train_path, valid_path, test_paths
|
class SuperbSID(Common):
    '''
    The standard SUPERB SID (speaker identification) task.
    '''

    def default_config(self) -> dict:
        """Return the default arguments for every stage of the recipe."""
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING), build_encoder=dict(), build_dataset=dict(train=dict(max_secs=8.0)), build_batch_sampler=dict(train=dict(batch_size=8, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_size=256), build_model=dict(upstream_trainable=False), build_task=dict(), build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=200000, log_step=500, eval_step=5000, save_step=1000, gradient_clipping=1.0, gradient_accumulate=4, valid_metric='accuracy', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """
        Prepare the task-specific data metadata (path, labels...).
        By default call :obj:`voxceleb1_for_sid` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`, support arguments in :obj:`voxceleb1_for_sid`
            target_dir (str): Parse your corpus and save the csv file into this directory
            cache_dir (str): Directory for slow-to-build temporary files, shared
                across different training sessions (different hypers and target_dir)
            get_path_only (bool): Directly return the filepaths no matter they exist or not.

        Returns:
            tuple: (train_path, valid_path, test_paths). Each csv contains the
            columns: id, wav_path, label, and optional start_sec / end_sec
            (may be math.nan, meaning load from the beginning / to the end).
        """
        return voxceleb1_for_sid(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool = False):
        """
        Build the label encoder and return the saved encoder path.
        Generates a :obj:`s3prl.dataio.encoder.CategoryEncoder` from the
        :code:`label` column of all the csv files and pickles it.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Save your encoder into this directory
            cache_dir (str): Directory for slow-to-build temporary files
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): Directly return the filepath no matter it exists or not.

        Returns:
            str: encoder_path, the pickled encoder file
        """
        encoder_path = (Path(target_dir) / 'encoder.pkl')
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        # Fit the encoder on every split so no label is unseen at test time.
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        labels = all_csv['label'].tolist()
        encoder = CategoryEncoder(labels)
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)

        return encoder_path

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """
        Build the dataset for train/valid/test.

        Args:
            build_dataset (dict): same in :obj:`default_config`; has optional
                :code:`train` / :code:`valid` / :code:`test` keys, each a dict with:

                - max_secs (float): randomly crop waveforms longer than this many seconds
                - sox_effects (List[List[str]]): if not None, apply sox effects on the utterance

            target_dir (str): Current experiment directory
            cache_dir (str): Directory for slow-to-build temporary files
            mode (str): train/valid/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            encoder_path (str): The pickled encoder path for encoding the labels
            frame_shift (int): unused here

        Returns:
            torch Dataset whose items are dicts with keys:
            x (FloatTensor, (seq_len, 1)), x_len (int), class_id (int),
            label (str), unique_name (str)
        """
        # Validate the per-split config keys via dataclass construction:
        # unexpected keys raise TypeError.
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None
        conf = Config(**build_dataset)

        assert (mode in ['train', 'valid', 'test'])
        # Narrow down to the split-specific sub-config.
        if (mode == 'train'):
            conf = (conf.train or {})
        elif (mode == 'valid'):
            conf = (conf.valid or {})
        elif (mode == 'test'):
            conf = (conf.test or {})

        @dataclass
        class SplitConfig():
            max_secs: float = None
            sox_effects: List[List[str]] = None
        conf = SplitConfig(**conf)

        csv = pd.read_csv(data_csv)

        # Optional segment boundaries; pandas reads missing values as NaN,
        # which LoadAudio expects as None ("from the beginning" / "to the end").
        start_secs = None
        if ('start_sec' in csv.columns):
            start_secs = csv['start_sec'].tolist()
            start_secs = [(None if math.isnan(sec) else sec) for sec in start_secs]
        end_secs = None
        if ('end_sec' in csv.columns):
            end_secs = csv['end_sec'].tolist()
            end_secs = [(None if math.isnan(sec) else sec) for sec in end_secs]

        audio_loader = LoadAudio(csv['wav_path'].tolist(), start_secs, end_secs, max_secs=conf.max_secs, sox_effects=conf.sox_effects)

        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        label_encoder = EncodeCategory(csv['label'].tolist(), encoder)
        ids = csv['id'].tolist()

        # Lightweight map-style dataset closing over the loaders above.
        class Dataset():
            def __len__(self):
                return len(ids)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {'x': audio['wav'], 'x_len': audio['wav_len'], 'label': label['label'], 'class_id': label['class_id'], 'unique_name': ids[index]}

        dataset = Dataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """
        Return the batch sampler for torch DataLoader.
        All splits use :obj:`FixedBatchSizeBatchSampler`.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`; the
                :code:`train` / :code:`valid` / :code:`test` sub-dicts are the
                arguments for :obj:`FixedBatchSizeBatchSampler`
            target_dir (str): Current experiment directory
            cache_dir (str): Directory for slow-to-build temporary files
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader
        """
        def _superb_sid_batch_sampler(train: dict = None, valid: dict = None, test: dict = None):
            train = (train or {})
            valid = (valid or {})
            test = (test or {})
            if (mode == 'train'):
                sampler = FixedBatchSizeBatchSampler(dataset, **train)
            elif (mode == 'valid'):
                sampler = FixedBatchSizeBatchSampler(dataset, **valid)
            elif (mode == 'test'):
                sampler = FixedBatchSizeBatchSampler(dataset, **test)
            # NOTE(review): an unrecognized mode raises UnboundLocalError here.
            return sampler
        return _superb_sid_batch_sampler(**build_batch_sampler)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """
        Return the task-specific downstream model.
        By default build the :obj:`MeanPoolingLinear` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`, support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz)

        Returns:
            :obj:`s3prl.nn.interface.AbsUtteranceModel`
        """
        model = MeanPoolingLinear(downstream_input_size, downstream_output_size, **build_downstream)
        return model
|
class Diarization(Problem):
    '''
    The generic Speaker Diarization problem, run as a 4-stage recipe:
    data preparation, training, inference, and scoring.
    '''

    def run(self, target_dir: str, cache_dir: str, remove_all_cache: bool = False, start: int = 0, stop: int = None, num_workers: int = 6, eval_batch: int = (- 1), device: str = 'cuda', world_size: int = 1, rank: int = 0, test_ckpt_dir: str = None, num_speaker: int = 2, prepare_data: dict = None, build_dataset: dict = None, build_batch_sampler: dict = None, build_collate_fn: dict = None, build_upstream: dict = None, build_featurizer: dict = None, build_downstream: dict = None, build_model: dict = None, build_task: dict = None, build_optimizer: dict = None, build_scheduler: dict = None, save_model: dict = None, save_task: dict = None, train: dict = None, evaluate: dict = None, scoring: dict = None):
        '''
        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the Kaldi-style data directory for speaker diarization
        1        Train the model
        2        Inference the prediction
        3        Score the prediction
        ======== ====================

        Args:
            target_dir (str): The directory that stores the script result.
            cache_dir (str): The directory that caches the processed data.
                Default: ~/.cache/s3prl/data
            remove_all_cache (bool): Whether to remove all the cache stored under
                :code:`cache_dir`. Default: False
            start (int): The starting stage of the problem script. Default: 0
            stop (int): The stopping stage; set :code:`None` to reach the final
                stage. Default: None
            num_workers (int): num_workers for all the torch DataLoader
            eval_batch (int): During evaluation (valid or test), limit the number
                of batches; -1 disables the limit and evaluates the entire epoch.
                Default: -1
            device (str): "cpu" or "cuda". Default: "cuda"
            world_size (int): How many processes run this script in parallel;
                > 1 only for distributed training. Default: 1
            rank (int): This process's id (0 ~ world_size - 1) when distributed.
            test_ckpt_dir (str): Checkpoint path for testing. If not given, use
                :code:`target_dir/train/valid_best`.
            num_speaker (int): How many speakers per utterance
            **others: method-specific argument dicts (e.g. :code:`prepare_data`,
                :code:`build_model`) forwarded to the methods of the same name;
                not used in the core :obj:`run` logic.
        '''
        # Snapshot the fully-resolved arguments of this run for reproducibility.
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize every optional argument to a concrete value.
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        prepare_data: dict = (prepare_data or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate: dict = (evaluate or {})
        scoring: dict = (scoring or {})

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir, ignore_errors=True)

        # ---- Stage 0: prepare data ----
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=False)
        # Re-query the csv paths so later stages still work when stage 0 is skipped.
        (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=True)

        def check_fn():
            # Verify stage 0 artifacts exist before continuing.
            assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        # Convert each csv into a Kaldi-style data directory under target_dir/kaldi_data.
        for csv in [train_csv, valid_csv, *test_csvs]:
            data_dir = ((target_dir / 'kaldi_data') / Path(csv).stem)
            csv_to_kaldi_dir(csv, data_dir)
        train_data = ((target_dir / 'kaldi_data') / Path(train_csv).stem)
        valid_data = ((target_dir / 'kaldi_data') / Path(valid_csv).stem)
        test_datas = [((target_dir / 'kaldi_data') / Path(csv).stem) for csv in test_csvs]

        # Ground-truth RTTM files for the scoring stage.
        test_rttms = []
        for test_data in test_datas:
            logger.info(f'Prepare RTTM for {test_data}')
            test_rttm = (target_dir / f'{Path(test_data).stem}.rttm')
            kaldi_dir_to_rttm(test_data, test_rttm)
            test_rttms.append(test_rttm)

        # The model emits one activity per speaker; frame_shift is the
        # upstream's downsample rate, needed for dataset building and scoring.
        model_output_size = num_speaker
        model = self.build_model(build_model, model_output_size, build_upstream, build_featurizer, build_downstream)
        frame_shift = model.downsample_rate

        # ---- Stage 1: train ----
        stage_id = 1
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'train', train_csv, train_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'valid', valid_csv, valid_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
            build_model_all_args = dict(build_model=build_model, model_output_size=model_output_size, build_upstream=build_upstream, build_featurizer=build_featurizer, build_downstream=build_downstream)
            build_task_all_args_except_model = dict(build_task=build_task)
            self.train(train, train_dir, build_model_all_args, build_task_all_args_except_model, save_model, save_task, build_optimizer, build_scheduler, evaluate, train_ds, train_bs, self.build_collate_fn(build_collate_fn, 'train'), valid_ds, valid_bs, self.build_collate_fn(build_collate_fn, 'valid'), device=device, eval_batch=eval_batch, num_workers=num_workers, world_size=world_size, rank=rank)

        def check_fn():
            assert (train_dir / 'valid_best').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 2: inference ----
        stage_id = 2
        test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
        # Pre-compute every test output directory (also needed by the stage check
        # and stage 3 when stage 2 itself is skipped).
        test_dirs = []
        for (test_idx, test_data) in enumerate(test_datas):
            test_name = Path(test_data).stem
            test_dir: Path = (((target_dir / 'evaluate') / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-')) / test_name)
            test_dirs.append(test_dir)
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_data) in enumerate(test_datas):
                test_csv = test_csvs[test_idx]
                test_dir = test_dirs[test_idx]
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_dir} and dump prediction')
                (test_ds, test_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'test', test_csv, test_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir)
                logs: dict = self.evaluate(evaluate, 'test', valid_best_task, test_ds, test_bs, self.build_collate_fn(build_collate_fn, 'test'), eval_batch, test_dir, device, num_workers)
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

        def check_fn():
            for test_dir in test_dirs:
                assert (test_dir / 'prediction').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 3: scoring ----
        stage_id = 3
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Score model: {test_ckpt_dir}')
            self.scoring(scoring, stage_id, test_dirs, test_rttms, frame_shift)
        return stage_id

    def scoring(self, scoring: dict, stage_id: int, test_dirs: List[str], test_rttms: List[str], frame_shift: int):
        '''
        Score the prediction with dscore.

        Args:
            scoring (dict):

                - thresholds (List[int]): Given the 0~1 (float) soft prediction,
                  the threshold decides how to get the 0/1 hard prediction.
                  This list are all the thresholds to try.
                - median_filters (List[int]): After getting hard prediction, use
                  median filter to smooth out the prediction. This list are all
                  the median filter sizes to try.

            *others: This method is not designed to be overridden
        '''
        # Validate the scoring config keys; missing/extra keys raise TypeError.
        @dataclass
        class ScoreConfig():
            thresholds: List[int]
            median_filters: List[int]
        conf = ScoreConfig(**scoring)

        for (test_idx, test_dir) in enumerate(test_dirs):
            logger.info(f'Stage {stage_id}.{test_idx}: Make RTTM and Score from prediction')
            # Grid-search thresholds x median filters; keep the best DER.
            (best_der, (best_th, best_med)) = make_rttm_and_score((test_dir / 'prediction'), (test_dir / 'score'), test_rttms[test_idx], frame_shift, conf.thresholds, conf.median_filters)
            logger.info(f'Best dscore DER: {best_der}')
            with (test_dir / 'dscore.yaml').open('w') as f:
                yaml.safe_dump(dict(der=best_der, threshold=best_th, median_filter=best_med), f)

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, num_speakers: int, frame_shift: int, build_dataset: dict, build_batch_sampler: dict):
        # Convenience wrapper: build the mode-specific dataset, then its batch sampler.
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, data_dir, num_speakers, frame_shift)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, data_dir, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model):
        '''
        Build the task, which defines the logics for every train/valid/test
        forward step for the :code:`model`, and for reducing all the batch
        results from multiple steps into metrics.

        By default build :obj:`DiarizationPIT`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`

        Returns:
            Task
        '''
        task = DiarizationPIT(model)
        return task
|
class SuperbSD(Diarization):
def default_config(self):
    """Return the default arguments for each stage of the SUPERB SD recipe."""
    return {
        "start": 0,
        "stop": None,
        "target_dir": MISSING,
        "cache_dir": None,
        "remove_all_cache": False,
        "prepare_data": {"data_dir": MISSING},
        "build_dataset": {
            "chunk_size": 2000,
            "subsampling": 1,
            "rate": 16000,
            "use_last_samples": True,
            "label_delay": 0,
        },
        "build_batch_sampler": {
            "train": {"batch_size": 8, "shuffle": True},
            "valid": {"batch_size": 1},
        },
        "build_upstream": {"name": MISSING},
        "build_featurizer": {"layer_selections": None, "normalize": False},
        "build_downstream": {"hidden_size": 512, "rnn_layers": 1},
        "build_model": {"upstream_trainable": False},
        "build_optimizer": {"name": "Adam", "conf": {"lr": 0.0001}},
        "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
        "save_model": {"extra_conf": {"build_downstream_conf": "${build_downstream}"}},
        "save_task": {},
        "train": {
            "total_steps": 30000,
            "log_step": 500,
            "eval_step": 500,
            "save_step": 500,
            "gradient_clipping": 1.0,
            "gradient_accumulate": 4,
            "valid_metric": "der",
            "valid_higher_better": False,
            "auto_resume": True,
            "resume_ckpt_dir": None,
        },
        "scoring": {
            "thresholds": [0.3, 0.4, 0.5, 0.6, 0.7],
            "median_filters": [1, 11],
        },
    }
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
    """
    Prepare the task-specific data metadata (path, labels...).

    Args:
        prepare_data (dict): same in :obj:`default_config`; must contain
            :code:`data_dir`, the standard Kaldi data directory with
            train/dev/test subdirectories
        target_dir (str): Parse your corpus and save the csv file into this directory
        cache_dir (str): Directory for slow-to-build temporary files, shared
            across training sessions
        get_path_only (bool): Directly return the filepaths no matter they exist or not.

    Returns:
        tuple: (train_path, valid_path, test_paths). Each csv has one row per
        segmented utterance (a recording can have multiple overlapped segments
        from different speakers) with columns: record_id, duration, wav_path,
        utt_id (globally unique), speaker, start_sec, end_sec.
    """
    # Validate the config keys; an unexpected or missing key raises TypeError.
    @dataclass
    class Config():
        data_dir: str

    conf = Config(**prepare_data)
    target_dir: Path = Path(target_dir)

    csvs = {name: target_dir / f"{name}.csv" for name in ("train", "valid", "test")}
    if get_path_only:
        return csvs["train"], csvs["valid"], [csvs["test"]]

    # Kaldi's "dev" split becomes our "valid" split.
    kaldi_root = Path(conf.data_dir)
    for kaldi_name, split_name in (("train", "train"), ("dev", "valid"), ("test", "test")):
        kaldi_dir_to_csv(kaldi_root / kaldi_name, csvs[split_name])

    return csvs["train"], csvs["valid"], [csvs["test"]]
def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, num_speakers: int, frame_shift: int):
    """Build the diarization dataset for train/valid/test.

    Args:
        build_dataset (dict): same as in :obj:`default_config`, forwarded as
            keyword arguments to :obj:`DiarizationDataset`
        target_dir (str): current experiment directory (unused here)
        cache_dir (str): shared cache directory (unused here)
        mode (str): train/valid/test
        data_csv (str): metadata csv for this mode (unused here)
        data_dir (str): the converted Kaldi data directory for this mode
        num_speakers (int): number of speakers per utterance
        frame_shift (int): upstream frame shift (downsample rate from 16 kHz)

    Returns:
        torch Dataset yielding dicts with keys: x, x_len, label, label_len,
        record_id, chunk_id
    """
    return DiarizationDataset(
        mode,
        data_dir,
        frame_shift=frame_shift,
        num_speakers=num_speakers,
        **build_dataset,
    )
def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, dataset):
    """Return the batch sampler for the torch DataLoader of the given mode.

    Args:
        build_batch_sampler (dict): same as in :obj:`default_config`; may hold
            ``train``/``valid`` sub-dicts with :obj:`FixedBatchSizeBatchSampler`
            arguments. The test mode always uses :obj:`GroupSameItemSampler`
            grouped by record_id.
        target_dir (str): current experiment directory (unused here)
        cache_dir (str): shared cache directory (unused here)
        mode (str): train/valid/test
        data_csv (str): metadata csv for this mode (unused here)
        data_dir (str): converted Kaldi data directory (unused here)
        dataset: the dataset from :obj:`build_dataset`

    Returns:
        batch sampler for torch DataLoader

    Raises:
        ValueError: when ``mode`` is not one of train/valid/test
    """

    @dataclass
    class Config:
        train: dict = None
        valid: dict = None

    conf = Config(**build_batch_sampler)

    if mode == "train":
        return FixedBatchSizeBatchSampler(dataset, **(conf.train or {}))
    if mode == "valid":
        return FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
    if mode == "test":
        # group chunks of the same recording into one batch for scoring
        record_ids = get_info(dataset, ["record_id"])
        return GroupSameItemSampler(record_ids)
    raise ValueError(f"Unsupported mode: {mode}")
def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
    """Return the task-specific downstream model.

    By default builds the :obj:`SuperbDiarizationModel`.

    Args:
        build_downstream (dict): same as in :obj:`default_config`, forwarded
            as keyword arguments to :obj:`SuperbDiarizationModel`
        downstream_input_size (int): required model input size
        downstream_output_size (int): required model output size
        downstream_input_stride (int): input feature stride from 16 kHz (unused here)

    Returns:
        :obj:`s3prl.nn.interface.AbsFrameModel`
    """
    model = SuperbDiarizationModel(
        downstream_input_size,
        downstream_output_size,
        **build_downstream,
    )
    return model
|
def kaldi_dir_to_rttm(data_dir: str, rttm_path: str):
    """Convert a Kaldi data directory ('segments' + 'utt2spk') into an RTTM file.

    Args:
        data_dir (str): Kaldi data directory containing 'segments' and 'utt2spk'
        rttm_path (str): output RTTM file path (overwritten)
    """
    root = Path(data_dir)
    segments_file = root / "segments"
    utt2spk_file = root / "utt2spk"
    assert segments_file.is_file()
    assert utt2spk_file.is_file()

    # build the utterance -> speaker lookup first
    utt2spk = {}
    with utt2spk_file.open() as f:
        for raw_line in f.readlines():
            fields = raw_line.strip().replace("\n", " ").split()
            assert len(fields) == 2
            utt, spk = fields
            utt2spk[utt] = spk

    # one RTTM row per segment: recording, onset, duration, speaker
    with Path(rttm_path).open("w") as rttm_f:
        with segments_file.open() as f:
            for raw_line in f.readlines():
                fields = raw_line.strip().replace("\t", " ").split()
                assert len(fields) == 4
                utt, reco, start, end = fields
                print(
                    RTTM_FORMAT.format(reco, float(start), float(end) - float(start), utt2spk[utt]),
                    file=rttm_f,
                )
|
def make_rttm_and_score(prediction_dir: str, score_dir: str, gt_rttm: str, frame_shift: int, thresholds: List[int], medians: List[int], subsampling: int=1, sampling_rate: int=16000):
    """Grid-search decoding hyper-parameters and score each setting with dscore.

    For every (threshold, median-filter) pair, decode the frame-level
    predictions into an RTTM file and compute its DER against ``gt_rttm``.

    Args:
        prediction_dir (str): directory of per-recording posterior files
        score_dir (str): working directory for rttm/result/dscore artifacts
        gt_rttm (str): ground-truth RTTM file
        frame_shift (int): upstream frame shift (downsample rate from 16 kHz)
        thresholds (List[int]): candidate activation thresholds
        medians (List[int]): candidate median-filter sizes
        subsampling (int): extra subsampling factor applied on top of frame_shift
        sampling_rate (int): waveform sampling rate

    Returns:
        tuple: (best_der, (best_threshold, best_median))
    """
    score_root = Path(score_dir)
    score_root.mkdir(exist_ok=True, parents=True)
    dscore_dir = score_root / "dscore"
    rttm_dir = score_root / "rttm"
    result_dir = score_root / "result"

    results = []
    for th in thresholds:
        for med in medians:
            logger.info(f"Make RTTM with threshold {th}, median filter {med}")
            rttm_file = rttm_dir / f"threshold-{th}_median-{med}.rttm"
            make_rttm(prediction_dir, rttm_file, th, med, frame_shift, subsampling, sampling_rate)

            logger.info(f"Scoring...")
            result_file = result_dir / f"threshold-{th}_median-{med}.result"
            overall_der = score_with_dscore(dscore_dir, rttm_file, gt_rttm, result_file)
            logger.info(f"DER: {overall_der}")
            results.append(((th, med), overall_der))

    # pick the setting with the lowest DER (ties: first encountered, as before)
    (best_th, best_med), best_der = min(results, key=lambda item: item[1])
    return (best_der, (best_th, best_med))
|
def make_rttm(prediction_dir: str, out_rttm_path: str, threshold: int, median: int, frame_shift: int, subsampling: int, sampling_rate: int):
    """Decode frame-level posterior files into a single RTTM file.

    Each file in ``prediction_dir`` is a torch tensor of shape
    (num_frames, num_speakers) — assumed per the ``data.T`` / per-speaker
    column iteration below; TODO confirm against the producer of these files.
    Frames above ``threshold`` are marked active, optionally median-filtered,
    and consecutive active runs are emitted as RTTM segments.

    Args:
        prediction_dir (str): directory of per-recording posterior files
        out_rttm_path (str): output RTTM path (parent dirs are created)
        threshold (int): activation threshold on the posterior values
        median (int): median-filter kernel size; skipped when <= 1
        frame_shift (int): upstream frame shift (downsample rate from 16 kHz)
        subsampling (int): extra subsampling factor
        sampling_rate (int): waveform sampling rate
    """
    names = sorted(name for name in os.listdir(prediction_dir))
    Path(out_rttm_path).parent.mkdir(exist_ok=True, parents=True)

    # seconds covered by one prediction frame (loop-invariant, hoisted)
    factor = (frame_shift * subsampling) / sampling_rate

    with open(out_rttm_path, "w") as wf:
        for name in names:
            filepath = Path(prediction_dir) / name
            session, _ = os.path.splitext(os.path.basename(filepath))

            data = torch.load(filepath).numpy()
            a = np.where(data > threshold, 1, 0)
            if median > 1:
                a = medfilt(a, (median, 1))

            for spkid, frames in enumerate(a.T):
                # pad so activity at the borders still produces change points
                frames = np.pad(frames, (1, 1), "constant")
                (changes,) = np.where(np.diff(frames, axis=0) != 0)
                # change points come in (onset, offset) pairs
                for s, e in zip(changes[::2], changes[1::2]):
                    print(
                        RTTM_FORMAT.format(session, s * factor, (e - s) * factor, session + "_" + str(spkid)),
                        file=wf,
                    )
|
def score_with_dscore(dscore_dir: str, hyp_rttm: str, gt_rttm: str, score_result: str) -> float:
    """Score a hypothesis RTTM against the ground truth with the dscore toolkit.

    Clones https://github.com/nryant/dscore into ``dscore_dir`` on first use,
    runs its ``score.py``, and writes the detailed report to ``score_result``.

    Args:
        dscore_dir (str): where the dscore repository lives (cloned if missing)
        hyp_rttm (str): hypothesis RTTM file
        gt_rttm (str): ground-truth RTTM file
        score_result (str): output path for the detailed scoring report

    Returns:
        float: the overall DER parsed from the report
    """
    dscore_dir: Path = Path(dscore_dir)
    Path(score_result).parent.mkdir(exist_ok=True, parents=True)

    if not dscore_dir.is_dir():
        logger.info(f'Cloning dscore into {dscore_dir}')
        # list-form argv (shell=False): robust against spaces/special chars in paths.
        # The original captured stdout and discarded it; check_call is the honest form.
        subprocess.check_call(
            ['git', 'clone', 'https://github.com/nryant/dscore.git', str(dscore_dir)]
        )

    # Redirect stdout explicitly instead of relying on a shell '>' redirection
    with open(score_result, 'w') as result_f:
        subprocess.check_call(
            ['python3', str(dscore_dir / 'score.py'), '-r', str(gt_rttm), '-s', str(hyp_rttm)],
            stdout=result_f,
        )
    return get_overall_der_from_dscore_file(score_result)
|
def get_overall_der_from_dscore_file(score_result: str):
    """Parse the overall DER from a dscore report file.

    The report must contain exactly one line mentioning 'OVERALL'; the DER is
    the 4th whitespace-separated field of that line.

    Args:
        score_result (str): path to the dscore report

    Returns:
        float: the overall DER
    """
    with open(score_result) as file:
        overall_lines = [line for line in file.readlines() if 'OVERALL' in line]
    assert len(overall_lines) == 1

    # normalize tabs and runs of spaces into single-space separators
    normalized = re.sub(' +', ' ', re.sub('\t+', ' ', overall_lines[0]))
    return float(normalized.split(' ')[3])
|
def csv_to_kaldi_dir(csv: str, data_dir: str):
    """Convert a segment-per-row csv into a Kaldi-style data directory.

    Writes wav.scp, reco2dur, utt2spk, spk2utt, and segments under ``data_dir``.
    Consistency is asserted: a record_id must always map to the same wav_path
    and duration, and an utt_id to the same speaker.

    Args:
        csv (str): input csv with columns record_id, wav_path, duration,
            utt_id, speaker, start_sec, end_sec
        data_dir (str): output Kaldi data directory (created if missing)
    """
    logger.info(f'Convert csv {csv} into kaldi data directory {data_dir}')
    data_dir: Path = Path(data_dir)
    data_dir.mkdir(exist_ok=True, parents=True)
    df = pd.read_csv(csv)

    # BUGFIX: 'duration' was read below (row['duration']) but missing from the
    # original required-column check, so a csv without it failed with an opaque
    # KeyError mid-loop instead of this assertion.
    required = ['record_id', 'wav_path', 'duration', 'utt_id', 'speaker', 'start_sec', 'end_sec']
    for r in required:
        assert (r in df.columns), f'missing required column: {r}'

    reco2path = {}
    reco2dur = {}
    utt2spk = {}
    spk2utt = {}
    segments = []
    for (rowid, row) in tqdm(df.iterrows(), total=len(df)):
        (record_id, wav_path, duration, utt_id, speaker, start_sec, end_sec) = (
            row['record_id'], row['wav_path'], row['duration'],
            row['utt_id'], row['speaker'], row['start_sec'], row['end_sec'],
        )
        # every row of the same recording must agree on path and duration
        if record_id in reco2path:
            assert (wav_path == reco2path[record_id])
        else:
            reco2path[record_id] = wav_path
        if record_id not in reco2dur:
            reco2dur[record_id] = duration
        else:
            assert (reco2dur[record_id] == duration)
        # every row of the same utterance must agree on the speaker
        if utt_id not in utt2spk:
            utt2spk[utt_id] = str(speaker)
        else:
            assert (utt2spk[utt_id] == str(speaker))
        spk2utt.setdefault(speaker, []).append(utt_id)
        segments.append((utt_id, record_id, str(start_sec), str(end_sec)))

    with (data_dir / 'wav.scp').open('w') as f:
        f.writelines([f'{reco} {path}\n' for (reco, path) in reco2path.items()])
    with (data_dir / 'reco2dur').open('w') as f:
        f.writelines([f'{reco} {dur}\n' for (reco, dur) in reco2dur.items()])
    with (data_dir / 'utt2spk').open('w') as f:
        f.writelines([f'{utt} {spk}\n' for (utt, spk) in utt2spk.items()])
    with (data_dir / 'spk2utt').open('w') as f:
        f.writelines([f"{spk} {' '.join(utts)}\n" for (spk, utts) in spk2utt.items()])
    with (data_dir / 'segments').open('w') as f:
        f.writelines([f'{utt} {record} {start} {end}\n' for (utt, record, start, end) in segments])
|
def kaldi_dir_to_csv(data_dir: str, csv: str):
    """Convert a Kaldi data directory into a segment-per-row csv file.

    Reads wav.scp, reco2dur, utt2spk, and segments, joining them into a csv
    with columns record_id, wav_path, utt_id, speaker, start_sec, end_sec,
    duration.

    Args:
        data_dir (str): input Kaldi data directory
        csv (str): output csv path
    """
    logger.info(f'Convert kaldi data directory {data_dir} into csv {csv}')
    data_dir: Path = Path(data_dir)
    for fname in ('wav.scp', 'segments', 'utt2spk', 'reco2dur'):
        assert (data_dir / fname).is_file(), f'missing {fname} in {data_dir}'

    reco2path = {}
    with (data_dir / 'wav.scp').open() as f:
        for line in f.readlines():
            # ROBUSTNESS: split on any whitespace (the original split(' ') broke
            # on tab-separated files), and only once so the path may contain spaces
            (reco, path) = line.strip().split(maxsplit=1)
            reco2path[reco] = path

    reco2dur = {}
    with (data_dir / 'reco2dur').open() as f:
        for line in f.readlines():
            (reco, duration) = line.split()
            reco2dur[reco] = float(duration)

    utt2spk = {}
    with (data_dir / 'utt2spk').open() as f:
        for line in f.readlines():
            (utt, spk) = line.split()
            utt2spk[utt] = spk

    rows = []
    with (data_dir / 'segments').open('r') as f:
        for line in f.readlines():
            (utt, reco, start, end) = line.split()
            rows.append(dict(
                record_id=reco,
                wav_path=reco2path[reco],
                utt_id=utt,
                speaker=utt2spk[utt],
                start_sec=float(start),
                end_sec=float(end),
                duration=reco2dur[reco],
            ))

    # Building from row dicts (with explicit columns) also handles an empty
    # segments file, where the original zip(*row) raised ValueError
    columns = ['record_id', 'wav_path', 'utt_id', 'speaker', 'start_sec', 'end_sec', 'duration']
    pd.DataFrame(rows, columns=columns).to_csv(csv, index=False)
|
class BeijingOpera(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the Beijing Opera percussion task, judging by the name; verify upstream.

    Every classmethod below only overrides the parent's default config via
    ``default_cfg``/``default_except``; the actual logic lives in
    :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / d_prime / aucroc / mAP
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class CremaD(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the CREMA-D emotion task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / mAP / d_prime / aucroc
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class Dcase2016Task2(HearTimestamp):
    """HEAR timestamp (event-prediction) recipe — presumably DCASE 2016 Task 2
    office sound-event detection, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearTimestamp`.
    """

    # multilabel event prediction with an 11-way fully-connected head
    @default_cfg(**HearTimestamp.setup.default_except(corpus=dict(CLS=field(dcase_2016_task2, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), output_size=11, hidden_layers=2), task=dict(CLS=field(EventPredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='multilabel', scores=['event_onset_200ms_fms', 'segment_1s_er'], postprocessing_grid={'median_filter_ms': [250], 'min_duration': [125, 250]})))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearTimestamp.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearTimestamp.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='event_onset_200ms_fms', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers and the task object."""
        super().train(**cfg)

    @default_cfg(**HearTimestamp.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearTimestamp.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearTimestamp.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearTimestamp.run over the three stages."""
        super().run(**cfg)
|
class ESC50(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the ESC-50 environmental-sound task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / d_prime / aucroc / mAP
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    # unlike most siblings, this recipe also overrides the step schedule
    @default_cfg(**HearScene.train.default_except(trainer=dict(total_steps=4000, log_step=100, eval_step=500, save_step=100, valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class FSD50k(HearScene):
    """HEAR scene-classification recipe (train/val/test splits) — presumably
    the FSD50K multilabel tagging task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multilabel tagging scored primarily by mAP; note the smaller batch size (10)
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=newdict(CLS=FixedBatchSizeBatchSampler, batch_size=10, shuffle=True), task=dict(prediction_type='multilabel', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    # longer schedule than the default; checkpoints selected by mAP
    @default_cfg(**HearScene.train.default_except(trainer=dict(total_steps=40000, log_step=100, eval_step=1000, save_step=100, valid_metric='mAP', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by mAP."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)
|
class GSC5hr(HearScene):
    """HEAR scene-classification recipe (train/val/test splits) — presumably a
    5-hour Google Speech Commands subset, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass keyword classification scored by top1_acc
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)
|
class Gtzan(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the GTZAN music-genre task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / mAP / d_prime / aucroc
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class GtzanMusicSpeech(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the GTZAN music/speech discrimination task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / mAP / d_prime / aucroc
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class Gunshot(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the gunshot-triangulation task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / d_prime / aucroc / mAP
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class LibriCount(HearScene):
    """HEAR scene-classification recipe (k-fold cross validation) — presumably
    the LibriCount speaker-count task, judging by the name; verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass classification scored by top1_acc / d_prime / aucroc / mAP
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by top1_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)

    # 'test_fold' is injected per fold by the cross-validation driver
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Delegate to HearScene.cross_validation across all folds."""
        super().cross_validation(**cfg)
|
class Maestro(HearTimestamp):
    """HEAR timestamp (event-prediction) recipe — presumably the MAESTRO piano
    note-onset task, judging by the name and the 87-way output; verify upstream.

    Unlike the sibling classes, :meth:`cross_validation` is implemented here
    rather than delegated, so its fold-looping logic is visible below.
    """

    # multilabel event prediction with an 87-way fully-connected head
    @default_cfg(**HearTimestamp.setup.default_except(corpus=dict(CLS=field(maestro, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]')), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), output_size=87, hidden_layers=2), task=dict(CLS=field(EventPredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='multilabel', scores=['event_onset_50ms_fms', 'event_onset_offset_50ms_20perc_fms'], postprocessing_grid={'median_filter_ms': [150], 'min_duration': [50]})))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearTimestamp.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearTimestamp.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='event_onset_50ms_fms', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        '\n        Train the setup problem with the train/valid datasets & samplers and the task object\n        '
        super().train(**cfg)

    @default_cfg(**HearTimestamp.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearTimestamp.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearTimestamp.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearTimestamp.run over the three stages."""
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        "\n        Except 'num_fold', all other fields are for 'run' for every fold. That is, all folds shared the same\n        config (training hypers, dataset root, etc) except 'workspace' and 'test_fold' are different\n        "
        cfg = Container(cfg)
        # one 'fold_{id}' sub-workspace per fold under the root workspace
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        # run the full pipeline once per fold, rotating the held-out fold id
        for (fold_id, workspace) in enumerate(workspaces):
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # collect each fold's 'test_metrics' from its workspace
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        # average each metric across folds and persist the summary as yaml
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class Nsynth5hr(HearScene):
    """HEAR scene-classification recipe (train/val/test splits) — presumably a
    5-hour NSynth pitch subset, judging by the name and pitch/chroma scores;
    verify upstream.

    All classmethods only override the parent's default config; the actual
    logic lives in :class:`HearScene`.
    """

    # multiclass pitch classification scored by pitch_acc / chroma_acc
    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['pitch_acc', 'chroma_acc'])))
    @classmethod
    def setup(cls, **cfg):
        """Delegate to HearScene.setup with task-specific default config."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='pitch_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Delegate to HearScene.train, selecting checkpoints by pitch_acc."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Delegate to HearScene.inference with the parent's defaults."""
        super().inference(**cfg)

    # runs setup -> train -> inference; per-stage configs reuse the defaults above
    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Delegate to HearScene.run over the three stages."""
        super().run(**cfg)
|
class HearScene(Problem, Trainer):
    """Scene-level (one label per clip) HEAR downstream problem.

    ``setup`` wires corpus -> datapipes -> samplers -> upstream/downstream
    model -> task and stores everything in the workspace; training and
    inference are delegated to :class:`Trainer`.  ``cross_validation`` runs
    the whole pipeline once per fold and averages the test metrics.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field(FixedBatchSizeBatchSampler, '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str), batch_size='???'), valid_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='hubert', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option'), legacy=True), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), hidden_layers=2, pooling='mean'), task=dict(CLS=field(ScenePredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='???', scores='???'))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Instantiate all components from *cfg* and persist the raw splits,
        datasets, samplers and the task into ``cfg.workspace``.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        # Reproducibility: seed all RNGs before any data/model construction.
        fix_random_seeds()
        upstream = cfg.upstream()
        # The upstream's frame shift is exposed so downstream pipes can align
        # frame-level features with the audio.
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = cfg.corpus().split(3)
        stats = corpus_stats.add(stats)
        logger.info('Preparing train data')
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        # Tools built while preparing the train set (e.g. label encoders) are
        # published through workspace.environ so valid/test reuse them.
        stats.override(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **dict(workspace.environ))
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task

    # 'valid_metric' / 'valid_higher_better' are left as '???' because each
    # task subclass picks its own model-selection metric.
    @default_cfg(**Trainer.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='???', valid_higher_better='???')))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @default_cfg(**Trainer.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`Trainer.inference`."""
        super().inference(**cfg)

    @default_cfg(**Problem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics.

        Except 'num_fold', all other fields are forwarded to 'run' for every
        fold; all folds share the same config (training hypers, dataset root,
        etc.) except 'workspace' and 'test_fold'.
        """
        cfg = Container(cfg)
        # One sub-workspace per fold under the shared root workspace.
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        for (fold_id, workspace) in enumerate(workspaces):
            # 'num_fold' is not a 'run' option, so drop it per-fold.
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # Collect each fold's test metrics, then average metric-by-metric.
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class Stroke(HearScene):
    """Stroke (HEAR scene-level) downstream problem with k-fold splits.

    Specializes :class:`HearScene` with the k-fold corpus, multiclass
    prediction, and top-1 accuracy / d-prime / AUC-ROC / mAP scores.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Build datasets/samplers/task; see :meth:`HearScene.setup`."""
        super().setup(**cfg)

    # Model selection tracks top-1 accuracy (higher is better).
    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the Stroke validation metric; see :meth:`HearScene.train`."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate on the test fold; see :meth:`HearScene.inference`."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference; see :meth:`HearScene.run`."""
        super().run(**cfg)

    # Runs 'run' once per fold; 'test_fold' is auto-set per fold.
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """K-fold evaluation; see :meth:`HearScene.cross_validation`."""
        super().cross_validation(**cfg)
|
class HearTimestamp(Problem, Trainer):
    """Timestamp-level (frame/event) HEAR downstream problem base class.

    Mirrors the scene-level setup but chunks long recordings: valid/test
    batches are grouped by 'unchunked_id' and ordered by 'chunk_index' so a
    full recording can be reassembled from its chunks at evaluation time.
    'downstream' and 'task' are left as '???' for subclasses to fill in.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field(FixedBatchSizeBatchSampler, '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str), batch_size=5), valid_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), test_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='hubert', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option')), downstream=dict(CLS=field('???', '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str)), task=dict(CLS=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str)))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Instantiate all components from *cfg* and persist the raw splits,
        datasets, samplers and the task into ``cfg.workspace``.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        # Reproducibility: seed all RNGs before any data/model construction.
        fix_random_seeds()
        upstream = cfg.upstream()
        # Frame shift lets the datapipe align timestamp labels with frames.
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = cfg.corpus().split(3)
        stats = corpus_stats.add(stats)
        logger.info('Preparing train data')
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        # Train-derived tools are shared with valid/test via workspace.environ.
        stats.override(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **dict(workspace.environ))
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
|
class Tonic(HearScene):
    """Tonic (HEAR scene-level) downstream problem with k-fold splits.

    NOTE(review): this class uses ``N_FOLDS`` while the sibling problems use
    ``NUM_FOLDS`` — presumably a per-file constant; confirm both are defined.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=N_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Build datasets/samplers/task; see :meth:`HearScene.setup`."""
        super().setup(**cfg)

    # Model selection tracks top-1 accuracy (higher is better).
    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the Tonic validation metric; see :meth:`HearScene.train`."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate on the test fold; see :meth:`HearScene.inference`."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference; see :meth:`HearScene.run`."""
        super().run(**cfg)

    # Runs 'run' once per fold; 'test_fold' is auto-set per fold.
    @default_cfg(num_fold=field(N_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """K-fold evaluation; see :meth:`HearScene.cross_validation`."""
        super().cross_validation(**cfg)
|
class Vocal(HearScene):
    """Vocal (HEAR scene-level) downstream problem with k-fold splits.

    Specializes :class:`HearScene` with the k-fold corpus and multiclass
    prediction; unlike the sibling problems, mAP is the leading score and
    the validation metric.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Build datasets/samplers/task; see :meth:`HearScene.setup`."""
        super().setup(**cfg)

    # Model selection tracks mAP (higher is better).
    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='mAP', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the Vocal validation metric; see :meth:`HearScene.train`."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate on the test fold; see :meth:`HearScene.inference`."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference; see :meth:`HearScene.run`."""
        super().run(**cfg)

    # Runs 'run' once per fold; 'test_fold' is auto-set per fold.
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """K-fold evaluation; see :meth:`HearScene.cross_validation`."""
        super().cross_validation(**cfg)
|
class VoxLingua(HearScene):
    """VoxLingua (HEAR scene-level) language-ID downstream problem.

    Specializes :class:`HearScene` with the k-fold corpus, multiclass
    prediction, and top-1 accuracy / d-prime / AUC-ROC / mAP scores.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Build datasets/samplers/task; see :meth:`HearScene.setup`."""
        super().setup(**cfg)

    # Model selection tracks top-1 accuracy (higher is better).
    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the VoxLingua validation metric; see :meth:`HearScene.train`."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate on the test fold; see :meth:`HearScene.inference`."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference; see :meth:`HearScene.run`."""
        super().run(**cfg)

    # Runs 'run' once per fold; 'test_fold' is auto-set per fold.
    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """K-fold evaluation; see :meth:`HearScene.cross_validation`."""
        super().cross_validation(**cfg)
|
class Apc(SslProblem):
    """APC (Autoregressive Predictive Coding) pre-train problem.

    Uses an RNN upstream with an identity predictor and an L1
    autoregressive-reconstruction objective on LibriSpeech features.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=RnnApc, input_size=_input_size, num_layers=3, hidden_size=512, dropout=0.1, residual=True), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=AutoregressiveReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the APC problem: train/valid/test datasets & samplers and
        a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream as an 'all_states.ckpt' checkpoint.

        NOTE(review): the checkpoint layout (config/model/Upstream_Config)
        appears to mirror the legacy s3prl APC checkpoint format — confirm
        against the APC upstream loader before changing it.
        """
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        # '_cls' and 'input_size' are constructor plumbing, not model paras.
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class AudioAlbert(SslProblem):
    """AudioALBERT pre-train problem.

    Transformer (Mockingjay architecture) upstream with a Mockingjay
    predictor head and an L1 feature-reconstruction objective.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the AudioALBERT problem: train/valid/test datasets &
        samplers and a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor + upstream weights as an 'all_states.ckpt'.

        NOTE(review): the layout (Config/SpecHead/Transformer/Upstream_Config)
        appears to mirror the legacy Mockingjay-family checkpoint format —
        confirm against the corresponding upstream loader.
        """
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # Propagate the pipe's normalization target into the audio config.
        all_states['Upstream_Config']['audio']['target_level'] = _pretrain_task_pipe_config['target_level']
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class SslProblem(Problem, Trainer):
    """Generic self-supervised pre-training problem.

    ``setup_problem`` instantiates every configured component — corpus,
    datapipes, samplers, upstream, predictor and task — and persists the
    resulting datasets, samplers and task into the given workspace.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(_cls=field('???', '\nThe corpus class. You can add the **kwargs right below this _cls key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), train_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), valid_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), valid_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), test_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), test_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), upstream=dict(_cls=field(S3PRLUpstream, '\nThe class of the upstream NN model. You can add the **kwargs right below this _cls key', str)), predictor=dict(_cls=field('???', '\nThe class of the predictor NN model class for pre-train. You can add the **kwargs right below this _cls key', str)), task=dict(_cls=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this _cls key', str)))
    @classmethod
    def setup_problem(cls, **cfg):
        """Build every component from *cfg* and store datasets, samplers
        and the task object into ``cfg.workspace``.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)

        # The upstream may arrive pre-built (an nn.Module) or as a
        # (_cls, **kwargs) config entry to instantiate here.
        if isinstance(cfg.upstream, nn.Module):
            upstream = cfg.upstream
        else:
            upstream = cfg.upstream._cls(**cfg.upstream.kwds())

        shared_tools = Container()

        logger.info('Preparing corpus')
        corpus = cfg.corpus._cls(**cfg.corpus.kwds())
        (train_split, valid_split, test_split, corpus_stats) = corpus.split(3)
        shared_tools.add(corpus_stats)

        def _prepare(raw_split, datapipe_cfg, sampler_cfg):
            # Wrap the raw split, push it through its datapipe, then build
            # the batch sampler on the transformed dataset.
            dataset = AugmentedDynamicItemDataset(raw_split, tools=shared_tools)
            dataset = datapipe_cfg._cls(**datapipe_cfg.kwds())(dataset)
            sampler = sampler_cfg._cls(dataset, **sampler_cfg.kwds())
            return (dataset, sampler)

        logger.info('Preparing train data')
        (train_dataset, train_sampler) = _prepare(train_split, cfg.train_datapipe, cfg.train_sampler)
        # Tools created while building the train set are shared with the
        # valid/test pipelines below.
        shared_tools.add(train_dataset.all_tools())

        logger.info('Preparing valid data')
        (valid_dataset, valid_sampler) = _prepare(valid_split, cfg.valid_datapipe, cfg.valid_sampler)

        logger.info('Preparing test data')
        (test_dataset, test_sampler) = _prepare(test_split, cfg.test_datapipe, cfg.test_sampler)

        logger.info('Preparing model and task')
        predictor = cfg.predictor._cls(**shared_tools, **cfg.predictor.kwds())
        task = cfg.task._cls(upstream, predictor, workspace=workspace, **shared_tools, **cfg.task.kwds())

        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
        workspace.environ.update(shared_tools)
|
class Mockingjay(SslProblem):
    """Mockingjay pre-train problem.

    Transformer upstream with a Mockingjay predictor head and an L1
    feature-reconstruction objective on LibriSpeech.

    NOTE(review): this class references ``pretrain_task_pipe_config`` (no
    leading underscore) while the sibling problems use
    ``_pretrain_task_pipe_config`` — presumably a per-file constant; confirm
    it is defined in this module.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the Mockingjay problem: train/valid/test datasets &
        samplers and a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor + upstream weights as an 'all_states.ckpt'.

        NOTE(review): the layout (Config/SpecHead/Transformer/Upstream_Config)
        appears to mirror the legacy Mockingjay checkpoint format — confirm
        against the corresponding upstream loader.
        """
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class Npc(SslProblem):
    """NPC (Non-autoregressive Predictive Coding) pre-train problem.

    CNN upstream with masked convolution blocks, vector quantization, an
    identity predictor, and an L1 feature-reconstruction objective.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=CnnNpc, input_size=_input_size, kernel_size=15, mask_size=5, n_blocks=4, hidden_size=512, dropout=0.1, residual=True, batch_norm=True, activate='relu', disable_cross_layer=False, vq=dict(codebook_size=[64, 64, 64, 64], code_dim=[128, 128, 128, 128], gumbel_temperature=1.0)), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=FeatReconstructionTask, loss=L1Loss, loss_config=dict(reduction='mean')))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the NPC problem: train/valid/test datasets & samplers and
        a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream as an 'all_states.ckpt' checkpoint.

        NOTE(review): the local name 'apc_config' is inherited from the Apc
        implementation this method was copied from; the layout appears to
        mirror the legacy APC/NPC checkpoint format — confirm against the
        NPC upstream loader.
        """
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        # '_cls' and 'input_size' are constructor plumbing, not model paras.
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class Tera(SslProblem):
    """TERA pre-train problem.

    Transformer (Mockingjay architecture) upstream with a Mockingjay
    predictor head and an L1 feature-reconstruction objective.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the TERA problem: train/valid/test datasets & samplers and
        a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor + upstream weights as an 'all_states.ckpt'.

        NOTE(review): the layout (Config/SpecHead/Transformer/Upstream_Config)
        appears to mirror the legacy TERA/Mockingjay checkpoint format —
        confirm against the corresponding upstream loader.
        """
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # Propagate the pipe's normalization target into the audio config.
        all_states['Upstream_Config']['audio']['target_level'] = _pretrain_task_pipe_config['target_level']
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class VqApc(SslProblem):
    """VQ-APC pre-train problem.

    APC's RNN upstream augmented with vector quantization, an identity
    predictor, and an L1 autoregressive-reconstruction objective.
    """

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=RnnApc, input_size=_input_size, num_layers=3, hidden_size=512, dropout=0.1, residual=True, vq=dict(codebook_size=[512], code_dim=[512], gumbel_temperature=0.5)), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=AutoregressiveReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Set up the VQ-APC problem: train/valid/test datasets & samplers
        and a task object.
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers
        and the task object.
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the trained task; see :meth:`SslProblem.inference`."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream as an 'all_states.ckpt' checkpoint.

        NOTE(review): the checkpoint layout (config/model/Upstream_Config)
        appears to mirror the legacy APC-family checkpoint format — confirm
        against the VQ-APC upstream loader.
        """
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        # '_cls' and 'input_size' are constructor plumbing, not model paras.
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        # Written next to (not inside) additional_dir.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference in sequence."""
        super().run_stages(**cfg)
|
class SuperbASR(SuperbProblem):
    """SUPERB ASR problem: CTC speech-to-text on LibriSpeech with an RNN encoder + SpecAugment."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=librispeech_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2TextPipe, generate_tokenizer=True), train_sampler=dict(CLS=SortedBucketingSampler, batch_size=32, max_length=2000, shuffle=True), valid_datapipe=dict(CLS=Speech2TextPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=Speech2TextPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=ModelWithSpecaug, model_cfg=dict(CLS=RNNEncoder, module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True), specaug_cfg=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2)), task=dict(CLS=Speech2TextCTCTask)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the ASR problem: train/valid/test datasets & samplers and the CTC task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='wer', valid_higher_better=False)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on WER (lower is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbProblem(Problem, Trainer):
    """Base class for SUPERB downstream problems.

    Provides the generic `setup` stage that materializes the corpus,
    datapipes, samplers, upstream/downstream model and task, and stores them
    in a workspace for the later train/inference stages.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), valid_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), test_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='???', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option')), downstream=dict(CLS=field('???', '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str)), task=dict(CLS=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str)))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Build datasets, samplers, model and task, and persist them in the workspace."""
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        fix_random_seeds()
        # Instantiate the upstream first: its downsample rate determines the
        # feature frame shift the datapipes need.
        upstream = cfg.upstream()
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        # NOTE(review): split(3) is unpacked into 4 values (3 splits plus the
        # remaining corpus stats) — confirm against Container.split's contract.
        (train_data, valid_data, test_data, corpus_stats) = Container(cfg.corpus()).split(3)
        stats.add(corpus_stats)
        logger.info('Preparing train data')
        # The train datapipe may create shared tools (tokenizer, category
        # encoder, ...); they are collected into `stats` below and re-used by
        # the valid/test pipes via workspace.environ.
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        stats.add(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        # Downstream model consumes the upstream's feature dimension.
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **stats)
        # Persist everything the later train/inference stages will load back.
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
|
class SuperbER(SuperbProblem):
    """SUPERB Emotion Recognition: utterance classification on IEMOCAP with session-based cross validation."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=iemocap_for_superb, dataset_root='???', test_fold=field('???', 'The session in IEMOCAP used for testing.\nThe other sessions will be used for training and validation.')), train_datapipe=dict(CLS=UtteranceClassificationPipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4, shuffle=True), valid_datapipe=dict(CLS=UtteranceClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4), test_datapipe=dict(CLS=UtteranceClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the ER problem: train/valid/test datasets & samplers and the classification task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=30000, log_step=500, eval_step=1000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=8, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on accuracy (higher is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline for a single fold: setup -> train -> inference.
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run `run` once per fold and average the per-metric test results.

        Except 'num_fold', all fields are forwarded to 'run' for every fold;
        folds share the same config (training hypers, dataset root, etc.)
        except 'workspace' and 'test_fold'.
        """
        cfg = Container(cfg)
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        for (fold_id, workspace) in enumerate(workspaces):
            # Clone so per-fold mutations don't leak into the shared config.
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # Collect each fold's test metrics, grouped by metric name.
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class SuperbIC(SuperbProblem):
    """SUPERB Intent Classification: multi-category utterance classification on Fluent Speech Commands."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=fsc_for_multiple_classfication, dataset_root='???'), train_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32, shuffle=True), valid_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), test_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceMultiClassClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the IC problem: train/valid/test datasets & samplers and the multi-class task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=5000, save_step=250, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on accuracy (higher is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbKS(SuperbProblem):
    """SUPERB Keyword Spotting: utterance classification on Google Speech Commands V1."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=gsc_v1_for_superb, dataset_root='???'), train_datapipe=dict(CLS=UtteranceClassificationPipe, train_category_encoder=True, sox_effects=EFFECTS), train_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32), valid_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS), valid_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32), test_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the KS problem: class-balanced train/valid sampling, fixed-size test batches."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=5000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on accuracy (higher is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    # NOTE(review): unlike sibling problems, this uses a bare @default_cfg
    # instead of SuperbProblem.run.default_except(...) — presumably
    # equivalent here, but confirm intentional.
    @default_cfg(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume'))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbPR(SuperbProblem):
    """SUPERB Phoneme Recognition: CTC speech-to-phoneme on LibriSpeech, scored by PER."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=librispeech_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2PhonemePipe), train_sampler=dict(CLS=SortedSliceSampler, batch_size=16, max_length=300000), valid_datapipe=dict(CLS=Speech2PhonemePipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8), test_datapipe=dict(CLS=Speech2PhonemePipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8), downstream=dict(CLS=FrameLevelLinear), task=dict(CLS=Speech2TextCTCTask, log_metrics=['per'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up the PR problem: train/valid/test datasets & samplers and the CTC task logging PER."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.01), trainer=dict(total_steps=100000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate_steps=2, valid_metric='per', valid_higher_better=False)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on PER (lower is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbSDDatapipe(SequentialDataPipe):
    """Datapipe for SUPERB Speaker Diarization.

    Unfolds each recording into fixed-size 2000-frame chunks, builds the
    multi-class speaker-activity tagging targets, loads the audio, and maps
    the internal field names onto the task's expected output keys.
    """

    def __init__(self, feat_frame_shift: int, sample_rate: int = 16000, **kwds):
        chunking = UnfoldChunkByFrame(
            min_chunk_frames=2000,
            max_chunk_frames=2000,
            step_frames=2000,
            feat_frame_shift=feat_frame_shift,
            sample_rate=sample_rate,
        )
        tagging = BuildMultiClassTagging(sample_rate=sample_rate, feat_frame_shift=feat_frame_shift)
        audio_loading = LoadAudio(audio_sample_rate=sample_rate)
        output_keys = SetOutputKeys(
            x='wav',
            x_len='wav_len',
            label='multiclass_tag',
            label_len='tag_len',
            rec_id='unchunked_id',
            order_in_rec='chunk_index',
        )
        super().__init__(chunking, tagging, audio_loading, output_keys)
|
def prediction_numpy_to_segment_secs(prediction: np.ndarray, threshold: float=0.5, median_filter: int=1, frame_shift: int=160, subsampling: int=1, sampling_rate: int=16000):
    """Convert a frame-level activity prediction into per-class (start, end) segments in seconds.

    Args:
        prediction: array of shape (timestamps, class_num), values in 0~1.
        threshold: frames with score > threshold are considered active.
        median_filter: odd kernel size for optional per-class median smoothing
            (applied only when > 1).
        frame_shift / subsampling / sampling_rate: map frame indices to seconds.

    Returns:
        dict mapping class id (as str) to a list of (start_sec, end_sec)
        tuples; classes with no active frames are omitted.
    """
    binarized = (prediction > threshold).astype(int)
    if median_filter > 1:
        # Smooth each class track independently (kernel spans time only).
        binarized = medfilt(binarized, (median_filter, 1))
    sec_per_frame = (frame_shift * subsampling) / sampling_rate
    segments = {}
    for class_idx, activity in enumerate(binarized.T):
        # Zero-pad both ends so every active run has an onset and an offset.
        padded = np.pad(activity, (1, 1), 'constant')
        (boundaries,) = np.where(np.diff(padded, axis=0) != 0)
        if len(boundaries) == 0:
            continue
        # Boundaries alternate onset/offset; pair them up and scale to seconds.
        segments[str(class_idx)] = [
            (onset * sec_per_frame, offset * sec_per_frame)
            for onset, offset in zip(boundaries[::2], boundaries[1::2])
        ]
    return segments
|
class SuperbSD(SuperbProblem):
    """SUPERB Speaker Diarization: multi-class tagging with PIT loss, scored by DER via dscore."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=kaldi_for_multiclass_tagging, dataset_root='???'), train_datapipe=dict(CLS=SuperbSDDatapipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8, shuffle=True), valid_datapipe=dict(CLS=SuperbSDDatapipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=SuperbSDDatapipe), test_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), downstream=dict(CLS=SuperbDiarizationModel, output_size=2, hidden_size=512, rnn_layers=1), task=dict(CLS=DiarizationPIT)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the SD problem; test batches group the chunks of one recording together."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=30000, log_step=500, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=4, valid_metric='der', valid_higher_better=False)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on DER (lower is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage; produces per-recording prediction numpys.
        super().inference(**cfg)

    @default_cfg(workspace='???', prediction=field('prediction', 'The directory name under the workspace containing all the predicted numpy'), test_data=field('test_data', 'The testing data (in dict) under this workspace'), median_filters=field([1, 11], 'The median filter sizes to try when scoring'), thresholds=field([0.3, 0.4, 0.5, 0.6, 0.7], 'The threshold to try when determining 0/1 hard prediction.\nThe raw predictions are all between 0~1\n'), frame_shift=field(None, 'The frame shift of the prediction np.ndarray. Used to map the frame-level prediction back to seconds', int))
    @classmethod
    def scoring(cls, **cfg):
        """Grid-search decoding settings (median filter x threshold) and report the best DER."""
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        # Fall back to the frame shift recorded during setup when not given.
        frame_shift = (cfg.frame_shift or workspace.environ['feat_frame_shift'])
        test_data: dict = workspace[cfg.test_data]
        # Ground-truth RTTM built from the reference segments of each recording.
        test_segments = {reco: data_point['segments'] for (reco, data_point) in test_data.items()}
        test_rttm = workspace.put(test_segments, 'test_rttm', 'rttm')
        rttm_dir = (workspace / 'rttm')
        scoring_dir = (workspace / 'scoring')
        scoring_dir.mkdir(exist_ok=True, parents=True)
        all_ders = []
        reco2pred = {}
        # Load every prediction numpy once; re-used for all decoding settings.
        for p in tqdm((workspace / cfg.prediction).files(), desc='Load prediction'):
            reco2pred[p] = (workspace / cfg.prediction)[p]
        for median_filter in cfg.median_filters:
            for threshold in cfg.thresholds:
                logger.info(f'Decode prediction numpy array with the setting: median filter={median_filter}, threshold={threshold}')
                all_segments = dict()
                workspace = Workspace(workspace)
                at_least_one_segment = False
                for p in tqdm((workspace / cfg.prediction).files(), desc='prediction to seconds'):
                    segments = prediction_numpy_to_segment_secs(reco2pred[p], threshold, median_filter, frame_shift)
                    if (len(segments) > 0):
                        at_least_one_segment = True
                    all_segments[p] = segments
                if (not at_least_one_segment):
                    # Nothing decoded under this setting; skip scoring it.
                    logger.info('No segments found under this decoding setting')
                    continue
                identifier = f'hyp_threshold-{threshold}_median-{median_filter}'
                hyp_rttm = rttm_dir.put(all_segments, identifier, 'rttm')
                overall_der = cls.score_with_dscore(dscore_dir=(workspace / 'dscore'), hyp_rttm=hyp_rttm, gt_rttm=test_rttm, score_file=Path((scoring_dir / identifier)))
                logger.info(f'Overall DER with median_filter {median_filter} and threshold {threshold}: {overall_der}')
                all_ders.append(overall_der)
        # DER: lower is better, so the best setting is the minimum.
        # NOTE(review): raises IndexError if every setting produced no
        # segments (all_ders empty) — confirm acceptable.
        all_ders.sort()
        best_der = all_ders[0]
        logger.info(f'Best DER on test data: {best_der}')
        # NOTE(review): key is 'test_metric' (singular) while sibling problems
        # use 'test_metrics' — confirm downstream consumers expect this.
        workspace.put(dict(der=best_der), 'test_metric', 'yaml')

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference', 'scoring'], start_stage='setup', final_stage='scoring', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun'), scoring=scoring.default_cfg.deselect('workspace')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference -> scoring.
        super().run(**cfg)

    @default_cfg(dscore_dir=field('???', "The directory containing the 'dscore' repository"), hyp_rttm=field('???', 'The hypothesis rttm file'), gt_rttm=field('???', 'The ground truth rttm file'), score_file=field('???', 'The scored result file'))
    @classmethod
    def score_with_dscore(cls, **cfg) -> float:
        """Score hypothesis vs. ground-truth RTTM with the external dscore tool.

        Returns the overall DER and writes the detailed scoring results to
        'score_file'. Clones the dscore repository on first use.
        """
        cfg = Container(cfg)
        dscore_dir = Workspace(cfg.dscore_dir)
        if ((not dscore_dir.is_dir()) or ('score' not in dscore_dir.files())):
            # SECURITY NOTE(review): shell=True with interpolated paths is
            # vulnerable to shell injection if the workspace path is
            # attacker-controlled; prefer subprocess.run([...], shell=False).
            subprocess.check_output(f'git clone https://github.com/nryant/dscore.git {dscore_dir}', shell=True).decode('utf-8')
        result = subprocess.check_call(f'python3 {dscore_dir}/score.py -r {cfg.gt_rttm} -s {cfg.hyp_rttm} > {cfg.score_file}', shell=True)
        assert (result == 0), 'The scoring step fail.'
        # Parse the single OVERALL line of the dscore report; DER is the 4th
        # whitespace-separated column.
        with open(cfg.score_file) as file:
            lines = file.readlines()
        overall_lines = [line for line in lines if ('OVERALL' in line)]
        assert (len(overall_lines) == 1)
        overall_line = overall_lines[0]
        overall_line = re.sub('\t+', ' ', overall_line)
        overall_line = re.sub(' +', ' ', overall_line)
        overall_der = float(overall_line.split(' ')[3])
        return overall_der
|
class SuperbSF(SuperbProblem):
    """SUPERB Slot Filling: CTC speech-to-text on SNIPS with slot-aware character vocabulary."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=snips_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2TextPipe, generate_tokenizer=True, vocab_type='character-slot', vocab_file=_urls_to_filepaths(VOCAB_URL), slots_file=_urls_to_filepaths(SLOTS_URL)), train_sampler=dict(CLS=SortedSliceSampler, batch_size=32, max_length=300000), valid_datapipe=dict(CLS=Speech2TextPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=Speech2TextPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=ModelWithSpecaug, model_cfg=dict(CLS=RNNEncoder, module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True), specaug_cfg=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2)), task=dict(CLS=Speech2TextCTCTask, log_metrics=['wer', 'cer', 'slot_type_f1', 'slot_value_cer', 'slot_value_wer', 'slot_edit_f1_full', 'slot_edit_f1_part'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up the SF problem; vocab/slots files are downloaded and cached from the s3prl URLs."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='slot_type_f1', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on slot_type_f1 (higher is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbSIDTrainPipe(DataPipe):
    """Training datapipe for SUPERB Speaker Identification.

    Composes utterance classification preprocessing with a random temporal
    crop (up to `max_secs`), then exposes the cropped wav under the task's
    expected output keys.
    """

    def __init__(self, train_category_encoder: bool = False, max_secs: float = None) -> None:
        classification = UtteranceClassificationPipe(train_category_encoder=train_category_encoder)
        cropping = RandomCrop(max_secs=max_secs)
        output_keys = SetOutputKeys(dict(x='wav_crop', x_len='wav_crop_len'))
        self.pipes = SequentialDataPipe(classification, cropping, output_keys)

    def forward(self, dataset):
        # Apply the composed pipeline to the incoming dataset.
        return self.pipes(dataset)
|
class SuperbSID(SuperbProblem):
    """SUPERB Speaker Identification: utterance classification on VoxCeleb1."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=voxceleb1_for_utt_classification, dataset_root='???'), train_datapipe=dict(CLS=SuperbSIDTrainPipe, train_category_encoder=True, max_secs=8.0), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8, shuffle=True), valid_datapipe=dict(CLS=UtteranceClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=UtteranceClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Set up the SID problem; training crops utterances to at most 8 seconds."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=500, eval_step=5000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=4, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with Adam, validating on accuracy (higher is better)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Run the parent's inference stage with the default config.
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
class SuperbSV(SuperbProblem):
    """SUPERB Speaker Verification: x-vector training on VoxCeleb1, evaluated by EER over late checkpoints."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=voxceleb1_for_sv, dataset_root='???'), train_datapipe=dict(CLS=SpeakerVerificationPipe, random_crop_secs=8.0, sox_effects=EFFECTS), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=10, shuffle=True), valid_datapipe=dict(CLS=SpeakerVerificationPipe, sox_effects=EFFECTS), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=SpeakerVerificationPipe, sox_effects=EFFECTS), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=SuperbXvector), task=dict(CLS=SpeakerVerification, loss_type='amsoftmax', loss_cfg=dict(margin=0.4, scale=30))))
    @classmethod
    def setup(cls, **cfg):
        """Set up the ASV problem: train/valid/test datasets & samplers and the verification task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=200000, log_step=500, eval_step=field(10000000000.0, 'ASV do not use validation set'), save_step=20000, gradient_clipping=1000.0, gradient_accumulate_steps=5, valid_metric='eer', valid_higher_better=False, max_keep=10)))
    @classmethod
    def train(cls, **cfg):
        """Train with AdamW; validation is effectively disabled (eval_step is huge)."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_except(inference_steps=field([20000, 40000, 60000, 80000, 100000, 120000, 140000, 160000, 180000, 200000], 'The steps used for inference\n', 'egs: [900, 1000] - use the checkpoint of 90 and 100 steps for inference')))
    @classmethod
    def inference(cls, **cfg):
        """Evaluate every checkpoint listed in inference_steps and report the minimum EER.

        Unlike the generic inference stage, ASV selects its model by testing
        all late checkpoints because no validation set is used during training.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        dataset = workspace[f'{cfg.split_name}_dataset']
        sampler = workspace[f'{cfg.split_name}_sampler']
        dataloader = DataLoader(dataset, sampler, num_workers=cfg.n_jobs)
        with torch.no_grad():
            all_eers = []
            for step in cfg.inference_steps:
                # Each checkpoint's task object was saved under step-{N}/task.
                step_dir = (workspace / f'step-{step}')
                task = step_dir['task']
                task = task.to(cfg.device)
                task.eval()
                test_results = []
                for (batch_idx, batch) in enumerate(tqdm(dataloader, desc='Test', total=len(dataloader))):
                    batch = batch.to(cfg.device)
                    result = task.test_step(**batch)
                    # Keep only the cacheable parts to bound memory across batches.
                    test_results.append(result.cacheable())
                logs: Logs = task.test_reduction(test_results).logs
                logger.info(f'Step {step}')
                metrics = {key: value for (key, value) in logs.scalars()}
                step_dir.put(metrics, 'test_metrics', 'yaml')
                for (key, value) in metrics.items():
                    logger.info(f'{key}: {value}')
                # NOTE(review): assumes the task logs a scalar under the exact
                # key 'EER' — confirm against SpeakerVerification's logging.
                all_eers.append(metrics['EER'])
            workspace.put({'minEER': min(all_eers)}, 'test_metrics', 'yaml')

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        # Full pipeline: setup -> train -> inference.
        super().run(**cfg)
|
def get_downstream_args():
    """Parse CLI arguments for the downstream runner and load the experiment config.

    Handles auto-resume (picking the latest states-*.ckpt in the expdir),
    restoring args/config from a resumed checkpoint while preserving a small
    set of run-local options, and loading the YAML config for new runs.

    Returns:
        (args, config, backup_files): the merged argparse Namespace, the
        experiment config dict, and a list of files to back up alongside the
        experiment (e.g. the upstream model config).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['train', 'evaluate', 'inference'], required=True)
    parser.add_argument('-t', '--evaluate_split', default='test')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('--backend', default='nccl', help='The backend for distributed training')
    # Fix: was a pointless f-string with no placeholders.
    parser.add_argument('--local_rank', type=int, help='The GPU id this process should use while distributed training. None when not launched by torch.distributed.launch')
    parser.add_argument('-e', '--past_exp', metavar='{CKPT_PATH,CKPT_DIR}', help='Resume training from a checkpoint')
    parser.add_argument('-i', '--init_ckpt', metavar='CKPT_PATH', help='Load the checkpoint for evaluation')
    parser.add_argument('-c', '--config', help='The yaml file for configuring the whole experiment except the upstream model')
    parser.add_argument('-d', '--downstream', help=' Typically downstream dataset need manual preparation. Please check downstream/README.md for details')
    parser.add_argument('-v', '--downstream_variant', help='Downstream vairants given the same expert')
    parser.add_argument('--hub', default='torch', choices=['torch', 'huggingface'], help='The model Hub used to retrieve the upstream model.')
    upstreams = [attr for attr in dir(hub) if (attr[0] != '_')]
    parser.add_argument('-u', '--upstream', help=f'Upstreams with "_local" or "_url" postfix need local ckpt (-k) or config file (-g). Other upstreams download two files on-the-fly and cache them, so just -u is enough and -k/-g are not needed. Please check upstream/README.md for details. Available options in S3PRL: {upstreams}. ')
    parser.add_argument('-k', '--upstream_ckpt', metavar='{PATH,URL,GOOGLE_DRIVE_ID}', help='Only set when the specified upstream need it')
    parser.add_argument('-g', '--upstream_model_config', help='The config file for constructing the pretrained model')
    parser.add_argument('-r', '--upstream_refresh', action='store_true', help='Re-download cached ckpts for on-the-fly upstream variants')
    parser.add_argument('-f', '--upstream_trainable', action='store_true', help='Fine-tune, set upstream.train(). Default is upstream.eval()')
    parser.add_argument('-s', '--upstream_feature_selection', default='hidden_states', help='Specify the layer to be extracted as the representation')
    parser.add_argument('-l', '--upstream_layer_selection', type=int, help='Select a specific layer for the features selected by -s')
    parser.add_argument('--upstream_feature_normalize', action='store_true', help='Specify whether to normalize hidden features before weighted sum')
    parser.add_argument('--upstream_model_name', default='model.pt', help='The name of the model file in the HuggingFace Hub repo.')
    parser.add_argument('--upstream_revision', help='The commit hash of the specified HuggingFace Repository')
    parser.add_argument('-n', '--expname', help='Save experiment at result/downstream/expname')
    parser.add_argument('-p', '--expdir', help='Save experiment at expdir')
    parser.add_argument('-a', '--auto_resume', action='store_true', help='Auto-resume if the expdir contains checkpoints')
    parser.add_argument('--push_to_hf_hub', default=False, help='Push all files in experiment directory to the Hugging Face Hub. To use this feature you must set HF_USERNAME and HF_PASSWORD as environment variables in your shell')
    parser.add_argument('--hf_hub_org', help='The Hugging Face Hub organisation to push fine-tuned models to')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--device', default='cuda', help='model.to(device)')
    parser.add_argument('--cache_dir', help='The cache directory for pretrained model downloading')
    parser.add_argument('--verbose', action='store_true', help='Print model infomation')
    parser.add_argument('--disable_cudnn', action='store_true', help='Disable CUDNN')
    args = parser.parse_args()
    backup_files = []
    if args.expdir is None:
        args.expdir = f'result/downstream/{args.expname}'
    # Auto-resume: if the expdir already contains checkpoints, treat it as a
    # past experiment to resume from.
    if args.auto_resume:
        if os.path.isdir(args.expdir):
            ckpt_pths = glob.glob(f'{args.expdir}/states-*.ckpt')
            if len(ckpt_pths) > 0:
                args.past_exp = args.expdir
    if args.past_exp:
        if os.path.isdir(args.past_exp):
            # Resume from the latest checkpoint (highest step number).
            ckpt_pths = glob.glob(f'{args.past_exp}/states-*.ckpt')
            assert len(ckpt_pths) > 0
            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[-1].split('.')[0])))
            ckpt_pth = ckpt_pths[-1]
        else:
            ckpt_pth = args.past_exp
        print(f'[Runner] - Resume from {ckpt_pth}')
        ckpt = torch.load(ckpt_pth, map_location='cpu')

        def update_args(old, new, preserve_list=None):
            # Merge `new` into `old`, skipping keys listed in preserve_list.
            # Fix: previously a None preserve_list made `key in preserve_list`
            # raise TypeError; now it defaults to preserving nothing.
            preserve_list = preserve_list or []
            out_dict = vars(old)
            new_dict = vars(new)
            for key in list(new_dict.keys()):
                if key in preserve_list:
                    new_dict.pop(key)
            out_dict.update(new_dict)
            return Namespace(**out_dict)

        # These options describe *this* invocation and must not be clobbered
        # by the values stored in the resumed checkpoint.
        cannot_overwrite_args = ['mode', 'evaluate_split', 'override', 'backend', 'local_rank', 'past_exp', 'device']
        args = update_args(args, ckpt['Args'], preserve_list=cannot_overwrite_args)
        os.makedirs(args.expdir, exist_ok=True)
        args.init_ckpt = ckpt_pth
        config = ckpt['Config']
    else:
        print('[Runner] - Start a new experiment')
        os.makedirs(args.expdir, exist_ok=True)
        if args.config is None:
            args.config = f'./downstream/{args.downstream}/config.yaml'
        with open(args.config, 'r') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
    if (args.upstream_model_config is not None) and os.path.isfile(args.upstream_model_config):
        backup_files.append(args.upstream_model_config)
    # Highest-priority overrides applied last, on top of args and config.
    if (args.override is not None) and (args.override.lower() != 'none'):
        override(args.override, args, config)
        os.makedirs(args.expdir, exist_ok=True)
    return (args, config, backup_files)
|
def main():
    """Entry point for downstream training/evaluation.

    Parses CLI/checkpoint arguments, sets up (optional) distributed training,
    logs into the Hugging Face Hub when requested, seeds every RNG, then
    dispatches to ``Runner.<mode>`` (e.g. train / evaluate).
    """
    logging.basicConfig(level=logging.INFO)
    # DataLoader workers share tensors via the filesystem to avoid fd exhaustion.
    torch.multiprocessing.set_sharing_strategy('file_system')
    torchaudio.set_audio_backend('sox_io')
    hack_isinstance()
    (args, config, backup_files) = get_downstream_args()
    if args.cache_dir is not None:
        torch.hub.set_dir(args.cache_dir)
    # When launched with torch.distributed, bind this process to its GPU and
    # join the process group before anything touches CUDA.
    if args.local_rank is not None:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(args.backend)
    if (args.mode == 'train') and args.past_exp:
        # A resumed run must keep the same DDP topology as the original run,
        # otherwise optimizer/scheduler states would be inconsistent.
        ckpt = torch.load(args.init_ckpt, map_location='cpu')
        now_use_ddp = is_initialized()
        original_use_ddp = (ckpt['Args'].local_rank is not None)
        assert now_use_ddp == original_use_ddp, f'{now_use_ddp} != {original_use_ddp}'
        if now_use_ddp:
            now_world = get_world_size()
            original_world = ckpt['WorldSize']
            assert now_world == original_world, f'{now_world} != {original_world}'
    if args.hub == 'huggingface':
        # Log into the Hugging Face Hub so artifacts can be pushed later.
        args.from_hf_hub = True
        hf_user = os.environ.get('HF_USERNAME')
        hf_password = os.environ.get('HF_PASSWORD')
        huggingface_token = HfApi().login(username=hf_user, password=hf_password)
        HfFolder.save_token(huggingface_token)
        print(f'Logged into Hugging Face Hub with user: {hf_user}')
    if is_leader_process():
        # Snapshot args/config with a time tag so every (re)launch is auditable.
        with open(os.path.join(args.expdir, f'args_{get_time_tag()}.yaml'), 'w') as file:
            yaml.dump(vars(args), file)
        with open(os.path.join(args.expdir, f'config_{get_time_tag()}.yaml'), 'w') as file:
            yaml.dump(config, file)
        for file in backup_files:
            backup(file, args.expdir)
    # Seed every RNG involved for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    if args.disable_cudnn:
        torch.backends.cudnn.enabled = False
    else:
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    runner = Runner(args, config)
    # Dispatch via getattr instead of eval(): identical behavior for attribute
    # access (including AttributeError on an unknown mode), no code execution.
    getattr(runner, args.mode)()
|
def get_pretrain_args():
    """Parse command-line arguments for upstream pre-training.

    Handles three concerns:
      1. argparse definition of the pre-training CLI;
      2. auto-resume / explicit resume from a checkpoint, restoring the
         original args and config stored inside the checkpoint;
      3. for fresh runs, locating the runner/model YAML configs and backing
         them up into the experiment directory.

    Returns:
        (args, config): the resolved argument Namespace and the runner config
        dict (loaded from YAML, or restored from the resumed checkpoint).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--past_exp', metavar='{CKPT_PATH,CKPT_DIR}', help='Resume training from a checkpoint')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('-c', '--config', metavar='CONFIG_PATH', help='The yaml file for configuring the whole experiment, except the upstream model')
    parser.add_argument('-u', '--upstream', choices=os.listdir('pretrain/'))
    parser.add_argument('-g', '--upstream_config', metavar='CONFIG_PATH', help='The yaml file for configuring the upstream model')
    parser.add_argument('-n', '--expname', help='Save experiment at expdir/expname')
    parser.add_argument('-p', '--expdir', help='Save experiment at expdir')
    parser.add_argument('-a', '--auto_resume', action='store_true', help='Auto-resume if the expdir contains checkpoints')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--device', default='cuda', help='model.to(device)')
    parser.add_argument('--multi_gpu', action='store_true', help='Enables multi-GPU training')
    args = parser.parse_args()
    if (args.expdir is None):
        args.expdir = f'result/pretrain/{args.expname}'
    # Auto-resume: if the expdir already holds step checkpoints, treat it as
    # the experiment to resume from.
    if args.auto_resume:
        if os.path.isdir(args.expdir):
            ckpt_pths = glob.glob(f'{args.expdir}/states-*.ckpt')
            if (len(ckpt_pths) > 0):
                args.past_exp = args.expdir
    if args.past_exp:
        # past_exp may be a directory (pick the newest checkpoint inside) or a
        # direct path to a checkpoint file.
        if os.path.isdir(args.past_exp):
            ckpt_pths = glob.glob(f'{args.past_exp}/states-*.ckpt')
            assert (len(ckpt_pths) > 0)
            # Sort by the numeric step in "states-<step>.ckpt" and take the latest.
            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
            ckpt_pth = ckpt_pths[(- 1)]
        else:
            ckpt_pth = args.past_exp
        print(f'[Runner] - Resume from {ckpt_pth}')
        ckpt = torch.load(ckpt_pth, map_location='cpu')
        def update_args(old, new):
            # Overwrite the current CLI args with those stored in the checkpoint
            # (checkpoint args win on every key).
            old_dict = vars(old)
            new_dict = vars(new)
            old_dict.update(new_dict)
            return Namespace(**old_dict)
        args = update_args(args, ckpt['Args'])
        os.makedirs(args.expdir, exist_ok=True)
        args.init_ckpt = ckpt_pth
        # Reuse the config saved inside the checkpoint instead of re-reading YAML.
        config = ckpt['Config']
    else:
        print('[Runner] - Start a new experiment')
        args.init_ckpt = None
        assert (args.expname is not None)
        if (args.expdir is None):
            args.expdir = f'result/pretrain/{args.expname}'
        os.makedirs(args.expdir, exist_ok=True)
        # Match the upstream name against directories under pretrain/; the
        # pattern accepts either an exact match or a "<dir>_..." prefix.
        upstream_dirs = [u for u in os.listdir('pretrain/') if re.search(f'^{u}_|^{u}$', args.upstream)]
        assert (len(upstream_dirs) == 1)
        if (args.config is None):
            args.config = f'pretrain/{upstream_dirs[0]}/config_runner.yaml'
        with open(args.config, 'r') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
        # Back up the runner config into the experiment directory.
        if os.path.isfile(args.config):
            copyfile(args.config, f'{args.expdir}/config_runner.yaml')
        else:
            raise FileNotFoundError('Wrong file path for runner config.')
        if (args.upstream_config is None):
            default_upstream_config = f'pretrain/{upstream_dirs[0]}/config_model.yaml'
            assert os.path.isfile(default_upstream_config)
            args.upstream_config = default_upstream_config
        # Back up the model config into the experiment directory.
        if os.path.isfile(args.upstream_config):
            copyfile(args.upstream_config, f'{args.expdir}/config_model.yaml')
        else:
            raise FileNotFoundError('Wrong file path for model config.')
    # --override has the highest priority over both args and config.
    if ((args.override is not None) and (args.override.lower() != 'none')):
        override(args.override, args, config)
        os.makedirs(args.expdir, exist_ok=True)
    return (args, config)
|
def main():
    """Entry point for upstream pre-training.

    Parses the pre-training args, seeds every RNG for reproducibility, runs
    ``Runner.train``, then closes the logger.
    """
    (args, config) = get_pretrain_args()
    # Seed every RNG involved for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    runner = Runner(args, config)
    # Direct call instead of eval('runner.train')(): identical behavior,
    # clearer code, no eval.
    runner.train()
    runner.logger.close()
|
def get_scheduler(optimizer, total_steps, scheduler_config):
    """Build a learning-rate scheduler from a config dict.

    Args:
        optimizer: the torch optimizer to schedule.
        total_steps: total number of training steps, forwarded to the factory
            as ``num_training_steps``.
        scheduler_config: dict with a ``name`` key selecting the module-level
            factory ``get_<name>``; remaining keys are passed through as
            keyword arguments. The caller's dict is not mutated.

    Returns:
        The scheduler produced by the selected factory
        (a ``torch.optim.lr_scheduler.LambdaLR`` for the factories in this module).
    """
    scheduler_config = copy.deepcopy(scheduler_config)  # keep caller's dict intact
    scheduler_name = scheduler_config.pop('name')
    # Resolve the factory from module globals instead of eval(): the same
    # lookup rule, but no arbitrary-code execution on an untrusted config.
    # (An unknown name now raises KeyError instead of NameError.)
    scheduler_fn = globals()[f'get_{scheduler_name}']
    return scheduler_fn(optimizer, num_training_steps=total_steps, **scheduler_config)
|
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=(- 1)):
    """Cosine schedule with hard restarts and linear warmup.

    The multiplicative lr factor ramps linearly from 0 to 1 over
    ``num_warmup_steps``, then follows ``num_cycles`` cosine decays down to 0,
    jumping back to 1 at every cycle boundary (hard restart).

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of hard restarts (default 1).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def multiplier(step):
        # Linear warmup phase.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        # Fraction of the post-warmup training completed, in [0, 1].
        progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the top of each cycle.
        cycle_position = (float(num_cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycle_position)))
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=(- 1)):
    """Cosine decay with linear warmup.

    The multiplicative lr factor ramps linearly from 0 to 1 over
    ``num_warmup_steps``, then follows a cosine curve with ``num_cycles``
    waves down to 0 at ``num_training_steps`` (the default 0.5 is a single
    half-cosine from the max value to 0).

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine waves (default 0.5).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def multiplier(step):
        # Linear warmup phase.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        # Fraction of the post-warmup training completed, in [0, 1].
        progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by linear decay to zero.

    The multiplicative lr factor rises from 0 to 1 over ``num_warmup_steps``,
    then decreases linearly back to 0 at ``num_training_steps``.

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: total number of training steps.
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def multiplier(step: int):
        # Linear warmup phase.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        remaining = float(num_training_steps - step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        # Clamp at 0 once training runs past num_training_steps.
        return max(0.0, remaining / decay_span)
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def get_sqrt_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by inverse-square-root decay.

    After warmup the multiplicative lr factor is ``1 / sqrt(step)``, with the
    denominator floored at ``num_warmup_steps``. ``num_training_steps`` is
    unused here and kept only so every factory in this module shares the same
    signature.

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: unused; accepted for signature uniformity.
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def multiplier(step: int):
        # Linear warmup phase.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        # Inverse-sqrt decay; the max() guards the denominator from ever
        # dropping below num_warmup_steps.
        return 1.0 / math.sqrt(max(step, num_warmup_steps))
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def get_constant_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by a constant factor of 1.0 (no decay).

    ``num_training_steps`` is unused here and kept only so every factory in
    this module shares the same signature.

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: unused; accepted for signature uniformity.
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def multiplier(step: int):
        # Linear warmup phase; afterwards the base lr is used unchanged.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        return 1.0
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def get_noam_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1), model_size=768):
    """Noam (inverse-sqrt) schedule with linear warmup, as in "Attention Is All You Need".

    After warmup the multiplicative lr factor is
    ``model_size**-0.5 * min(step**-0.5, step * num_warmup_steps**-1.5)``;
    before warmup it ramps linearly from 0 to 1.

    NOTE(review): the warmup branch is NOT scaled by ``model_size**-0.5``, so
    the factor drops discontinuously at the warmup boundary — behavior kept
    as-is from the original implementation; confirm this is intentional.

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: unused; accepted for signature uniformity with the
            other factories in this module.
        last_epoch: index of the last epoch when resuming (default -1).
        model_size: transformer hidden size used for the ``d_model**-0.5``
            scale. Default 768, the previously hard-coded value, so existing
            callers see identical behavior.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return (model_size ** (-0.5)) * min(current_step ** (-0.5), current_step * (num_warmup_steps ** (-1.5)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-07, power=1.0, last_epoch=(- 1)):
    """Polynomial decay from the optimizer's initial lr down to ``lr_end``,
    with linear warmup.

    ``power`` defaults to 1.0 (linear decay) as in the fairseq implementation,
    which in turn follows the original BERT implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: total number of training steps.
        lr_end: final learning rate (default 1e-7).
        power: polynomial power factor (default 1.0).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` implementing the schedule.
    """
    lr_init = optimizer.defaults['lr']
    assert (lr_init > lr_end), f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})'
    def multiplier(step: int):
        # Linear warmup phase.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        # Past the end of training: hold at lr_end.
        if step > num_training_steps:
            return lr_end / lr_init
        decay_steps = num_training_steps - num_warmup_steps
        fraction_left = 1 - ((step - num_warmup_steps) / decay_steps)
        decayed = (lr_init - lr_end) * (fraction_left ** power) + lr_end
        # LambdaLR multiplies the base lr, so return the ratio.
        return decayed / lr_init
    return LambdaLR(optimizer, multiplier, last_epoch)
|
def label_vocab_as_dict(df: pd.DataFrame, key: str, value: str) -> Dict:
    """Return the label vocabulary as a dict keyed by ``key``.

    Args:
        df: DataFrame holding 'label' and 'idx' columns. Unlike the previous
            implementation, the caller's DataFrame is never mutated.
        key: 'label' (labels are cast to str and mapped to idx) or
            'idx' (mapped to label).
        value: ignored; the value column is implied by ``key``. Kept for
            backward compatibility with existing callers.

    Returns:
        dict mapping each key-column entry to its value-column entry
        (later duplicates overwrite earlier ones, as before).
    """
    if (key == 'label'):
        # astype() on the Series returns a new object, so df stays untouched
        # (the old code overwrote df['label'] in place — a caller-visible side effect).
        return dict(zip(df['label'].astype(str), df['idx']))
    assert (key == 'idx'), "key argument must be either 'label' or 'idx'"
    return dict(zip(df['idx'], df['label']))
|
def label_to_binary_vector(label: List, num_labels: int) -> torch.Tensor:
    """Convert a list of integer labels into a multi-hot float vector.

    Args:
        label: list of integer class indices (may be empty).
        num_labels: total number of classes, i.e. the output length.

    Returns:
        A float tensor of shape (num_labels,) with 1.0 at each labelled index.
    """
    if not label:
        # No labels: all-zero vector.
        return torch.zeros((num_labels,), dtype=torch.float)
    multi_hot = torch.zeros((num_labels,)).scatter(0, torch.tensor(label), 1.0)
    # Sanity check: exactly the requested indices are hot.
    assert set(torch.where(multi_hot == 1.0)[0].numpy()) == set(label)
    return multi_hot
|
def validate_score_return_type(ret: Union[(Tuple[(Tuple[(str, float)], ...)], float)]):
    """Validate that a metric returned a supported type.

    Valid return types:
      - float: standard single-score behaviour.
      - tuple of (str, float) pairs, e.g. (("f_measure", v), ("precision", v)):
        used by sed_eval-style metrics; the first entry acts as the primary
        optimisation criterion (e.g. for early stopping).

    Raises:
        AssertionError: if a tuple return contains a malformed element.
        ValueError: for any other return type.
    """
    if isinstance(ret, float):
        return
    if isinstance(ret, tuple):
        for element in ret:
            assert ((type(element) == tuple) and (type(element[0]) == str) and (type(element[1]) == float)), 'If the return type of the score is a tuple, all the elements in the tuple should be tuple of type (string, float)'
        return
    raise ValueError(f'Return type {type(ret)} is unexpected. Return type of the score function should either be a tuple(tuple) or float. ')
|
class ScoreFunction():
    """Abstract base class for score functions.

    Instances are functors: calling one runs the subclass's :meth:`_compute`
    and validates its return type before handing the score back.
    """

    def __init__(self, label_to_idx: Dict[(str, int)], name: Optional[str]=None, maximize: bool=True):
        """
        :param label_to_idx: Map from label string to integer index.
        :param name: Override the name of this scoring function.
        :param maximize: Whether a larger score is better. False means the
            value behaves like a loss/energy to minimize.
        """
        self.label_to_idx = label_to_idx
        # Only override the class-level name attribute for a truthy override.
        if name:
            self.name = name
        self.maximize = maximize

    def __call__(self, *args, **kwargs) -> Union[(Tuple[(Tuple[(str, float)], ...)], float)]:
        """Compute the metric, validate the return type, and return the score."""
        score = self._compute(*args, **kwargs)
        validate_score_return_type(score)
        return score

    def _compute(self, predictions: Any, targets: Any, **kwargs) -> Union[(Tuple[(Tuple[(str, float)], ...)], float)]:
        """Compute the score from predictions and targets.

        Private: use the functor (``__call__``) so the return type is validated.
        Subclasses must override this.
        """
        raise NotImplementedError('Inheriting classes must implement this function')

    def __str__(self):
        return self.name
|
class Top1Accuracy(ScoreFunction):
    """Fraction of samples whose argmax prediction matches the argmax target."""
    name = 'top1_acc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        assert predictions.ndim == 2
        assert targets.ndim == 2
        n_correct = 0
        for row_target, row_pred in zip(targets, predictions):
            assert row_pred.ndim == 1
            assert row_target.ndim == 1
            # Compare the predicted class index with the target class index.
            n_correct += int(np.argmax(row_pred) == np.argmax(row_target))
        return n_correct / len(targets)
|
class ChromaAccuracy(ScoreFunction):
    """
    Score specifically for pitch detection -- converts all pitches to chroma first.
    This score ignores octave errors in pitch classification.
    """
    name = 'chroma_acc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Input-shape asserts added for consistency with Top1Accuracy: both
        # metrics expect (n_samples, n_classes) score matrices.
        assert predictions.ndim == 2
        assert targets.ndim == 2
        correct = 0
        for target, prediction in zip(targets, predictions):
            assert prediction.ndim == 1
            assert target.ndim == 1
            predicted_class = np.argmax(prediction)
            target_class = np.argmax(target)
            # Fold both classes onto the 12-tone chroma so octave errors
            # (same pitch class, different octave) still count as correct.
            if (predicted_class % 12) == (target_class % 12):
                correct += 1
        return correct / len(targets)
|
class SoundEventScore(ScoreFunction):
    """Scores for sound event detection tasks using sed_eval."""
    # Subclasses must override this with a concrete sed_eval metrics class
    # (segment-based or event-based); enforced by the assert in __init__.
    score_class: sed_eval.sound_event.SoundEventMetrics = None

    def __init__(self, label_to_idx: Dict[(str, int)], scores: Tuple[str], params: Dict=None, name: Optional[str]=None, maximize: bool=True):
        """
        :param label_to_idx: Map from label string to integer index.
        :param scores: sed_eval overall-score names to report; the FIRST entry
            is the primary score for this metric.
        :param params: Parameters forwarded to the sed_eval scoring class;
            see inheriting children for details.
        :param name: Override the name of this scoring function.
        :param maximize: Whether a larger score is better.
        """
        if (params is None):
            params = {}
        super().__init__(label_to_idx=label_to_idx, name=name, maximize=maximize)
        self.scores = scores
        self.params = params
        assert (self.score_class is not None)

    def _compute(self, predictions: Dict, targets: Dict, **kwargs) -> Tuple[(Tuple[(str, float)], ...)]:
        """Evaluate predictions against targets per file and return the
        requested overall scores as ((name, value), ...) pairs.

        Both arguments map filename -> list of event dicts (see
        sed_eval_event_container for the expected event fields).
        """
        reference_event_list = self.sed_eval_event_container(targets)
        estimated_event_list = self.sed_eval_event_container(predictions)
        scores = self.score_class(event_label_list=list(self.label_to_idx.keys()), **self.params)
        # sed_eval accumulates statistics file by file; iterate prediction keys
        # and evaluate each file's reference/estimated event sublists.
        for filename in predictions:
            scores.evaluate(reference_event_list=reference_event_list.filter(filename=filename), estimated_event_list=estimated_event_list.filter(filename=filename))
        # results_overall_metrics() nests scores by family (e.g. f_measure,
        # error_rate); ChainMap flattens them into one name -> value dict.
        # NOTE(review): assumes score names are unique across families — confirm.
        nested_overall_scores: Dict[(str, Dict[(str, float)])] = scores.results_overall_metrics()
        overall_scores: Dict[(str, float)] = dict(ChainMap(*nested_overall_scores.values()))
        # Report only the requested scores, in the order given at construction.
        return tuple([(score, overall_scores[score]) for score in self.scores])

    @staticmethod
    def sed_eval_event_container(x: Dict[(str, List[Dict[(str, Any)]])]) -> MetaDataContainer:
        """Convert {filename: [{'label', 'start', 'end'}, ...]} into a
        dcase_util MetaDataContainer for sed_eval.

        The /1000.0 conversion assumes 'start'/'end' are in milliseconds and
        sed_eval expects seconds — confirm against callers.
        """
        reference_events = []
        for (filename, event_list) in x.items():
            for event in event_list:
                reference_events.append({'event_label': str(event['label']), 'event_onset': (event['start'] / 1000.0), 'event_offset': (event['end'] / 1000.0), 'file': filename})
        return MetaDataContainer(reference_events)
|
class SegmentBasedScore(SoundEventScore):
    """
    segment-based scores - the ground truth and system output are compared in a
    fixed time grid; sound events are marked as active or inactive in each segment;

    See https://tut-arg.github.io/sed_eval/sound_event.html#sed_eval.sound_event.SegmentBasedMetrics # noqa: E501
    for params.
    """
    # Concrete sed_eval metrics class consumed by SoundEventScore._compute.
    score_class = sed_eval.sound_event.SegmentBasedMetrics
|
class EventBasedScore(SoundEventScore):
    """
    event-based scores - the ground truth and system output are compared at
    event instance level;

    See https://tut-arg.github.io/sed_eval/generated/sed_eval.sound_event.EventBasedMetrics.html # noqa: E501
    for params.
    """
    # Concrete sed_eval metrics class consumed by SoundEventScore._compute.
    score_class = sed_eval.sound_event.EventBasedMetrics
|
class MeanAveragePrecision(ScoreFunction):
    """Macro-averaged Average Precision.

    AP is computed per class and then macro-averaged, so each class weighs
    equally regardless of its prior — rare classes count as much as common
    ones (per Eduardo Fonseca's suggestion, following Hershey et al. 2017
    and Gemmeke et al. 2017).

    NOTE: average_precision_score is problematic when a class has no positive
    ground truth (scikit-learn issue #8245), which can occur on small tasks
    with few samples.
    """
    name = 'mAP'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        assert predictions.ndim == 2
        assert targets.ndim == 2
        return average_precision_score(targets, predictions, average='macro')
|
class DPrime(ScoreFunction):
    """Per-class d-prime, averaged across classes.

    d' is derived from each class's ROC AUC through the inverse normal CDF.
    Code adapted from code provided by Eduoard Fonseca.
    """
    name = 'd_prime'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        assert predictions.ndim == 2
        assert targets.ndim == 2
        try:
            # average=None yields one AUC per class.
            per_class_auc = roc_auc_score(targets, predictions, average=None)
            per_class_d_prime = stats.norm().ppf(per_class_auc) * np.sqrt(2.0)
            return np.mean(per_class_d_prime)
        except ValueError:
            # Raised when a class has only one ground-truth label present.
            return np.nan
|
class AUCROC(ScoreFunction):
    """ROC AUC computed per class and macro-averaged across classes."""
    name = 'aucroc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        assert predictions.ndim == 2
        assert targets.ndim == 2
        try:
            return roc_auc_score(targets, predictions, average='macro')
        except ValueError:
            # Raised when a class has only one ground-truth label present.
            return np.nan
|
class AutoregressiveReconstructionTask(Task):
    """Pre-training task that reconstructs target features from input features.

    Attributes:
        upstream (torch.nn.Module): The upstream encoder (transformers, rnn, etc) that outputs `hidden_states`
        predictor (torch.nn.Module): The pre-training predictor that takes `hidden_states` as input and maps to the task target
        loss (torch.nn loss class): The reconstruction loss (torch.nn.L1Loss, torch.nn.MSELoss, etc)
    """

    def __init__(self, upstream: UpstreamExample, predictor: PredictorExample, loss: torch.nn.L1Loss, **kwargs):
        """
        The input feature does not necessarily have to be the same as the target
        feature.

        Pipeline: feat_A -> upstream -> predictor -> feat_B; loss(feat_A, feat_B)

        Args:
            upstream (Encoder)
            predictor (Predictor)
            loss: reconstruction loss CLASS (not an instance); instantiated below.
        """
        super().__init__()
        self.upstream = upstream
        self.predictor = predictor
        # `loss` is a class such as torch.nn.L1Loss; instantiate it here.
        self.loss = loss()

    def predict(self, x: torch.Tensor, label: torch.Tensor, x_len: list):
        """Run encoder + predictor and compute the reconstruction loss.

        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            x_len: per-sample feature lengths minus `n_future`.
                NOTE(review): annotated `list` but used as a tensor/array
                (`.tolist()` below) - confirm what callers actually pass.

        Return:
            Output with:
                loss (torch.Tensor): scalar reconstruction loss
                hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
                prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        # NOTE(review): the `torch.Tensor` annotations below look inaccurate -
        # both objects are accessed via attributes (.prediction/.hidden_states),
        # suggesting they are Output-like containers; confirm.
        upstream_output: torch.Tensor = self.upstream(x, x_len.tolist())
        prediction: torch.Tensor = self.predictor(upstream_output).prediction
        reconstruction_loss = self.loss(prediction, label)
        return Output(loss=reconstruction_loss, hidden_states=upstream_output.hidden_states, prediction=prediction)

    def _general_forward(self, x: torch.Tensor, label: torch.Tensor, x_len: int, unique_name: List[str]):
        # Shared forward pass used by train/valid/test steps.
        # `slice(3)` presumably unpacks the first three fields of Output in
        # declaration order (loss, hidden_states, prediction) - confirm in
        # Output's definition.
        (loss, hidden_states, prediction) = self.predict(x, label, x_len).slice(3)
        logs = Logs()
        logs.add_hidden_state('hidden_states', hidden_states)
        logs.add_hidden_state('prediction', prediction)
        return Output(loss=loss, prediction=prediction, label=label, unique_name=unique_name, logs=logs)

    def _general_reduction(self, batch_results: list, on_epoch_end: bool=None):
        # Average the per-batch losses collected by the step functions and log
        # the mean as a scalar.
        losses = []
        for batch_result in batch_results:
            losses.append(batch_result.loss)
        loss = (sum(losses) / len(losses)).item()
        logs = Logs()
        logs.add_scalar('loss', loss)
        return Output(logs=logs)

    def train_step(self, x: torch.Tensor, label: torch.Tensor, x_len: int, unique_name: List[str], **kwargs):
        """
        Each forward step in the training loop.

        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            x_len: length of the original feature sequence minus `n_future`
            unique_name: per-sample identifiers

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        return self._general_forward(x, label, x_len, unique_name)

    def train_reduction(self, batch_results: list, on_epoch_end: bool=False):
        """
        After several forward steps, outputs should be collected untouched (but
        detaching the Tensors) into a list and passed as batch_results. This
        function examines the collected items and computes metrics across these
        batches. It might be called in the middle of an epoch for quick logging,
        or after exactly an epoch to know the epoch-level performance.

        Args:
            batch_results (List[cacheable version of the output of self.train_step])
            on_epoch_end (bool):
                usually you should keep the same behavior between sub-epoch and
                epoch level; this parameter is here in case you need specific
                postprocessing which must only be done right at the end of an epoch

        Return:
            logs (List[Log]): contents to log onto any logger, each in the Log
                class format
        """
        return self._general_reduction(batch_results, on_epoch_end)

    def valid_step(self, x: torch.Tensor, label: torch.Tensor, x_len: int, unique_name: List[str], **kwargs):
        """Validation forward step; same contract as train_step."""
        return self._general_forward(x, label, x_len, unique_name)

    def test_step(self, x: torch.Tensor, label: torch.Tensor, x_len: int, unique_name: List[str], **kwargs):
        """Test forward step; same contract as train_step."""
        return self._general_forward(x, label, x_len, unique_name)

    def valid_reduction(self, batch_results: list, on_epoch_end: bool=True):
        """Validation reduction; same contract as train_reduction."""
        return self._general_reduction(batch_results, on_epoch_end)

    def test_reduction(self, batch_results: list, on_epoch_end: bool=True):
        """Test reduction; same contract as train_reduction."""
        return self._general_reduction(batch_results, on_epoch_end)
|
class Task(torch.nn.Module):
    """Abstract base class for train/valid/test tasks.

    Subclasses implement ``predict``, the three ``*_step`` methods and the
    three ``*_reduction`` methods; ``forward``/``reduction`` dispatch to them
    by mode string ('train', 'valid' or 'test').
    """

    def __init__(self) -> None:
        super().__init__()

    def get_state(self):
        """Return task-specific state to persist in checkpoints (none by default)."""
        return {}

    def set_state(self, state: dict):
        """Restore task-specific state from a checkpoint (no-op by default)."""
        pass

    def parse_cached_results(self, cached_results: List[dict]):
        """Merge a list of per-batch result dicts into a single dict of lists.

        Every dict must share the same key set; list/tuple values are
        concatenated, scalar values are appended.
        """
        expected_keys = sorted(cached_results[0].keys())
        merged = defaultdict(list)
        for record in cached_results:
            assert sorted(record.keys()) == expected_keys
            for field, value in record.items():
                if isinstance(value, (tuple, list)):
                    merged[field].extend(value)
                else:
                    merged[field].append(value)
        return dict(merged)

    @abc.abstractmethod
    def predict(self):
        raise NotImplementedError

    def forward(self, mode: str, *args, **kwargs):
        """Dispatch to the ``<mode>_step`` method."""
        return getattr(self, f'{mode}_step')(*args, **kwargs)

    def reduction(self, mode: str, *args, **kwargs):
        """Dispatch to the ``<mode>_reduction`` method."""
        return getattr(self, f'{mode}_reduction')(*args, **kwargs)

    @abc.abstractmethod
    def train_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def valid_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def test_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def train_reduction(self):
        raise NotImplementedError

    @abc.abstractmethod
    def valid_reduction(self):
        raise NotImplementedError

    @abc.abstractmethod
    def test_reduction(self):
        raise NotImplementedError
|
class FeatReconstructionTask(Task):
    """Feature reconstruction pre-training task.

    Attributes:
        upstream (torch.nn.Module): The upstream encoder (transformers, rnn, etc) that outputs `hidden_states`
        predictor (torch.nn.Module): The pre-training predictor that takes `hidden_states` as input and maps to the task target
        loss (torch.nn Loss Functions): The reconstruction loss (torch.nn.L1Loss, torch.nn.MSELoss, etc)
    """

    def __init__(self, upstream: UpstreamExample, predictor: PredictorExample, loss: torch.nn.L1Loss, loss_config: dict=None, **kwargs):
        """
        The input feature does not necessarily have to be the same as the target feature.

        Args:
            upstream: the encoder
            predictor: the projection NN
            loss: reconstruction loss class, instantiated with ``**loss_config``

        feat_A -> upstream -> predictor -> feat_B; loss(feat_A, feat_B)
        """
        super().__init__()
        self.upstream = upstream
        self.predictor = predictor
        # FIX: avoid a mutable default argument ({}); None now stands for "no config".
        self.loss = loss(**(loss_config or {}))

    def predict(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor=None, position_encoding: torch.Tensor=None, attention_mask: torch.LongTensor=None):
        """
        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            label_mask (torch.BoolTensor): (batch_size, timestamps, output_size)
            position_encoding (torch.Tensor): (batch_size, timestamps, input_size)
            attention_mask (torch.LongTensor): (batch_size, timestamps)

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        if (position_encoding is None) and (attention_mask is None):
            upstream_output: torch.Tensor = self.upstream(x)
        else:
            upstream_output: torch.Tensor = self.upstream(x, position_encoding, attention_mask)
        prediction: torch.Tensor = self.predictor(upstream_output).prediction
        if label_mask is None:
            reconstruction_loss = self.loss(prediction, label)
        else:
            # Masked reconstruction: only the selected positions contribute to the loss.
            assert label_mask.sum() > 0, 'Without any masking, loss might go NaN.'
            reconstruction_loss = self.loss(prediction.masked_select(label_mask), label.masked_select(label_mask))
        # Keyword order matters: _general_forward relies on .slice(3) yielding
        # (loss, hidden_states, prediction) in this order.
        return Output(loss=reconstruction_loss, hidden_states=upstream_output.hidden_states, prediction=prediction)

    def _general_forward(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor=None, position_encoding: torch.Tensor=None, attention_mask: torch.LongTensor=None, unique_name: List[str]=None):
        # Shared body for the train/valid/test steps.
        (loss, hidden_states, prediction) = self.predict(x, label, label_mask, position_encoding, attention_mask).slice(3)
        logs = Logs()
        logs.add_hidden_state('hidden_states', hidden_states)
        logs.add_hidden_state('prediction', prediction)
        return Output(loss=loss, prediction=prediction, label=label, unique_name=unique_name, logs=logs)

    def _general_reduction(self, batch_results: list, on_epoch_end: bool=None):
        # Average per-batch losses into a single scalar for logging.
        losses = [batch_result.loss for batch_result in batch_results]
        loss = (sum(losses) / len(losses)).item()
        logs = Logs()
        logs.add_scalar('loss', loss)
        return Output(logs=logs)

    def train_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor=None, position_encoding: torch.Tensor=None, attention_mask: torch.LongTensor=None, unique_name: List[str]=None, **kwargs):
        """
        Each forward step in the training loop.

        Args:
            x (torch.Tensor): source feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target feat - (batch_size, timestamps, output_size)
            label_mask (torch.BoolTensor): (batch_size, timestamps, output_size)
            position_encoding (torch.Tensor): (batch_size, timestamps, input_size)
            attention_mask (torch.LongTensor): (batch_size, timestamps)

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def train_reduction(self, batch_results: list, on_epoch_end: bool=False):
        """
        Aggregate collected (detached) step outputs into epoch/sub-epoch metrics.

        Args:
            batch_results (List): cacheable outputs of self.train_step
            on_epoch_end (bool): True only when called exactly at an epoch boundary

        Return:
            logs: content to log onto any logger
        """
        return self._general_reduction(batch_results, on_epoch_end)

    def valid_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor=None, position_encoding: torch.Tensor=None, attention_mask: torch.LongTensor=None, unique_name: List[str]=None, **kwargs):
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def test_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor=None, position_encoding: torch.Tensor=None, attention_mask: torch.LongTensor=None, unique_name: List[str]=None, **kwargs):
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def valid_reduction(self, batch_results: list, on_epoch_end: bool=True):
        return self._general_reduction(batch_results, on_epoch_end)

    def test_reduction(self, batch_results: list, on_epoch_end: bool=True):
        return self._general_reduction(batch_results, on_epoch_end)
|
class OneHotToCrossEntropyLoss(torch.nn.Module):
    """Cross-entropy loss that accepts one-hot encoded targets."""

    def __init__(self):
        super().__init__()
        self.loss = torch.nn.CrossEntropyLoss()

    def forward(self, y_hat: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # Every target row must be a valid one-hot vector (sums to exactly 1).
        row_sums = torch.sum(y, dim=1)
        assert torch.all(row_sums == y.new_ones(y.shape[0]))
        class_indices = y.argmax(dim=1)
        return self.loss(y_hat, class_indices)
|
class ScenePredictionTask(Task):
    """Scene classification task supporting multiclass and multilabel setups.

    Args:
        model: backbone producing (logits, ...) from padded features
        category: encoder mapping class indices to label strings
        prediction_type: 'multilabel' (sigmoid + BCE) or 'multiclass' (softmax + CE)
        scores: names of metric factories registered in ``available_scores``
    """

    def __init__(self, model: torch.nn.Module, category: CategoryEncoder, prediction_type: str, scores: List[str]):
        super().__init__()
        self.model = model
        # Build the two directions of the index <-> label-string mapping.
        self.label_to_idx = {}
        self.idx_to_label = {}
        for idx in range(len(category)):
            label = str(category.decode(idx))
            self.label_to_idx[label] = idx
            self.idx_to_label[idx] = label
        self.scores = [available_scores[name](label_to_idx=self.label_to_idx) for name in scores]
        if prediction_type == 'multilabel':
            self.activation: torch.nn.Module = torch.nn.Sigmoid()
            self.logit_loss = torch.nn.BCEWithLogitsLoss()
        elif prediction_type == 'multiclass':
            self.activation = torch.nn.Softmax(dim=-1)
            self.logit_loss = OneHotToCrossEntropyLoss()
        else:
            raise ValueError(f'Unknown prediction_type {prediction_type}')

    def predict(self, x, x_len):
        """Run the backbone and apply the task activation; returns (probabilities, logits)."""
        logits, _ = self.model(x, x_len)
        probabilities = self.activation(logits)
        return probabilities, logits

    def forward(self, _mode: str, x, x_len, y, labels, unique_name: str, _dump_dir: str=None):
        """One step; returns (loss, cacheable dict of detached per-sample results)."""
        y_pr, y_hat = self.predict(x, x_len)
        loss = self.logit_loss(y_hat.float(), y.float())
        cacheable = dict(
            loss=loss.detach().cpu().item(),
            label=y.detach().cpu().unbind(dim=0),
            logit=y_hat.detach().cpu().unbind(dim=0),
            prediction=y_pr.detach().cpu().unbind(dim=0),
        )
        return loss, cacheable

    def log_scores(self, score_args):
        'Logs the metric score value for each score defined for the model'
        assert hasattr(self, 'scores'), 'Scores for the model should be defined'
        end_scores = {}
        for score in self.scores:
            score_ret = score(*score_args)
            validate_score_return_type(score_ret)
            if isinstance(score_ret, tuple):
                # The first sub-score doubles as the headline value of this metric.
                end_scores[f'{score}'] = score_ret[0][1]
                for subscore, value in score_ret:
                    end_scores[f'{score}_{subscore}'] = value
            elif isinstance(score_ret, float):
                end_scores[f'{score}'] = score_ret
            else:
                raise ValueError(f'Return type {type(score_ret)} is unexpected. Return type of the score function should either be a tuple(tuple) or float.')
        return end_scores

    def reduction(self, _mode: str, cached_results: List[dict], _dump_dir: str=None):
        """Aggregate cached step results into epoch-level loss and metric logs."""
        result = self.parse_cached_results(cached_results)
        target = torch.stack(result['label'], dim=0)
        prediction_logit = torch.stack(result['logit'], dim=0)
        prediction = torch.stack(result['prediction'], dim=0)
        loss = self.logit_loss(prediction_logit, target)
        logs = dict(loss=loss.detach().cpu().item())
        # Metric scores are only computed for evaluation modes.
        if _mode in ('valid', 'test'):
            logs.update(self.log_scores(score_args=(prediction.detach().cpu().numpy(), target.detach().cpu().numpy())))
        return logs
|
class SpeakerClassifier(torch.nn.Module):
    """A dummy speaker-classification model used as an interface example.

    Attributes:
        input_size (int): expected feature dimension of the input
        output_size (int): number of speaker classes / embedding size
    """

    def __init__(self, input_size=3, output_size=4):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            output (torch.Tensor): (batch_size, output_size)
        """
        assert (x.size((- 1)) == self.input_size)
        output = torch.randn(x.size(0), self.output_size)
        # FIX: the original ended with `assert output`, which raises on a
        # multi-element tensor and never returned the documented value.
        return output
|
class SpeakerVerification(Task):
    """Speaker verification task.

    model.output_size should match len(categories)

    Args:
        model (SpeakerClassifier):
            actual model or a callable config for the model
        category (CategoryEncoder):
            maps each speaker name (str) to a numeric label
        test_trials (List[Tuple[int, str, str]]):
            each tuple in the list consists of (label, enroll_utt, test_utt)
        loss_type (str): softmax or amsoftmax
        loss_conf (dict): arguments for the loss_type class
    """

    def __init__(self, model: SpeakerClassifier, category: CategoryEncoder, test_trials: List[Tuple[(int, str, str)]]=None, loss_type: str='amsoftmax', loss_conf: dict=None):
        super().__init__()
        self.model = model
        self.category = category
        self.trials = test_trials
        if (loss_type == 'amsoftmax'):
            loss_cls = amsoftmax
        elif (loss_type == 'softmax'):
            loss_cls = softmax
        else:
            raise ValueError(f'Unsupported loss_type {loss_type}')
        # FIX: the default loss_conf is None, but it used to be unpacked directly
        # with **loss_conf, raising TypeError whenever the argument was omitted.
        self.loss: torch.nn.Module = loss_cls(input_size=self.model.output_size, output_size=len(self.category), **(loss_conf or {}))
        assert (self.loss.output_size == len(category))

    def get_state(self):
        """The loss module holds trainable state (e.g. class weights) to checkpoint."""
        return {'loss_state': self.loss.state_dict()}

    def set_state(self, state: dict):
        self.loss.load_state_dict(state['loss_state'])

    def predict(self, x: torch.Tensor, x_len: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            torch.Tensor: speaker embeddings, (batch_size, output_size)
        """
        return self.model(x, x_len)

    def train_step(self, x: torch.Tensor, x_len: torch.LongTensor, class_id: torch.LongTensor, unique_name: List[str], _dump_dir: str=None):
        """One training step: classification loss over speaker identities."""
        spk_embeddings = self.predict(x, x_len)
        (loss, logits) = self.loss(spk_embeddings, class_id)
        prediction = logits.argmax(dim=(- 1)).detach().cpu().tolist()
        cacheable = dict(loss=loss.detach().cpu().item(), class_id=class_id.detach().cpu().tolist(), prediction=prediction, unique_name=unique_name)
        return (loss, cacheable)

    def train_reduction(self, cached_results: list, _dump_dir: str=None):
        """Aggregate training steps into mean loss and speaker accuracy."""
        results = self.parse_cached_results(cached_results)
        acc = accuracy(results['prediction'], results['class_id'])
        loss = torch.FloatTensor(results['loss']).mean().item()
        return dict(loss=loss, accuracy=acc)

    def test_step(self, x: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str], _dump_dir: str):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len: torch.LongTensor
            unique_name (List[str])

        Return:
            cacheable dict with speaker embeddings per unique_name; no loss.
        """
        spk_embeddings = self.predict(x, x_len)
        cacheable = dict(unique_name=unique_name.tolist(), spk_embedding=spk_embeddings.detach().cpu().unbind(dim=0))
        return (None, cacheable)

    def test_reduction(self, cached_results: List[dict], _dump_dir: str):
        """Score all trials by cosine similarity of embeddings; report EER / minDCF."""
        results = self.parse_cached_results(cached_results)
        embeddings = dict(zip(results['unique_name'], results['spk_embedding']))
        scores = []
        labels = []
        for (label, enroll, test) in tqdm(self.trials, desc='Test Scoring', total=len(self.trials)):
            score = F.cosine_similarity(embeddings[enroll], embeddings[test], dim=0).item()
            scores.append(score)
            labels.append(label)
        (EER, EERthreshold) = compute_eer(labels, scores)
        (minDCF, minDCFthreshold) = compute_minDCF(labels, scores, p_target=0.01)
        return dict(EER=EER, EERthreshold=EERthreshold.item(), minDCF=minDCF, minDCF_threshold=minDCFthreshold)
|
class Speech2TextCTCExample(nn.Module):
    'An example speech-to-text task with CTC objective\n\n    Args:\n        input_size (int, optional): Input size. Defaults to 3.\n        output_size (int, optional): Output size. Defaults to 4.\n    '

    def __init__(self, input_size=3, output_size=4):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.Tensor): (batch_size, timestemps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            y (torch.Tensor): (batch_size, timestamps, output_size)
            y_len (torch.LongTensor): (batch_size)
        """
        assert (x.size((- 1)) == self.input_size)
        output = torch.randn(x.size(0), x.size(1), self.output_size)
        # FIX: the original ended with `assert output, x_len`, which raises on a
        # multi-element tensor and never returned the documented (y, y_len) pair.
        return (output, x_len)
|
class Speech2TextCTCTask(Task):
    """Speech-to-text task with CTC objective.

    Args:
        model (Speech2TextCTCExample)
        tokenizer (Tokenizer): Text tokenizer.
        decoder (Union[BeamDecoder, dict], optional):
            Beam decoder or decoder's config. Defaults to None.
        log_metrics (List[str], optional):
            Metrics to be logged. Defaults to ["cer", "wer"].
    """
    def __init__(self, model: torch.nn.Module, tokenizer: Tokenizer, decoder: Union[(BeamDecoder, dict)]=None, log_metrics: List[str]=['cer', 'wer']) -> None:
        # NOTE: log_metrics has a mutable default list; it is only read here.
        super().__init__()
        self.model = model
        assert isinstance(tokenizer, Tokenizer)
        self.tokenizer = tokenizer
        self.log_metrics = log_metrics
        # BeamDecoder is None when the optional flashlight dependency is not
        # installed; beam decoding is then silently disabled.
        if (BeamDecoder is None):
            decoder = None
        if isinstance(decoder, dict):
            # A dict is treated as the decoder's config and instantiated here.
            decoder = BeamDecoder(**decoder)
            logger.info('Using flashlight decoder.')
        self.decoder = decoder
        # The tokenizer's pad index doubles as the CTC blank symbol.
        self.criterion = nn.CTCLoss(blank=self.tokenizer.pad_idx, zero_infinity=True)
    def predict(self, x: torch.Tensor, x_len: torch.LongTensor):
        """Greedy (argmax) CTC decoding.

        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            logits (torch.Tensor): (batch_size, timestamps, output_size)
            prediction (list): prediction strings
            valid_length (torch.LongTensor): (batch_size, )
        """
        (logits, x_len) = self.model(x, x_len)
        predicted_tokens = torch.argmax(logits, dim=2).detach().cpu()
        # Standard CTC collapse: merge consecutive repeats, then drop blank
        # (pad) and eos tokens before detokenizing.
        filtered_tokens = [[token for token in pred_token.unique_consecutive().tolist() if ((token != self.tokenizer.pad_idx) and (token != self.tokenizer.eos_idx))] for pred_token in predicted_tokens]
        predictions = [self.tokenizer.decode(token_list) for token_list in filtered_tokens]
        return (logits, predictions, x_len)
    def forward(self, _mode: str, x: torch.Tensor, x_len: torch.LongTensor, labels: np.ndarray, class_ids: torch.LongTensor, unique_name: np.ndarray, beam_decode: bool=False, _dump_dir: str=None):
        """
        Each forward step in the training loop.

        Args:
            _mode (str): train / valid / test
            x (torch.Tensor):
                Input waveform or acoustic features.
                (batch_size, timestamps, input_size)
            x_len (torch.LongTensor):
                Lengths of inputs.
                (batch_size, )
            labels (np.ndarray):
                Ground truth transcriptions (str).
                (batch_size, )
            class_ids (torch.LongTensor):
                Tokenized ground truth transcriptions.
            unique_name (np.ndarray):
                Unique names for each sample.
            beam_decode (bool): also run the beam decoder when available.

        Return:
            (loss, cacheable dict of detached per-batch results)
        """
        (logits, prediction, x_len) = self.predict(x, x_len)
        log_probs = F.log_softmax(logits, dim=2)
        y = class_ids
        # Target length = number of non-padding tokens per utterance.
        y_len = torch.tensor([(ids != self.tokenizer.pad_idx).long().sum() for ids in class_ids], dtype=torch.long, device=logits.device)
        # nn.CTCLoss expects (timestamps, batch_size, output_size), hence the transpose.
        loss = self.criterion(log_probs.transpose(0, 1), y, x_len, y_len)
        hyps = None
        if (beam_decode and (self.decoder is not None)):
            hyps = self.decoder.decode(log_probs.detach())
        cacheable = dict(loss=loss.detach().cpu().item(), prediction=prediction, label=labels.tolist(), unique_name=unique_name.tolist(), hypotheses=hyps)
        return (loss, cacheable)
    def reduction(self, _mode: str, cached_results: List[dict], _dump_dir: str=None):
        """Aggregate cached step outputs into loss and the configured text metrics."""
        results = self.parse_cached_results(cached_results)
        losses = results['loss']
        predictions = results['prediction']
        labels = results['label']
        unique_names = results['unique_name']
        if (_dump_dir is not None):
            # Dump "utt-id text" style hypothesis / reference files for inspection.
            with (Path(_dump_dir) / 'hyp').open('w') as f:
                f.writelines([f'''{uid} {p}
''' for (p, uid) in zip(predictions, unique_names)])
            with (Path(_dump_dir) / 'ref').open('w') as f:
                f.writelines([f'''{uid} {p}
''' for (p, uid) in zip(labels, unique_names)])
        beam_hyps = None
        if (results['hypotheses'][0] is not None):
            # Keep only the word sequence of each best (first) beam hypothesis.
            beam_hyps = [' '.join(hyp[0].words) for hyp in results['hypotheses']]
        logs = {}
        logs['loss'] = float(np.mean(losses))
        if ('wer' in self.log_metrics):
            logs['wer'] = wer(predictions, labels)
        if ('cer' in self.log_metrics):
            logs['cer'] = cer(predictions, labels)
        if ('per' in self.log_metrics):
            logs['per'] = per(predictions, labels)
        if ('slot_type_f1' in self.log_metrics):
            logs['slot_type_f1'] = slot_type_f1(predictions, labels)
        if ('slot_value_cer' in self.log_metrics):
            logs['slot_value_cer'] = slot_value_cer(predictions, labels)
        if ('slot_value_wer' in self.log_metrics):
            logs['slot_value_wer'] = slot_value_wer(predictions, labels)
        if ('slot_edit_f1_full' in self.log_metrics):
            logs['slot_edit_f1_full'] = slot_edit_f1_full(predictions, labels)
        if ('slot_edit_f1_part' in self.log_metrics):
            logs['slot_edit_f1_part'] = slot_edit_f1_part(predictions, labels)
        if (beam_hyps is not None):
            logs['wer_beam'] = wer(beam_hyps, labels)
            # NOTE(review): key is 'char_beam' while the greedy metric uses 'cer';
            # possibly intended to be 'cer_beam' — kept as-is.
            logs['char_beam'] = cer(beam_hyps, labels)
        return logs
|
class CMVN(nn.Module):
    """Global cepstral mean and variance normalization along one dimension."""

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if (mode != 'global'):
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    def forward(self, x):
        if (self.mode == 'global'):
            # eps keeps the division stable when the std is (near) zero.
            mean = x.mean(self.dim, keepdim=True)
            std = x.std(self.dim, keepdim=True)
            return (x - mean) / (self.eps + std)

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
|
class FeatureExtractor(nn.Module):
    'Feature extractor, transforming file path to Mel spectrogram'

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        """Extra keyword args are forwarded to the kaldi fbank call."""
        super(FeatureExtractor, self).__init__()
        assert (mode == 'fbank'), 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.num_mel_bins = num_mel_bins
        self.kwargs = kwargs
        self.extract_fn = kaldi.fbank
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        # Returns (waveform, sample_rate); the wav-specific loader is used when
        # decode_wav was requested at construction time.
        if self.decode_wav:
            loaded = torchaudio.load_wav(filepath)
        else:
            loaded = torchaudio.load(filepath)
        return loaded

    def forward(self, waveform):
        feat = self.extract_fn(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=SAMPLE_RATE, window_type=WINDOW_TYPE, **self.kwargs)
        if not self.apply_cmvn:
            return feat
        # CMVN normalizes along dim=2, so move time to the last axis, add a
        # batch axis, normalize, then restore the original (time, mel) layout.
        feat = feat.transpose(0, 1).unsqueeze(0)
        feat = self.cmvn(feat)
        return feat.squeeze(0).transpose(0, 1)

    def extra_repr(self):
        return 'mode={}, num_mel_bins={}'.format(self.mode, self.num_mel_bins)

    def create_msg(self):
        'List msg for verbose function'
        msg = 'Audio spec.| Audio feat. = {}\t\t| feat. dim = {}\t| CMVN = {}'.format(self.mode, self.num_mel_bins, self.apply_cmvn)
        return [msg]
|
def create_transform(audio_config):
    """Build a FeatureExtractor from an audio config dict.

    The recognized keys are popped from ``audio_config``; every remaining
    entry is forwarded to :class:`FeatureExtractor` as a keyword argument.

    Returns:
        (FeatureExtractor, int): the transform and its feature dimension.
    """
    feat_type = audio_config.pop('feat_type')
    feat_dim = audio_config.pop('feat_dim')
    decode_wav = audio_config.pop('decode_wav', False)
    apply_cmvn = audio_config.pop('cmvn', True)
    extractor = FeatureExtractor(feat_type, feat_dim, decode_wav, apply_cmvn, **audio_config)
    return (extractor, feat_dim)
|
class UpstreamExpert(UpstreamBase):
    """APC upstream wrapper: loads a pre-trained APC checkpoint and exposes
    layer-wise hidden states through the UpstreamBase hook mechanism."""
    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        ckpt = torch.load(ckpt, map_location='cpu')
        config = ckpt['config']
        # Rebuild the same acoustic front-end the checkpoint was trained with.
        (self.preprocessor, feat_dim) = create_transform(config['data']['audio'])
        self.model = APC(feat_dim, **config['model']['paras'])
        self.model.load_state_dict(ckpt['model'])
        if (len(self.hooks) == 0):
            # Register default hooks: intermediate RNN layers (their packed-sequence
            # inputs are unpadded first) plus the model's final output[1].
            self.add_hook('self.model.rnn_layers[1]', (lambda input, output: pad_packed_sequence(input[0], batch_first=True)[0]))
            self.add_hook('self.model.rnn_layers[2]', (lambda input, output: pad_packed_sequence(input[0], batch_first=True)[0]))
            self.add_hook('self.model', (lambda input, output: output[1]))
    def get_downsample_rates(self, key: str) -> int:
        # presumably one frame per 10 ms at 16 kHz (160 samples) — TODO confirm
        # against the fbank frame-shift configuration.
        return 160
    def forward(self, wavs):
        """Extract fbank features per utterance, pad, and run the APC model."""
        features = [self.preprocessor(wav.unsqueeze(0)) for wav in wavs]
        feat_lengths = [len(feat) for feat in features]
        features = pad_sequence(features, batch_first=True)
        feat_lengths = torch.LongTensor(feat_lengths)
        (predicted_BxLxM, features) = self.model(features, feat_lengths, testing=(not self.training))
        # NOTE(review): no explicit return — hidden states appear to be collected
        # by the hooks registered in __init__; confirm against UpstreamBase.
|
def apc_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH
    """
    # FIX: `assert` is stripped under `python -O`; raise an explicit error instead.
    if not os.path.isfile(ckpt):
        raise FileNotFoundError(f'APC checkpoint not found: {ckpt}')
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def apc_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return apc_local(local_path, *args, **kwargs)
|
def apc(refresh=False, *args, **kwargs):
    """
    The default model
        refresh (bool): whether to download ckpt/config again if existed
    """
    # The 360-hour checkpoint is the default APC variant.
    return apc_360hr(*args, refresh=refresh, **kwargs)
|
def apc_360hr(refresh=False, *args, **kwargs):
    """
    The apc standard model on 360hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    ckpt_url = 'https://huggingface.co/leo19941227/apc_series/resolve/main/apc_360hr.ckpt'
    kwargs['ckpt'] = ckpt_url
    return apc_url(*args, refresh=refresh, **kwargs)
|
def apc_960hr(refresh=False, *args, **kwargs):
    """
    The apc standard model on 960hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    ckpt_url = 'https://huggingface.co/leo19941227/apc_series/resolve/main/apc_960hr.ckpt'
    kwargs['ckpt'] = ckpt_url
    return apc_url(*args, refresh=refresh, **kwargs)
|
class VQLayer(nn.Module):
    """Vector-quantization layer that follows an RNN layer."""

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        """
        input_size: pre-quantized input feature size, usually the RNN hidden size.
        codebook_size: number of codes.
        code_dim: size of each code; if not the last layer, must equal the
            RNN hidden size.
        gumbel_temperature: temperature for gumbel-softmax.
        """
        super(VQLayer, self).__init__()
        self.codebook_size = codebook_size
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Accumulates how often each code is selected since the last report.
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing, lens=None):
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Deterministic hard assignment at inference: one-hot of the argmax.
            original_shape = logits_BxLxC.size()
            best_idx = logits_BxLxC.max(dim=(- 1))[1]
            flat_onehot = torch.zeros_like(logits_BxLxC).view((- 1), original_shape[(- 1)])
            flat_onehot.scatter_(1, best_idx.view((- 1), 1), 1)
            onehot_BxLxC = flat_onehot.view(*original_shape)
        else:
            # Differentiable (straight-through) hard sampling during training.
            onehot_BxLxC = gumbel_softmax(logits_BxLxC, tau=self.gumbel_temperature, hard=True, eps=EPS, dim=(- 1))
        self.token_usg += onehot_BxLxC.detach().cpu().reshape((- 1), self.codebook_size).sum(dim=0).numpy()
        codes_BxLxE = self.codebook_CxE(onehot_BxLxC)
        return (logits_BxLxC, codes_BxLxE)

    def report_ppx(self):
        'Computes perplexity of distribution over codebook'
        acc_usg = self.token_usg / sum(self.token_usg)
        entropy = sum((- acc_usg) * np.log2(acc_usg + EPS))
        return 2 ** entropy

    def report_usg(self):
        'Computes usage each entry in codebook'
        acc_usg = self.token_usg / sum(self.token_usg)
        # Reset the counters so the next report covers a fresh window.
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
def ast(refresh: bool=False, window_secs: float=10.24, stride_secs: float=10.24, **kwds):
    """Build the AST upstream from the released AudioSet checkpoint."""
    ckpt_path = _urls_to_filepaths('https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1', refresh=refresh)
    kwds['ckpt'] = ckpt_path
    return _UpstreamExpert(window_secs=window_secs, stride_secs=stride_secs, **kwds)
|
def audio_albert_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH
        feature_selection (int): -1 (default, the last layer) or an int in range(0, max_layer_num)
    """
    # FIX: `assert` is stripped under `python -O`; raise an explicit error instead.
    if not os.path.isfile(ckpt):
        raise FileNotFoundError(f'Audio ALBERT checkpoint not found: {ckpt}')
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def audio_albert_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return audio_albert_local(local_path, *args, **kwargs)
|
def audio_albert(refresh=False, *args, **kwargs):
    """
    The default model
        refresh (bool): whether to download ckpt/config again if existed
    """
    # The 960-hour checkpoint is the default Audio ALBERT variant.
    return audio_albert_960hr(*args, refresh=refresh, **kwargs)
|
def audio_albert_960hr(refresh=False, *args, **kwargs):
    """
    The audio albert base model on 960hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    # Alias for the released logMel / time-alteration / AdamW training recipe.
    return audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1(*args, refresh=refresh, **kwargs)
|
def audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    """
    ckpt_url = 'https://huggingface.co/s3prl/audio_albert/resolve/main/audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1/states-1000000.ckpt'
    kwargs['ckpt'] = ckpt_url
    return audio_albert_url(*args, refresh=refresh, **kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.