code stringlengths 17 6.64M |
|---|
def _doc_default_config(cls: Problem):
    """Render :code:`cls().default_config()` as yaml and install it as the
    docstring of :code:`cls.default_config` (via :code:`DEFAULT_CONFIG_FORMAT`).

    This is a best-effort documentation generator: subclasses whose
    :code:`default_config` raises (e.g. partially implemented ones) are
    silently skipped.
    """
    def _append_prefix_spaces(docstring: str):
        # indent every yaml line so it nests correctly inside the docstring
        return '\n'.join([f' {line}' for line in docstring.split('\n')])
    obj = cls()
    try:
        config = obj.default_config()
    except Exception:
        # best-effort: skip classes that cannot produce a default config yet
        # (narrowed from a bare `except:` so SystemExit/KeyboardInterrupt pass)
        return
    # cross-link every config key that is also a method on the class
    methods = [getattr(cls, k) for k in config if hasattr(cls, k)]
    method_links = ' '.join([f':obj:`{method.__name__}`' for method in methods])
    yaml_str = yaml.dump(config, sort_keys=False, width=float('inf'))
    yaml_str = _append_prefix_spaces(yaml_str)
    cls.default_config.__doc__ = DEFAULT_CONFIG_FORMAT.format(method_links, yaml_str)
|
class Problem():
    # Registry of every Problem subclass keyed by its __name__. Populated
    # automatically by __init_subclass__ so a saved checkpoint can restore
    # its problem class purely from the name written in 'problem_name'.
    _store: Dict[(str, Problem)] = dict()
    def __init_subclass__(cls) -> None:
        super().__init_subclass__()
        cls._store[cls.__name__] = cls
        # best-effort: render the subclass's default_config into its docstring
        _doc_default_config(cls)
    @classmethod
    def get_class_from_name(cls, name: str):
        '\n Args:\n name (str): the :code:`__name__` of the problem class\n\n Returns:\n Problem\n '
        assert (name in cls._store), f"The class '{name}' is either not defined or not imported"
        return cls._store[name]
    def build_collate_fn(self, build_collate_fn: dict, mode: str):
        '\n By default returns :obj:`s3prl.dataset.base.default_collate_fn`\n\n Args:\n build_collate_fn (dict): same in :obj:`default_config`, no argument supported for now\n mode (str): train, valid, or test\n\n Returns:\n callable\n\n the collate_fn for torch DataLoader in train/valid/test :code:`mode`\n '
        return default_collate_fn
    def build_upstream(self, build_upstream: dict):
        '\n By default build the upstream with :obj:`s3prl.nn.upstream.S3PRLUpstream`\n\n Args:\n build_upstream (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.upstream.S3PRLUpstream`\n\n Returns:\n :obj:`s3prl.nn.interface.AbsUpstream`\n\n Return an upstream model, whose forward takes the waveform input and returns\n multiple hidden states as features.\n '
        upstream = S3PRLUpstream(**build_upstream)
        return upstream
    def build_featurizer(self, build_featurizer: dict, upstream):
        '\n By default build the featurizer with :obj:`s3prl.nn.Featurizer`\n\n Args:\n build_featurizer (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.Featurizer`\n upstream (:obj:`AbsUpstream`): the upstream model built by :obj:`build_upstream`\n\n Returns:\n :obj:`s3prl.nn.interface.AbsFeaturizer`\n\n Return the featurizer model. The featurizer is used to reduce the multiple\n hidden states returned from the upstream model (built by :obj:`build_upstream`)\n into a single hidden state, so can be easliy fed into the downstream model\n '
        featurizer = Featurizer(upstream, **build_featurizer)
        return featurizer
    def build_model(self, build_model: dict, model_output_size: int, build_upstream: dict, build_featurizer: dict, build_downstream: dict):
        "\n By default build model with :obj:`s3prl.nn.upstream.UpstreamDownstreamModel`\n\n Args:\n build_model (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.upstream.UpstreamDownstreamModel`\n model_output_size (int): the required model's output hidden size\n build_upstream (dict): same in :obj:`default_config`, refer to :obj:`build_upstream`\n build_featurizer (dict): same in :obj:`default_config`, refer to :obj:`build_featurizer`\n build_downstream (dict): same in :obj:`default_config`, refer to :obj:`build_downstream`\n\n Returns:\n torch.nn.Module\n\n Return the entire model for the task, which takes the direct items from DataLoader as the input.\n Usually, the components can be built by :obj:`build_upstream`, :obj:`build_featurizer`,\n :obj:`build_downstream`, and are concated together to get the final model.\n The upstream extracts multiple hidden states, the featuizer reduce them into a single hidden state,\n and the downstream takes the hidden states as the feature for the downstream-specific model.\n "
        upstream = self.build_upstream(build_upstream)
        featurizer: Featurizer = self.build_featurizer(build_featurizer, upstream)
        downstream = self.build_downstream(build_downstream, featurizer.output_size, model_output_size, featurizer.downsample_rate)
        model = UpstreamDownstreamModel(upstream, featurizer, downstream, **build_model)
        return model
    def build_optimizer(self, build_optimizer: dict, parameters):
        '\n Args:\n build_optimizer (dict): same in :obj:`default_config`, refer to below\n\n ==================== ====================\n key description\n ==================== ====================\n name (str) - the optimizer class name in :obj:`torch.optim`\n conf (dict) - the arguments for initializing the optimizer class. e.g. :code:`{"lr": 1.0e-4}`\n ==================== ====================\n\n parameters (iterable): the standard params accepted by :obj:`torch.optim.Optimizer`.\n\n Returns:\n :obj:`torch.optim.Optimizer`\n\n An optimizer following standard torch usage\n '
        def _default_build_optimizer(name: str, conf: dict):
            # resolve the optimizer class by name from torch.optim
            opt_cls = getattr(torch.optim, name)
            opt = opt_cls(parameters, **conf)
            return opt
        return _default_build_optimizer(**build_optimizer)
    def build_scheduler(self, build_scheduler: dict, optimizer):
        '\n Args:\n build_scheduler (dict): same in :obj:`default_config`\n\n ==================== ====================\n key description\n ==================== ====================\n name (str) - the scheduler class name in :obj:`torch.optim.lr_scheduler`\n conf (dict) - the arguments for initializing the scheduler class. e.g. :code:`{"gamma": 0.01}` for :obj:`torch.optim.lr_scheduler.StepLR`\n ==================== ====================\n\n optimizer: the standard torch optimizer accepted by Scheduler in :obj:`torch.optim.lr_scheduler`.\n\n Returns:\n torch scheduler\n\n A scheduler following standard torch usage\n '
        def _default_build_scheduler(name: str, conf: dict):
            # resolve the scheduler class by name from torch.optim.lr_scheduler
            scheduler_cls = getattr(torch.optim.lr_scheduler, name)
            scheduler = scheduler_cls(optimizer, **conf)
            return scheduler
        return _default_build_scheduler(**build_scheduler)
    def train(self, train: dict, train_dir: str, build_model_all_args: dict, build_task_all_args_except_model: dict, save_model: dict, save_task: dict, build_optimizer: dict, build_scheduler: dict, evaluate: dict, train_dataset, train_batch_sampler, train_collate_fn, valid_dataset, valid_batch_sampler, valid_collate_fn, num_workers: int, world_size: int, rank: int, eval_batch: int, device: str, global_config: dict=None):
        "\n Args:\n train (dict): same in :obj:`default_config`\n\n ========================== ====================\n key description\n ========================== ====================\n total_steps (int) - the total optimization steps\n log_step (int) - logging frequency. log every :code:`log_step` step\n eval_step (int) - evaluation frequency. Evaluate every :code:`eval_step` step. Note that you can control how many batch to evaluate to speed up the development by the :code:`eval_batch` argument in :obj:`run`\n save_step (int) - save the checkpoint every :code:`save_step` step.\n gradient_clipping (float) - clip the gradient. important for RNNs.\n gradient_accumulate (int) - accumulate multiple steps' gradient before updating network parameters to simulate large-batch optimization.\n valid_metric (str) - the metric to select the best valid checkpoint. Different Tasks have different supported valid_metrics. See :obj:`build_task` for the supported metrics.\n valid_higher_better (bool) - some metrics are higher better, while some are lower better this will affect how to save the best validation checkpoint.\n auto_resume (bool) - if there are already the last checkpoint in :code:`target_dir` (see :obj:`run`), whether to resume from it or delete it and start a new training session.\n resume_ckpt_dir (str) - you can directly specify the checkpoint path to resume which is not necessary in :code:`target_dir` (see :obj:`run`).\n seed (int) - fix the seed before the training start\n keep_num_ckpts (int) - to prevent saving too many checkpoints, only save the :code:`keep_num_ckpts` latest checkpoints and delete the old ones.\n use_scheduler (bool) - whether to use the scheduler\n ========================== ====================\n\n **others:\n only meaningful when you want to override this train method, which is not the\n common case. Hence we skip the documentation for now.\n "
        @dataclass
        class TrainConfig():
            total_steps: int
            log_step: int
            eval_step: int
            save_step: int
            gradient_clipping: float
            gradient_accumulate: int
            valid_metric: str
            valid_higher_better: bool
            auto_resume: bool = True
            resume_ckpt_dir: str = None
            seed: int = 0
            keep_num_ckpts: int = 2
            use_scheduler: bool = False
        conf = TrainConfig(**train)
        fix_random_seeds(conf.seed)
        train_dir: Path = Path(train_dir)
        if ((not conf.auto_resume) and train_dir.is_dir()):
            logger.warning(f'{train_dir} exists. Delete the directory since auto_resume=False')
            shutil.rmtree(train_dir)
        train_dir.mkdir(exist_ok=True, parents=True)
        # checkpoint dirs are named 'step_<global_step>'; newest first
        ckpt_dirs = [key for key in os.listdir(train_dir) if key.startswith('step_')]
        ckpt_dirs.sort(key=(lambda name: int(name.split('_')[(- 1)])), reverse=True)
        resume = False
        if conf.auto_resume:
            if ((conf.resume_ckpt_dir is not None) and Path(conf.resume_ckpt_dir).is_dir()):
                resume = True
            if (len(ckpt_dirs) > 0):
                resume = True
        if resume:
            # explicit resume_ckpt_dir wins over the latest local checkpoint
            resume_ckpt_dir = Path((conf.resume_ckpt_dir or (train_dir / ckpt_dirs[0])))
            logger.info(f'Loading checkpoints from {resume_ckpt_dir}')
            try:
                (_, task) = self.load_model_and_task(resume_ckpt_dir)
            except Exception:
                logger.error(f"Fail to load the checkpoint {resume_ckpt_dir}. You can set '--train.auto_resume False' to ignore the crashed checkpoint to avoid this behavior.")
                raise
            optimizer_state = torch.load((resume_ckpt_dir / 'optimizer.pt'), map_location='cpu')
            if conf.use_scheduler:
                scheduler_state = torch.load((resume_ckpt_dir / 'scheduler.pt'), map_location='cpu')
            else:
                scheduler_state = None
            with open((resume_ckpt_dir / 'training_stats.yaml'), 'r') as f:
                training_stats = yaml.load(f, Loader=yaml.FullLoader)
            global_step = int(training_stats['global_step'])
            epoch = int(training_stats['epoch'])
            valid_best_metrics = dict(training_stats['valid_best_metrics'])
        else:
            model = self.build_model(**build_model_all_args)
            task = self.build_task(model=model, **build_task_all_args_except_model)
            optimizer_state = None
            scheduler_state = None
            global_step = 0
            epoch = 0
            valid_best_metrics = dict()
        device = torch.device(device)
        wrapped_task = task.to(device)
        if (world_size > 1):
            torch.cuda.set_device(device.index)
            wrapped_task = _DistributedDataParallel(task, device_ids=[device.index], find_unused_parameters=True, output_device=device.index)
        optimizer = self.build_optimizer(build_optimizer, task.parameters())
        if optimizer_state:
            optimizer.load_state_dict(optimizer_state)
        scheduler = None
        if conf.use_scheduler:
            scheduler = self.build_scheduler(build_scheduler, optimizer)
            if scheduler_state:
                scheduler.load_state_dict(scheduler_state)
        train_batch_sampler = DistributedBatchSamplerWrapper(train_batch_sampler, num_replicas=world_size, rank=rank)
        train_dataloader = DataLoader(train_dataset, batch_sampler=train_batch_sampler, num_workers=num_workers, collate_fn=train_collate_fn)
        # non-zero ranks route tqdm output to /dev/null so only rank 0 prints
        tqdm_file = (sys.stderr if (rank == 0) else open(os.devnull, 'w'))
        pbar = tqdm(total=conf.total_steps, dynamic_ncols=True, desc='train', file=tqdm_file)
        pbar.n = global_step
        if (rank == 0):
            tf_dir = (train_dir / 'tb')
            tf_logger = SummaryWriter(str(tf_dir))
        def _save_ckpts_to_dir(ckpts_dir: str, task, optimizer, scheduler, build_model_all_args: dict, build_task_all_args_except_model: dict, save_model: dict, save_task: dict, training_stats: dict, global_config: dict):
            # persist everything required for a full resume: model, task,
            # optimizer/scheduler state, training stats, and the global config
            ckpts_dir: Path = Path(ckpts_dir)
            ckpts_dir.mkdir(exist_ok=True, parents=True)
            model_ckpt_dir = (ckpts_dir / 'model')
            self.save_model(save_model, model_ckpt_dir, build_model_all_args, task.model)
            task_ckpt_dir = (ckpts_dir / 'task')
            self.save_task(save_task, task_ckpt_dir, build_task_all_args_except_model, task)
            torch.save(optimizer.state_dict(), (ckpts_dir / 'optimizer.pt'))
            if (scheduler is not None):
                torch.save(scheduler.state_dict(), (ckpts_dir / 'scheduler.pt'))
            with (ckpts_dir / 'training_stats.yaml').open('w') as f:
                yaml.safe_dump(training_stats, f)
            with (ckpts_dir / 'config.yaml').open('w') as f:
                yaml.safe_dump(global_config, f)
        backward_steps = 0
        while (pbar.n < pbar.total):
            # fixed: the call was previously wrapped in a pointless 1-tuple
            train_batch_sampler.set_epoch(epoch)
            batch_results = []
            logger.info(f'Start epoch {epoch}')
            for batch in train_dataloader:
                try:
                    if (pbar.n >= pbar.total):
                        break
                    global_step = (pbar.n + 1)
                    wrapped_task.train()
                    batch = _to_device(batch, device)
                    (loss, cacheable) = wrapped_task('train', **batch)
                    (loss / conf.gradient_accumulate).backward()
                    batch_results.append(_force_cacheable(cacheable))
                except RuntimeError as e:
                    if (world_size > 1):
                        raise
                    # single-process mode: tolerate known recoverable errors
                    # (fixed: the containment test was inverted; the known
                    # error substring must be searched inside the message)
                    acceptable = False
                    for acc_err in ACCEPTABLE_ERRORS:
                        if (acc_err in str(e)):
                            acceptable = True
                            break
                    if (not acceptable):
                        raise
                    logger.warning(f'Step {global_step}: {str(e)}')
                    with torch.cuda.device(device):
                        torch.cuda.empty_cache()
                    optimizer.zero_grad()
                    continue
                backward_steps += 1
                # only update parameters every `gradient_accumulate` backwards
                if ((backward_steps % conf.gradient_accumulate) > 0):
                    continue
                grad_norm = torch.nn.utils.clip_grad_norm_(wrapped_task.parameters(), conf.gradient_clipping)
                if math.isnan(grad_norm):
                    logger.warning(f'[Runner] - grad norm is NaN at step {global_step}')
                else:
                    optimizer.step()
                optimizer.zero_grad()
                if conf.use_scheduler:
                    scheduler.step()
                # logging / evaluation / checkpointing happen on rank 0 only
                if (rank > 0):
                    batch_results = []
                    pbar.update(1)
                    continue
                def _log_results(split_name: str, logs: dict, tensorboard: SummaryWriter, global_step: int):
                    logger.info(f'{split_name} at step {global_step}')
                    for (name, value) in logs.items():
                        value = float(value)
                        logger.info(f'{name}: {value}')
                        tensorboard.add_scalar(f'{split_name}-{name}', value, global_step=global_step)
                if ((global_step % conf.log_step) == 0):
                    logs = wrapped_task.reduction('train', batch_results)
                    _log_results('train', logs, tf_logger, global_step)
                    batch_results = []
                save_names = []
                if ((global_step % conf.eval_step) == 0):
                    assert ((valid_dataset is not None) and (valid_batch_sampler is not None)), f'valid dataset is not supported, please set train.eval_step to infinite'
                    logs: dict = self.evaluate(evaluate, 'valid', task, valid_dataset, valid_batch_sampler, valid_collate_fn, eval_batch, train_dir, device, num_workers)
                    _log_results('valid', logs, tf_logger, global_step)
                    valid_metrics = {k: float(v) for (k, v) in logs.items()}
                    new_metric = valid_metrics[conf.valid_metric]
                    best_metric = valid_best_metrics.get(conf.valid_metric)
                    if (best_metric is None):
                        is_new_best = True
                    elif conf.valid_higher_better:
                        is_new_best = (new_metric > best_metric)
                    else:
                        is_new_best = (new_metric < best_metric)
                    if is_new_best:
                        valid_best_metrics = deepcopy(valid_metrics)
                        save_names.append('valid_best')
                if ((global_step % conf.save_step) == 0):
                    # prune old step checkpoints so at most keep_num_ckpts remain
                    ckpt_dirs = [key for key in os.listdir(train_dir) if key.startswith('step_')]
                    ckpt_dirs.sort(key=(lambda stem: int(stem.split('_')[(- 1)])))
                    if ((conf.keep_num_ckpts is not None) and (len(ckpt_dirs) >= conf.keep_num_ckpts)):
                        for ckpt_dir in ckpt_dirs[:((len(ckpt_dirs) - conf.keep_num_ckpts) + 1)]:
                            shutil.rmtree((train_dir / ckpt_dir))
                    save_names.append(f'step_{global_step}')
                for name in save_names:
                    training_stats = dict(global_step=global_step, epoch=epoch, valid_best_metrics=valid_best_metrics)
                    _save_ckpts_to_dir((train_dir / name), (task.module if isinstance(task, _DistributedDataParallel) else task), optimizer, scheduler, build_model_all_args, build_task_all_args_except_model, save_model, save_task, training_stats, global_config)
                pbar.update(1)
            epoch += 1
        pbar.close()
        if (rank == 0):
            tf_logger.close()
    def evaluate(self, evaluate: dict, mode: str, task, dataset, batch_sampler, collate_fn, eval_batch: int, dump_dir: str, device: str, num_workers: int):
        '\n The evaluate routine used by :obj:`train` (during validation phase) and :obj:`run`\n (during testing phase).\n\n Args:\n evaluate (dict): same in :obj:`default_config`, no argument supported for now\n **others:\n only meaningful when you want to override this train method, which is not the\n common case. Hence we skip the documentation for now.\n\n '
        assert (mode in ['valid', 'test'])
        dataloader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn)
        task = task.to(device)
        with torch.no_grad():
            batch_results = []
            for (batch_idx, batch) in enumerate(tqdm(dataloader, desc=mode, total=len(dataloader))):
                # eval_batch < 0 evaluates everything; otherwise stop early
                if (batch_idx == eval_batch):
                    break
                batch = _to_device(batch, device)
                task.eval()
                (loss, cacheable) = task(mode, _dump_dir=dump_dir, **batch)
                batch_results.append(_force_cacheable(cacheable))
            logs = task.reduction(mode, batch_results, _dump_dir=dump_dir)
        return logs
    def save_model(self, save_model: dict, model_ckpt_dir: str, build_model_all_args: dict, model: torch.nn.Module):
        '\n Save the model state_dict and the model initialization arguments into the given directory.\n If you override this method, it is highly possible you also need to override :obj:`load_model`\n\n Args:\n save_model (dict): same in :obj:`default_config`, so the user can save additional settings,\n like the configuration of the dataset by duplicating the dataset hypers\n inside the :code:`save_model` field. You can rely on the :code:`omegaconf`\n package to simplify the duplication.\n model_ckpt_dir (str): save the model into the this directory.\n build_model_all_args (dict): all the arguments of :obj:`build_model`.\n By saving this dictionary, you can easily reconstruct the same model\n by calling :obj:`build_model` with the saved dictionary.\n model (torch.nn.Module): the model to be saved.\n\n Returns:\n None\n '
        model_ckpt_dir: Path = Path(model_ckpt_dir)
        if model_ckpt_dir.is_dir():
            shutil.rmtree(model_ckpt_dir, ignore_errors=True)
        model_ckpt_dir.mkdir(exist_ok=True, parents=True)
        with (model_ckpt_dir / 'problem_name').open('w') as f:
            f.write(f'{self.__class__.__name__}')
        torch.save(model.state_dict(), (model_ckpt_dir / 'state_dict.pt'))
        with (model_ckpt_dir / 'arguments.yaml').open('w') as f:
            yaml.safe_dump(build_model_all_args, f)
        if (len(save_model) > 0):
            with (model_ckpt_dir / 'extra_conf.yaml').open('w') as f:
                yaml.safe_dump(save_model, f)
    def load_model(self, model_ckpt_dir: str):
        '\n Return the saved model.\n\n Args:\n model_ckpt_dir (str): Restore the model with :obj:`build_model` and the checkpoint\n saved in this directory.\n\n Return:\n :obj:`torch.nn.Module`\n '
        model_ckpt_dir: Path = Path(model_ckpt_dir)
        with (model_ckpt_dir / 'arguments.yaml').open('r') as f:
            arguments = yaml.load(f, Loader=yaml.SafeLoader)
        model = self.build_model(**arguments)
        state_dict = torch.load((model_ckpt_dir / 'state_dict.pt'), map_location='cpu')
        model.load_state_dict(state_dict)
        return model
    def save_task(self, save_task: dict, task_ckpt_dir: str, build_task_all_args_except_model: dict, task: Task):
        "\n Save the task's state, :code:`task.get_state()`, and the initialization arguments into the given\n directory. If you override this method, it is highly possible you also need to override\n :obj:`load_task`.\n\n Args:\n save_task (dict): same in :obj:`default_config`, so the user can save additional settings,\n like the configuration of the dataset by duplicating the dataset hypers\n inside the :code:`save_task` field. You can rely on the :code:`omegaconf`\n package to simplify the duplication.\n task_ckpt_dir (str): save the task into this directory.\n build_task_all_args_except_model (dict): all the arguments of :obj:`build_task` except\n the :code:`model` argument since the model should be sapartely saved by\n :obj:`save_model`. By saving this dictionary, you can easily reconstruct the same task\n by calling :obj:`build_task` with the saved dictionary.\n task (Task): the task to be saved.\n\n Returns:\n None\n "
        task_ckpt_dir: Path = Path(task_ckpt_dir)
        if task_ckpt_dir.is_dir():
            shutil.rmtree(task_ckpt_dir, ignore_errors=True)
        task_ckpt_dir.mkdir(exist_ok=True, parents=True)
        with (task_ckpt_dir / 'problem_name').open('w') as f:
            f.write(f'{self.__class__.__name__}')
        torch.save(task.get_state(), (task_ckpt_dir / 'state.pt'))
        arguments = build_task_all_args_except_model
        arguments_dir = (task_ckpt_dir / 'arguments')
        arguments_dir.mkdir(exist_ok=True, parents=True)
        # each argument is saved as yaml when serializable, else pickled
        for (k, v) in arguments.items():
            try:
                yaml.safe_dump(v)
            except Exception:
                with (arguments_dir / f'{k}.pkl').open('wb') as f:
                    pickle.dump(v, f)
            else:
                with (arguments_dir / f'{k}.yaml').open('w') as f:
                    yaml.safe_dump(v, f)
        if (len(save_task) > 0):
            # fixed: '(task_ckpt_dir, ...)' built a tuple instead of a path,
            # so saving any extra conf crashed with AttributeError
            with (task_ckpt_dir / 'extra_conf.yaml').open('w') as f:
                yaml.safe_dump(save_task, f)
    def load_task(self, task_ckpt_dir: str, model: torch.nn.Module, task_overrides: dict=None):
        "\n Return the saved task.\n\n Args:\n task_ckpt_dir (str): Restore the task with :obj:`build_task` and the checkpoint\n saved in this directory.\n model (torch.nn.Module): the model for the task, since the model is separately saved\n and is required for :obj:`build_task`.\n task_overrides (dict): overrides the saved initialization arguments, so can change\n the loaded task's behavior. Like, change the decoding hyperparameters.\n\n Returns:\n :obj:`s3prl.task.Task`\n "
        task_ckpt_dir: Path = Path(task_ckpt_dir)
        task_overrides = (task_overrides or {})
        arguments = task_overrides.copy()
        arguments_dir = (task_ckpt_dir / 'arguments')
        for filename in os.listdir(arguments_dir):
            filepath = (arguments_dir / filename)
            key = filepath.stem
            if (key in task_overrides):
                continue
            if (filepath.suffix == '.yaml'):
                with filepath.open('r') as f:
                    value = yaml.load(f, Loader=yaml.SafeLoader)
            elif (filepath.suffix == '.pkl'):
                with filepath.open('rb') as f:
                    value = pickle.load(f)
            else:
                # fixed: unknown suffixes previously fell through and read an
                # unbound (or stale) `value`; ignore unrelated files instead
                continue
            assert (key not in arguments), f"Unexpected duplicated file stem '{key}' found in {arguments_dir}. Please delete one of them."
            arguments[key] = value
        task = self.build_task(model=model, **arguments)
        state = torch.load((Path(task_ckpt_dir) / 'state.pt'), map_location='cpu')
        task.set_state(state)
        return task
    def load_model_and_task(self, ckpts_dir: str, task_overrides: dict=None):
        "\n This is a helper method to combine :obj:`load_model` and :obj:`load_task`\n together to directly load the model and the task. This method assumes\n the model is saved under :code:`ckpts_dir / 'model'` and the task is\n saved under :code:`ckpts_dir / 'task'`\n\n Returns:\n tuple\n\n 1. model (:obj:`torch.nn.Module`)\n 2. task (:obj:`s3prl.task.Task`)\n "
        ckpts_dir: Path = Path(ckpts_dir)
        task_overrides = (task_overrides or {})
        model = self.load_model((ckpts_dir / 'model'))
        task = self.load_task((ckpts_dir / 'task'), model, task_overrides)
        return (model, task)
    @staticmethod
    def _get_current_arguments(exclude_self_and_cls: bool=True, flatten_dict: Union[(str, List[str])]=None) -> dict:
        # Capture the *caller's* argument names/values via frame introspection,
        # optionally flattening the listed dict-valued arguments into the top
        # level. Used to forward a method's full signature to a helper.
        if isinstance(flatten_dict, str):
            flatten_dict = [flatten_dict]
        frame = inspect.currentframe().f_back
        (args, _, _, values) = inspect.getargvalues(frame)
        config = {key: values[key] for key in args}
        if exclude_self_and_cls:
            config.pop('self', None)
            config.pop('cls', None)
        if (flatten_dict is not None):
            flatten_config = {}
            for (k, v) in config.items():
                if (k in flatten_dict):
                    assert isinstance(v, dict)
                    for (_k, _v) in v.items():
                        flatten_config[_k] = _v
                else:
                    flatten_config[k] = v
            config = flatten_config
        def assert_no_missing(config: dict):
            # raises if any omegaconf MISSING value is still unresolved
            omegaconf.OmegaConf.to_container(omegaconf.OmegaConf.create(config), throw_on_missing=True)
        assert_no_missing(config)
        return config
    @staticmethod
    def _get_time_tag():
        # timestamp tag usable as a filesystem-safe run identifier
        return datetime.fromtimestamp(time()).strftime('%Y_%m_%d_%H_%M_%S')
    @staticmethod
    def _stage_check(stage_id: int, stop: int, check_fn: callable):
        # Verify a pipeline stage completed successfully; exit early when the
        # user asked to stop at this stage.
        try:
            check_fn()
        except Exception:
            logger.error(f'Stage {stage_id} was not done before or is corrupted. Please re-run from this stage.')
            raise
        if (isinstance(stop, int) and (stage_id == stop)):
            exit(0)
    def main(self, args: List[str]=None):
        # CLI entry point: merge default_config <- yaml file <- CLI overrides,
        # then dispatch to self.run(**config).
        parser = argparse.ArgumentParser()
        parser.add_argument('--verbose', default='INFO')
        parser.add_argument('--config', help='The yaml config path to override the default config')
        parser.add_argument('--print_config', '-p', action='store_true')
        parser.add_argument('--dump_config', '-d', help='The path to dump the default config as yaml')
        (args, override) = parser.parse_known_args(args)
        if args.print_config:
            print(f'''
            Default config of {self}
            ''')
            print(yaml.safe_dump(self.default_config()))
            exit(0)
        if (args.dump_config is not None):
            with open(args.dump_config, 'w') as f:
                yaml.safe_dump(self.default_config(), f)
            exit(0)
        root_logger = logging.getLogger()
        root_logger.handlers = []
        logging.basicConfig(level=getattr(logging, args.verbose), format=LOGGING_FORMAT)
        if (args.config is not None):
            with open(args.config) as f:
                yaml_conf = (yaml.load(f, Loader=yaml.FullLoader) or dict())
        else:
            yaml_conf = dict()
        override_conf = parse_overrides(override)
        schema = omegaconf.OmegaConf.create(self.default_config())
        config = omegaconf.OmegaConf.merge(schema, yaml_conf, override_conf)
        config = omegaconf.OmegaConf.to_container(config, resolve=True, throw_on_missing=True)
        logger.info(config)
        self.run(**config)
        return config
|
def resample_hear_corpus(task_dir: str, target_sr: int=16000, num_workers: int=6):
    '\n Resample audio files in\n\n ${task_dir}/48000/\n\n to\n\n ${task_dir}/${target_sr}/\n '
    task_dir: Path = Path(task_dir)
    # the output directory doubles as the "already done" marker
    target_audio_dir: Path = (task_dir / f'{target_sr}')
    if target_audio_dir.is_dir():
        logger.info(f'{target_audio_dir} already exist. Do not need to resample')
        return
    default_audio_dir = (task_dir / '48000')
    assert default_audio_dir.exists(), f'{default_audio_dir} not found'
    # each split sub-directory is mirrored under the target sample rate
    for split_name in sorted(os.listdir(default_audio_dir)):
        split_dir = (default_audio_dir / split_name)
        tgt_dir = (target_audio_dir / split_name)
        tgt_dir.mkdir(exist_ok=True, parents=True)
        wav_paths = find_files(split_dir)

        def resample(wav_path: str):
            # load, resample only when needed, and write under the same name
            (wav, sr) = torchaudio.load(wav_path)
            if (sr != target_sr):
                wav = torchaudio.transforms.Resample(sr, target_sr)(wav)
            torchaudio.save(str((tgt_dir / Path(wav_path).name)), wav, sample_rate=target_sr)

        logger.info(f'Resampling {split_dir} to {tgt_dir}:')
        Parallel(n_jobs=num_workers)((delayed(resample)(path) for path in tqdm(wav_paths)))
|
class CommonExample(SuperbSID):
    """A tiny self-contained example problem: synthesizes 5 random two-second
    waveforms with dummy labels and splits them 3/1/1 into train/valid/test."""

    def default_config(self) -> dict:
        # shrink the parent's training schedule so the example finishes fast
        conf = super().default_config()
        conf['prepare_data'] = {}
        conf['train'] = dict(
            total_steps=10,
            log_step=1,
            eval_step=5,
            save_step=5,
            gradient_clipping=1.0,
            gradient_accumulate=1,
            valid_metric='accuracy',
            valid_higher_better=True,
            auto_resume=True,
        )
        return conf

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Write 5 synthetic wav files plus train/valid/test csv manifests."""
        target_dir: Path = Path(target_dir)
        # generate all waveforms first (keeps the RNG call order deterministic)
        waveforms = [torch.randn(1, (16000 * 2)) for _ in range(5)]
        wav_paths = []
        for (idx, waveform) in enumerate(waveforms):
            path = str((Path(target_dir) / f'{idx}.wav'))
            torchaudio.save(path, waveform, sample_rate=16000)
            wav_paths.append(path)
        df = pd.DataFrame({
            'id': [Path(p).stem for p in wav_paths],
            'wav_path': wav_paths,
            'label': ['a', 'a', 'b', 'c', 'd'],
        })
        train_csv = (target_dir / 'train.csv')
        valid_csv = (target_dir / 'valid.csv')
        test_csv = (target_dir / 'test.csv')
        # 3 train / 1 valid / 1 test
        df.iloc[:3].to_csv(train_csv)
        df.iloc[3:4].to_csv(valid_csv)
        df.iloc[4:].to_csv(test_csv)
        return (train_csv, valid_csv, [test_csv])
|
class HearBeijingOpera(HearESC50):
    """HEAR Beijing Opera percussion classification: reuses the HearESC50
    k-fold recipe with a corpus-specific fold count and score set."""
    def default_config(self) -> dict:
        # NOTE(review): build_scheduler uses a top-level `gamma` instead of the
        # name/conf layout consumed by Problem.build_scheduler; this only
        # matters when train.use_scheduler is enabled (default False) -- confirm.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=BEIJING_OPERA_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearCremaD(HearESC50):
    """HEAR CREMA-D emotion classification: reuses the HearESC50 k-fold
    recipe with a corpus-specific fold count, learning rate, and score set."""
    def default_config(self) -> dict:
        # NOTE(review): build_scheduler uses a top-level `gamma` instead of the
        # name/conf layout consumed by Problem.build_scheduler; this only
        # matters when train.use_scheduler is enabled (default False) -- confirm.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=CREMAD_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
def dcase_2016_task2(target_dir: str, cache_dir: str, dataset_root: str, get_path_only: bool=False):
    """Convert the HEAR DCASE 2016 Task 2 json metadata into train/valid/test
    csv manifests under ``target_dir`` (one row per labeled event segment),
    resampling the corpus audio to 16 kHz first."""
    target_dir: Path = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    test_csv = (target_dir / 'test.csv')
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = (dataset_root / '16000')

    def json_to_csv(json_path: str, csv_path: str, split: str):
        # one csv row per (recording, event segment); segment times are in ms
        with open(json_path) as fp:
            metadata = json.load(fp)
        rows = []
        for utt in metadata:
            wav_path: Path = ((wav_root / split) / utt).resolve()
            assert wav_path.is_file()
            info = torchaudio.info(wav_path)
            duration = (info.num_frames / info.sample_rate)
            for segment in metadata[utt]:
                rows.append({
                    'record_id': utt,
                    'wav_path': str(wav_path),
                    'duration': duration,
                    'utt_id': f"{utt}-{int(segment['start'])}-{int(segment['end'])}",
                    'labels': segment['label'],
                    'start_sec': (segment['start'] / 1000),
                    'end_sec': (segment['end'] / 1000),
                })
        pd.DataFrame(rows).to_csv(csv_path, index=False)

    for (split, csv_path) in (('train', train_csv), ('valid', valid_csv), ('test', test_csv)):
        json_to_csv((dataset_root / f'{split}.json'), csv_path, split)
    return (train_csv, valid_csv, [test_csv])
|
class HearDcase2016Task2(HearFSD):
    """HEAR DCASE 2016 Task 2 sound event detection, framed as frame-level
    multilabel classification over fixed-length audio chunks; evaluation
    regroups chunks per recording via GroupSameItemSampler."""
    def default_config(self) -> dict:
        # NOTE(review): build_scheduler uses a top-level `gamma` instead of the
        # name/conf layout consumed by Problem.build_scheduler; only exercised
        # when train.use_scheduler is enabled -- confirm.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING), build_dataset=dict(train=dict(chunk_secs=4.0, step_secs=4.0), valid=dict(chunk_secs=4.0, step_secs=4.0), test=dict(chunk_secs=4.0, step_secs=4.0)), build_batch_sampler=dict(train=dict(batch_size=5, shuffle=True)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multilabel', scores=['event_onset_200ms_fms', 'segment_1s_er'], postprocessing_grid={'median_filter_ms': [250], 'min_duration': [125, 250]}), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='event_onset_200ms_fms', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        # forward this method's full argument list to the helper, flattening
        # the `prepare_data` dict into top-level keyword arguments
        return dcase_2016_task2(**self._get_current_arguments(flatten_dict='prepare_data'))
    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """Build a FrameLabelDataset for `mode` from the prepared csv, using
        the pickled label encoder produced earlier in the pipeline."""
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None
        conf = Config(**build_dataset)
        # pick the per-mode sub-config; fall back to {} when absent
        conf = getattr(conf, mode)
        conf = (conf or {})
        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        df = pd.read_csv(data_csv)
        # encode the raw label strings into model targets
        df['label'] = [encoder.encode(label) for label in df['labels'].tolist()]
        dataset = FrameLabelDataset(df, len(encoder), frame_shift, **conf)
        return dataset
    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Fixed-size shuffled batches for training; valid/test group all
        chunks of the same recording into one batch for per-record scoring."""
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None
        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.train or {}))
        elif (mode == 'valid'):
            # NOTE(review): `target_dir / ...` requires target_dir to already
            # be a Path here despite the `str` annotation -- confirm callers
            record_ids = get_info(dataset, ['record_id'], (target_dir / 'valid_stats'))
            return GroupSameItemSampler(record_ids)
        elif (mode == 'test'):
            record_ids = get_info(dataset, ['record_id'], (target_dir / 'test_stats'))
            return GroupSameItemSampler(record_ids)
        else:
            raise ValueError(f'Unsupported mode: {mode}')
    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame=None, test_df: pd.DataFrame=None):
        """Build an EventPredictionTask, converting the valid/test manifests
        into per-recording ground-truth event lists."""
        def df_to_events(df: pd.DataFrame):
            # group rows by recording; convert start/end back to milliseconds
            data = {}
            for (rowid, row) in df.iterrows():
                record_id = row['record_id']
                if (not (record_id in data)):
                    data[record_id] = []
                data[record_id].append({'start': (row['start_sec'] * 1000), 'end': (row['end_sec'] * 1000), 'label': row['labels']})
            return data
        valid_events = (None if (valid_df is None) else df_to_events(valid_df))
        test_events = (None if (test_df is None) else df_to_events(test_df))
        return EventPredictionTask(model, encoder, valid_target_events=valid_events, test_target_events=test_events, **build_task)
|
def hear_scene_kfolds(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int, num_folds: int, get_path_only: bool=False):
    """Prepare train/valid/test csvs for a k-fold HEAR scene corpus.

    The corpus layout is expected to be ``dataset_root/fold00.json`` ...
    ``fold0{k-1}.json`` with matching wav folders under
    ``dataset_root/16000/fold0X/`` (created by :obj:`resample_hear_corpus`).
    ``test_fold`` becomes the test set, the next fold (cyclically) the valid
    set, and the remaining folds the train set.

    Args:
        target_dir (str): where train.csv / valid.csv / test.csv are written
        cache_dir (str): unused here, kept for the prepare_data interface
        dataset_root (str): root of the downloaded HEAR corpus
        test_fold (int): which fold to hold out for testing
        num_folds (int): total number of folds in the corpus
        get_path_only (bool): if True, only return the csv paths

    Returns:
        tuple: (train_csv, valid_csv, [test_csv]) as :obj:`Path` objects
    """
    assert (test_fold < num_folds), f'test_fold id must be smaller than num_folds. get test_fold={test_fold} and num_folds={num_folds}'
    target_dir = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    test_csv = (target_dir / 'test.csv')
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])

    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = (dataset_root / '16000')

    def load_json(filepath):
        with open(filepath, 'r') as fp:
            return json.load(fp)

    fold_datas = []
    for fold_id in range(num_folds):
        # Fold files are zero-padded to two digits: fold00.json, fold01.json...
        # (":02d" replaces the previous ":2d" + replace(' ', '0') workaround)
        meta = load_json(dataset_root / f'fold{fold_id:02d}.json')
        data = defaultdict(list)
        for utt, labels in meta.items():
            data['id'].append(utt)
            data['wav_path'].append(wav_root / f'fold{fold_id:02d}' / utt)
            data['labels'].append(','.join(str(label).strip() for label in labels))
        fold_datas.append(pd.DataFrame(data=data))

    test_id = test_fold
    valid_id = ((test_fold + 1) % num_folds)
    train_ids = [idx for idx in range(num_folds) if (idx not in [test_id, valid_id])]

    train_data = pd.concat([fold_datas[idx] for idx in train_ids])
    train_data.to_csv(train_csv, index=False)
    fold_datas[valid_id].to_csv(valid_csv, index=False)
    fold_datas[test_id].to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
|
class HearESC50(HearFSD):
    """HEAR benchmark: ESC-50 environmental sound classification.

    A clip-level multiclass task evaluated with k-fold cross validation;
    data preparation differs from :obj:`HearFSD` by using
    :obj:`hear_scene_kfolds` with ``test_fold`` / ``num_folds``.
    """

    def default_config(self) -> dict:
        # Multiclass, 50 classes over ESC50_NUM_FOLDS folds; batch_size=1 for
        # valid/test so each clip is scored individually.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=ESC50_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=4000, log_step=100, eval_step=500, save_step=100, gradient_clipping=1.0, gradient_accumulate=4, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare k-fold csvs via :obj:`hear_scene_kfolds`."""
        return hear_scene_kfolds(**self._get_current_arguments(flatten_dict='prepare_data'))
|
def hear_scene_trainvaltest(target_dir: str, cache_dir: str, dataset_root: str, get_path_only: bool=False):
    """Prepare csvs for a HEAR scene corpus with fixed train/valid/test splits.

    Expects ``dataset_root/{train,valid,test}.json`` metadata plus wavs under
    ``dataset_root/16000/{split}/`` (created by :obj:`resample_hear_corpus`).

    Args:
        target_dir (str): where train.csv / valid.csv / test.csv are written
        cache_dir (str): unused here, kept for the prepare_data interface
        dataset_root (str): root of the downloaded HEAR corpus
        get_path_only (bool): if True, only return the csv paths

    Returns:
        tuple: (train_csv, valid_csv, [test_csv]) as :obj:`Path` objects
    """
    target_dir = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    # BUG FIX: was `target_dir / 'test_csv'` (missing ".csv"); now consistent
    # with hear_scene_kfolds / prepare_maestro and with the evaluate-stage
    # directory naming that relies on Path(test_csv).stem == "test".
    test_csv = (target_dir / 'test.csv')
    # Return early before touching the corpus, like the sibling prepare
    # functions do (previously resample_hear_corpus ran even for path-only).
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])

    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = (dataset_root / '16000')

    def load_json(filepath):
        with open(filepath, 'r') as fp:
            return json.load(fp)

    def split_to_df(split: str) -> pd.DataFrame:
        # One row per clip; multilabel entries are joined with " ; " which the
        # encoder-building step splits back on ";".
        meta = load_json(dataset_root / f'{split}.json')
        data = defaultdict(list)
        for utt, labels in meta.items():
            data['id'].append(utt)
            data['wav_path'].append(wav_root / split / utt)
            data['labels'].append(' ; '.join(str(label).strip() for label in labels))
        return pd.DataFrame(data=data)

    split_to_df('train').to_csv(train_csv, index=False)
    split_to_df('valid').to_csv(valid_csv, index=False)
    split_to_df('test').to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
|
class HearFSD(SuperbSID):
    """HEAR benchmark: FSD50K clip-level multilabel tagging.

    This is the base class of most HEAR scene (clip-level) problems. It
    parses the HEAR train/valid/test json metadata into csvs, builds a
    multilabel :obj:`CategoryEncoder` from every split, and trains a
    :obj:`HearFullyConnectedPrediction` head via :obj:`ScenePredictionTask`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING), build_batch_sampler=dict(train=dict(batch_size=10, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multilabel', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=40000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='mAP', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare train/valid/test csvs via :obj:`hear_scene_trainvaltest`."""
        return hear_scene_trainvaltest(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool=False):
        """Build and pickle a :obj:`CategoryEncoder` over every split's labels.

        The 'labels' column stores ";"-joined label strings; all splits are
        scanned so the encoder covers test-only categories as well.

        Returns:
            Path to the pickled encoder.
        """
        encoder_path = (Path(target_dir) / 'encoder.pkl')
        if get_path_only:
            return encoder_path
        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])
        all_labels = []
        for (rowid, row) in all_csv.iterrows():
            labels = str(row['labels']).split(';')
            labels = [l.strip() for l in labels]
            all_labels.extend(labels)
        encoder = CategoryEncoder(all_labels)
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)
        return encoder_path

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """Build a clip-level dataset yielding waveform + binary label vector.

        NOTE(review): labels are split on ";" here, while
        :obj:`hear_scene_kfolds` joins them with "," — benign for the k-fold
        tasks since they are single-label per clip, but worth confirming.
        """
        df = pd.read_csv(data_csv)
        ids = df['id'].tolist()
        wav_paths = df['wav_path'].tolist()
        labels = [[single_label.strip() for single_label in str(label_str).split(';')] for label_str in df['labels'].tolist()]
        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        audio_loader = LoadAudio(wav_paths)
        label_encoder = EncodeMultiLabel(labels, encoder)

        class Dataset():
            # Thin closure-based dataset over the audio loader and encoder.
            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {'x': audio['wav'], 'x_len': audio['wav_len'], 'y': label['binary_labels'], 'labels': label['labels'], 'unique_name': ids[index]}

        dataset = Dataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Build a :obj:`FixedBatchSizeBatchSampler` for the given ``mode``.

        BUG FIX: previously ``conf.train`` was used for every mode, so the
        ``valid`` / ``test`` entries in the config (batch_size=1) were dead.
        Now the sub-config matching ``mode`` is selected, as in
        :obj:`HearDcase2016Task2.build_batch_sampler`.
        """
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None
        conf = Config(**build_batch_sampler)
        mode_conf = getattr(conf, mode, None)
        return FixedBatchSizeBatchSampler(dataset, **(mode_conf or {}))

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Build the fully-connected prediction head over pooled features."""
        return HearFullyConnectedPrediction(downstream_input_size, downstream_output_size, **build_downstream)

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame=None, test_df: pd.DataFrame=None):
        """Build a clip-level :obj:`ScenePredictionTask`."""
        return ScenePredictionTask(model, encoder, **build_task)
|
class HearGSC5hr(HearFSD):
    """HEAR benchmark: Google Speech Commands (5-hour subset), multiclass
    keyword classification. Only the default configuration differs from
    :obj:`HearFSD`."""

    def default_config(self) -> dict:
        """Default configuration; see the inherited methods for each key."""
        config = dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(prediction_type='multiclass', scores=['top1_acc']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
        return config
|
class HearGtzan(HearESC50):
    """HEAR benchmark: GTZAN music genre classification (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=GTZAN_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearGtzanMusicSpeech(HearESC50):
    """HEAR benchmark: GTZAN music/speech discrimination (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=GTZAN_MUSIC_SPEECH_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearGunshot(HearESC50):
    """HEAR benchmark: gunshot triangulation (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=GUNSHOT_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearLibriCount(HearESC50):
    """HEAR benchmark: LibriCount speaker-count estimation (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=LIBRICOUNT_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
def prepare_maestro(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int=0, get_path_only: bool=False):
    """Prepare train/valid/test csvs for the HEAR MAESTRO note-event task.

    Each fold json maps a recording to its note segments; every segment
    becomes one csv row (record_id, wav_path, duration, utt_id, labels,
    start_sec, end_sec). ``test_fold`` is the test split, the next fold
    (cyclically) the valid split, and the rest the train split.

    Args:
        target_dir (str): where train.csv / valid.csv / test.csv are written
        cache_dir (str): unused here, kept for the prepare_data interface
        dataset_root (str): root of the downloaded MAESTRO HEAR corpus
        test_fold (int): which of the MAESTRO_NUM_FOLDS folds to hold out
        get_path_only (bool): if True, only return the csv paths

    Returns:
        tuple: (train_csv, valid_csv, [test_csv]) as :obj:`Path` objects
    """
    target_dir: Path = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    test_csv = (target_dir / 'test.csv')
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])

    assert (test_fold < MAESTRO_NUM_FOLDS), f"MAESTRO only has {MAESTRO_NUM_FOLDS} folds but get 'test_fold' arguments {test_fold}"
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root = (dataset_root / '16000')

    # Use the module-level constant throughout instead of re-hard-coding 5
    # (previously NUM_FOLD = 5 could silently diverge from MAESTRO_NUM_FOLDS).
    test_id = test_fold
    valid_id = ((test_fold + 1) % MAESTRO_NUM_FOLDS)
    train_ids = [idx for idx in range(MAESTRO_NUM_FOLDS) if (idx not in [test_id, valid_id])]

    fold_dfs = []
    for fold_id in range(MAESTRO_NUM_FOLDS):
        # Fold files are zero-padded: fold00.json ... (":02d" replaces the
        # previous ":2d" + replace(' ', '0') workaround)
        with open(dataset_root / f'fold{fold_id:02d}.json') as f:
            metadata = json.load(f)
        data = defaultdict(list)
        for utt in metadata:
            wav_path = ((wav_root / f'fold{fold_id:02d}') / utt).resolve()
            info = torchaudio.info(wav_path)
            baseinfo = {'record_id': utt, 'wav_path': str(wav_path), 'duration': (info.num_frames / info.sample_rate)}
            for segment in metadata[utt]:
                fullinfo = deepcopy(baseinfo)
                fullinfo['utt_id'] = f"{baseinfo['record_id']}-{int(segment['start'])}-{int(segment['end'])}"
                fullinfo['labels'] = segment['label']
                # Segment times are stored in milliseconds; csv uses seconds.
                fullinfo['start_sec'] = (segment['start'] / 1000)
                fullinfo['end_sec'] = (segment['end'] / 1000)
                for (key, value) in fullinfo.items():
                    data[key].append(value)
        fold_dfs.append(pd.DataFrame(data=data))

    train_data: pd.DataFrame = pd.concat([fold_dfs[idx] for idx in train_ids])
    train_data.to_csv(train_csv, index=False)
    fold_dfs[valid_id].to_csv(valid_csv, index=False)
    fold_dfs[test_id].to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
|
class HearMaestro(HearDcase2016Task2):
    """HEAR benchmark: MAESTRO note-onset detection (frame-level, k-fold).

    Inherits the event-detection pipeline from :obj:`HearDcase2016Task2`;
    only data preparation and the default configuration differ.
    """

    def default_config(self) -> dict:
        # valid/test samplers group by record_id so whole recordings are
        # evaluated together; scores are note onset / onset+offset F-measures.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING), build_batch_sampler=dict(train=dict(batch_size=5, shuffle=True), valid=dict(item='record_id'), test=dict(item='record_id')), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multilabel', scores=['event_onset_50ms_fms', 'event_onset_offset_50ms_20perc_fms'], postprocessing_grid={'median_filter_ms': [150], 'min_duration': [50]}), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='event_onset_50ms_fms', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare 5-fold MAESTRO csvs via :obj:`prepare_maestro`."""
        return prepare_maestro(**self._get_current_arguments(flatten_dict='prepare_data'))
|
class HearNsynth5hr(HearFSD):
    """HEAR benchmark: NSynth pitch classification (5-hour subset, multiclass).

    Only the default configuration differs from :obj:`HearFSD`; note the
    pitch-specific metrics (pitch_acc / chroma_acc).
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['pitch_acc', 'chroma_acc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='pitch_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearStroke(HearESC50):
    """HEAR benchmark: Mridangam stroke classification (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=STROKE_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearTonic(HearESC50):
    """HEAR benchmark: Mridangam tonic classification (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=TONIC_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearVocal(HearESC50):
    """HEAR benchmark: vocal imitation classification (k-fold, multiclass).

    Only the default configuration differs from :obj:`HearESC50`; mAP is
    the validation metric here rather than top-1 accuracy.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=VOCAL_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='mAP', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class HearVoxLingual(HearESC50):
    """HEAR benchmark: VoxLingua107 top-10 language identification
    (k-fold, multiclass). Only the default configuration differs from
    :obj:`HearESC50`.
    """

    def default_config(self) -> dict:
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, test_fold=MISSING, num_folds=VOX_LINQUAL_NUM_FOLDS), build_batch_sampler=dict(train=dict(batch_size=32, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'), build_model=dict(upstream_trainable=False), build_task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP']), build_optimizer=dict(name='Adam', conf=dict(lr=0.001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(), save_task=dict(), train=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='top1_acc', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())
|
class Common(Problem):
    """Generic staged train/evaluate recipe for classification problems.

    Subclasses provide the ``prepare_data`` / ``build_*`` methods; this class
    wires them into a 4-stage pipeline (data prep, encoder, training,
    testing) driven by :obj:`run`.
    """

    def run(self, target_dir: str, cache_dir: str=None, remove_all_cache: bool=False, start: int=0, stop: int=None, num_workers: int=6, eval_batch: int=(- 1), device: str='cuda', world_size: int=1, rank: int=0, test_ckpt_dir: str=None, prepare_data: dict=None, build_encoder: dict=None, build_dataset: dict=None, build_batch_sampler: dict=None, build_collate_fn: dict=None, build_upstream: dict=None, build_featurizer: dict=None, build_downstream: dict=None, build_model: dict=None, build_task: dict=None, build_optimizer: dict=None, build_scheduler: dict=None, save_model: dict=None, save_task: dict=None, train: dict=None, evaluate: dict=None):
        """Run the problem end-to-end in stages.

        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the metadata file (waveform path, label...)
        1        Build the encoder to encode the labels
        2        Train the model
        3        Evaluate the model on multiple test sets
        ======== ====================

        Args:
            target_dir (str): The directory that stores the script result.
            cache_dir (str): The directory that caches the processed data.
                Default: ~/.cache/s3prl/data
            remove_all_cache (bool): Whether to remove all the cache stored
                under ``cache_dir``. Default: False
            start (int): The starting stage of the problem script. Default: 0
            stop (int): The stopping stage; ``None`` runs to the final stage.
            num_workers (int): num_workers for all the torch DataLoader
            eval_batch (int): During evaluation (valid or test), limit the
                number of batches; -1 disables the limit. Useful for fast
                development runs. Default: -1
            device (str): Device for all torch operations: "cpu" or "cuda".
            world_size (int): Number of processes running this script in
                parallel (distributed training). Default: 1
            rank (int): This process's id among ``world_size`` processes
                (0-based).
            test_ckpt_dir (str): Checkpoint dir for testing. If None, use the
                validation-best checkpoint under ``target_dir``.
            **kwds: The remaining ``dict`` arguments (``prepare_data``,
                ``build_model``, ...) are forwarded to the same-named methods
                and are not used by the core :obj:`run` logic. See each
                method's documentation for its supported keys.
        """
        # Snapshot the fully-resolved arguments into a timestamped yaml for
        # reproducibility.
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        # Normalize every optional per-method config dict to {}.
        prepare_data: dict = (prepare_data or {})
        build_encoder: dict = (build_encoder or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate = (evaluate or {})
        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)
        # ---- Stage 0: corpus parsing / metadata csv preparation ----
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            # First call does the actual work; second call re-resolves the
            # canonical csv paths without re-processing.
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=False)
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=True)
            def check_fn():
                assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
                for test_csv in test_csvs:
                    assert Path(test_csv).is_file()
            self._stage_check(stage_id, stop, check_fn)
        # ---- Stage 1: label encoder ----
        # NOTE(review): train_csv/test_csvs and encoder_path are only bound
        # inside their stage's `if` block; resuming with start >= 1 or >= 2
        # looks like it would raise NameError below — TODO confirm.
        stage_id = 1
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: build encoder')
            encoder_path = self.build_encoder(build_encoder, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=False)
            encoder_path = self.build_encoder(build_encoder, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=True)
            def check_fn():
                assert Path(encoder_path).is_file()
            self._stage_check(stage_id, stop, check_fn)
        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        model_output_size = len(encoder)
        # A throwaway model instance just to query its feature frame shift.
        model = self.build_model(build_model, model_output_size, build_upstream, build_featurizer, build_downstream)
        frame_shift = model.downsample_rate
        # ---- Stage 2: training ----
        stage_id = 2
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'train', train_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'valid', valid_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
            with Path(encoder_path).open('rb') as f:
                encoder = pickle.load(f)
            build_model_all_args = dict(build_model=build_model, model_output_size=len(encoder), build_upstream=build_upstream, build_featurizer=build_featurizer, build_downstream=build_downstream)
            build_task_all_args_except_model = dict(build_task=build_task, encoder=encoder, valid_df=pd.read_csv(valid_csv))
            self.train(train, train_dir, build_model_all_args, build_task_all_args_except_model, save_model, save_task, build_optimizer, build_scheduler, evaluate, train_ds, train_bs, self.build_collate_fn(build_collate_fn, 'train'), valid_ds, valid_bs, self.build_collate_fn(build_collate_fn, 'valid'), device=device, eval_batch=eval_batch, num_workers=num_workers, world_size=world_size, rank=rank)
            def check_fn():
                assert (train_dir / 'valid_best').is_dir()
            self._stage_check(stage_id, stop, check_fn)
        # ---- Stage 3: evaluation on every test csv ----
        stage_id = 3
        if (start <= stage_id):
            test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
            assert test_ckpt_dir.is_dir()
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_csv) in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                # One result directory per (checkpoint, test set) pair.
                test_dir: Path = (((target_dir / 'evaluate') / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-')) / test_name)
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_csv}')
                (test_ds, test_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'test', test_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir, task_overrides={'test_df': pd.read_csv(test_csv)})
                logs = self.evaluate(evaluate, 'test', valid_best_task, test_ds, test_bs, self.build_collate_fn(build_collate_fn, 'test'), eval_batch, test_dir, device, num_workers)
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                logger.info(f'test results: {test_metrics}')
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int, build_dataset: dict, build_batch_sampler: dict):
        """Build the (dataset, batch_sampler) pair for one split ``mode``."""
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, encoder_path, frame_shift)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame=None, test_df: pd.DataFrame=None):
        """Build the task wrapping the train/valid/test step logic.

        The task defines per-step forward logic for ``model`` and how batch
        results are reduced into metrics. By default builds
        :obj:`UtteranceClassificationTask`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`

        Returns:
            Task
        """
        task = UtteranceClassificationTask(model, encoder)
        return task
|
def iemocap_for_superb(target_dir: str, cache_dir: str, iemocap: str, test_fold: int, valid_ratio: float=0.2, get_path_only: bool=False):
    """
    Prepare IEMOCAP for emotion classification with SUPERB protocol,
    following :obj:`SuperbER.prepare_data` format.

    .. note::

        In SUPERB protocol, you need to do 5-fold cross validation.

        Also, only use 4 emotion classes: :code:`happy`, :code:`angry`,
        :code:`neutral`, and :code:`sad` with balanced data points and
        the :code:`excited` class is merged into :code:`happy` class.

    Args:
        target_dir (str): directory to save the train/valid/test csv files
        cache_dir (str): directory to cache the downloaded split metadata
        iemocap (str): The root path of the IEMOCAP
        test_fold (int): Which fold to use as the test fold, select from 0 to 4
        valid_ratio (float): given the remaining 4 folds, how many data to use as the validation set
        get_path_only (bool): directly return the csv paths without parsing the corpus

    Returns:
        tuple: (train_path, valid_path, test_paths)
    """
    target_dir = Path(target_dir)
    train_path = target_dir / 'train.csv'
    valid_path = target_dir / 'valid.csv'
    test_paths = [target_dir / 'test.csv']
    if get_path_only:
        return train_path, valid_path, test_paths

    corpus = IEMOCAP(iemocap)
    all_datapoints = corpus.all_data

    def format_fields(data: dict):
        # keep only the two fields the csv format needs: wav_path and label
        return {
            data_id: dict(wav_path=point['wav_path'], label=point['emotion'])
            for data_id, point in data.items()
        }

    def filter_data(data_ids: List[str]):
        # Keep only the 4 SUPERB classes; merge 'exc' (excited) into 'hap' (happy).
        # Each datapoint is shallow-copied so the shared corpus.all_data is
        # never mutated (the original remapped 'exc' in place).
        result = dict()
        for data_id in data_ids:
            data_point = dict(all_datapoints[data_id])
            if data_point['emotion'] in ['neu', 'hap', 'ang', 'sad', 'exc']:
                if data_point['emotion'] == 'exc':
                    data_point['emotion'] = 'hap'
                result[data_id] = data_point
        return result

    # download the standard fold-specific split metadata (cached across runs)
    test_session_id = (test_fold + 1)
    train_meta_data_json = (Path(cache_dir) / f'test_session{test_session_id}_train_metadata.json')
    test_meta_data_json = (Path(cache_dir) / f'test_session{test_session_id}_test_metadata.json')
    download(train_meta_data_json, f'https://huggingface.co/datasets/s3prl/iemocap_split/raw/4097f2b496c41eed016d4e5eb0ada4cccd46d1f3/Session{test_session_id}/train_meta_data.json', refresh=False)
    download(test_meta_data_json, f'https://huggingface.co/datasets/s3prl/iemocap_split/raw/4097f2b496c41eed016d4e5eb0ada4cccd46d1f3/Session{test_session_id}/test_meta_data.json', refresh=False)

    with open(train_meta_data_json) as f:
        metadata = json.load(f)['meta_data']
        dev_ids = [Path(item['path']).stem for item in metadata]
    with open(test_meta_data_json) as f:
        metadata = json.load(f)['meta_data']
        test_ids = [Path(item['path']).stem for item in metadata]

    train_len = int((1 - valid_ratio) * len(dev_ids))
    train_valid_lens = [train_len, len(dev_ids) - train_len]
    # Use a dedicated generator (seed 0) so the split stays reproducible
    # without clobbering torch's global RNG state for later stages.
    generator = torch.Generator().manual_seed(0)
    train_ids, valid_ids = random_split(dev_ids, train_valid_lens, generator=generator)

    train_data = format_fields(filter_data(train_ids))
    valid_data = format_fields(filter_data(valid_ids))
    test_data = format_fields(filter_data(test_ids))

    def dict_to_csv(data_dict, csv_path):
        # csv column order: sorted data fields first, then the 'id' column
        keys = sorted(data_dict.keys())
        fields = sorted(data_dict[keys[0]].keys())
        data = {field: [data_dict[key][field] for key in keys] for field in fields}
        data['id'] = keys
        pd.DataFrame(data).to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])
    return train_path, valid_path, test_paths
|
class SuperbER(SuperbSID):
    """Emotion recognition task (SUPERB ER) on IEMOCAP, reusing the SuperbSID pipeline."""

    def default_config(self) -> dict:
        # default arguments for every stage of the pipeline; the docstring of
        # this method is generated from this dict at class-creation time
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(iemocap=MISSING, test_fold=MISSING),
            build_encoder=dict(),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=4, shuffle=True),
                valid=dict(batch_size=4),
                test=dict(batch_size=4),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=30000,
                log_step=500,
                eval_step=1000,
                save_step=1000,
                gradient_clipping=1.0,
                gradient_accumulate=8,
                valid_metric='accuracy',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default call :obj:`iemocap_for_superb` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                support arguments in :obj:`iemocap_for_superb`
            target_dir (str): parse your corpus and save the csv files into this directory
            cache_dir (str): directory for temporary files shared across training sessions
            get_path_only (bool): directly return the filepaths no matter they exist or not

        Returns:
            tuple: (train_path, valid_path, test_paths); each csv contains the
            columns ``id``, ``wav_path``, ``label`` and optionally
            ``start_sec`` / ``end_sec`` (NaN means load the full waveform).
        """
        return iemocap_for_superb(**self._get_current_arguments(flatten_dict='prepare_data'))
|
def fsc_for_multi_classification(target_dir: str, cache_dir: str, dataset_root: str, n_jobs: int=6, get_path_only: bool=False):
    """
    Prepare Fluent Speech Commands for multi-class classification
    following :obj:`SuperbIC.prepare_data` format. The standard usage
    is to use three labels jointly: action, object, and location.

    Args:
        dataset_root (str): The root path of Fluent Speech Command
        n_jobs (int): to speed up the corpus parsing procedure
    """
    target_dir = Path(target_dir)
    train_path = target_dir / 'train.csv'
    valid_path = target_dir / 'valid.csv'
    test_paths = [target_dir / 'test.csv']
    if get_path_only:
        return train_path, valid_path, test_paths

    def with_joined_labels(split: dict):
        # join the three slot labels into a single ';'-separated `labels` field
        formatted = dict()
        for key, value in split.items():
            formatted[key] = dict(
                wav_path=value['path'],
                labels=f"{value['action']} ; {value['object']} ; {value['location']}",
            )
        return formatted

    corpus = FluentSpeechCommands(dataset_root, n_jobs)
    train_data, valid_data, test_data = (with_joined_labels(s) for s in corpus.data_split)

    def write_csv(split: dict, csv_path):
        # csv column order: sorted data fields first, then the 'id' column
        keys = sorted(split.keys())
        fields = sorted(split[keys[0]].keys())
        columns = {field: [split[key][field] for key in keys] for field in fields}
        columns['id'] = keys
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    write_csv(train_data, train_path)
    write_csv(valid_data, valid_path)
    write_csv(test_data, test_paths[0])
    return train_path, valid_path, test_paths
|
class SuperbIC(Common):
    """Intent classification task (SUPERB IC) on Fluent Speech Commands.

    Each utterance carries three labels (action, object, location), so this is
    a multi-class-per-position utterance classification task.
    """

    def default_config(self) -> dict:
        # default arguments for every stage of the pipeline; the docstring of
        # this method is generated from this dict at class-creation time
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_encoder=dict(),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=32),
                test=dict(batch_size=32),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=5000,
                save_step=250,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='accuracy',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            # consistency: the sibling SUPERB tasks (ER / KS / SID) all expose
            # an (empty) `evaluate` config section
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default call :obj:`fsc_for_multi_classification` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                arguments for :obj:`fsc_for_multi_classification`
            target_dir (str): parse your corpus and save the csv files into this directory
            cache_dir (str): directory for temporary files shared across training sessions
            get_path_only (bool): directly return the filepaths no matter they exist or not

        Returns:
            tuple: (train_path, valid_path, test_paths); each csv contains the
            columns ``id``, ``wav_path`` and ``labels`` (multiple string
            labels separated by ';').
        """
        return fsc_for_multi_classification(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool=False):
        """Build the label encoder from the data metadata and return the saved path.

        By default generate and save a :obj:`s3prl.dataio.encoder.CategoryEncoders`
        from the ';'-separated `labels` column of all the csv files.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): save the encoder into this directory
            cache_dir (str): directory for temporary files shared across training sessions
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): directly return the filepath no matter it exists or not

        Returns:
            str: encoder_path, the encoder saved in the pickle format
        """
        encoder_path = Path(target_dir) / 'encoder.pkl'
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        # each row's `labels` holds N labels joined by ';'; transpose so every
        # label position gets its own category encoder
        multilabels = [
            [label.strip() for label in multilabel.split(';')]
            for multilabel in all_csv['labels'].tolist()
        ]
        encoder = CategoryEncoders(list(zip(*multilabels)))
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)
        # bugfix: return the saved path (as documented, and as the pipeline
        # expects -- it is later open()-ed by build_dataset), not the object
        return encoder_path

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """Build the dataset for train/valid/test.

        Args:
            build_dataset (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): current experiment directory
            cache_dir (str): directory for temporary files shared across training sessions
            mode (str): train/valid/test
            data_csv (str): the metadata csv file for the specific :code:`mode`
            encoder_path (str): the pickled encoder path for encoding the labels
            frame_shift (int): unused here

        Returns:
            torch Dataset whose items are dictionaries with keys:
            ``x`` (FloatTensor waveform), ``x_len`` (int), ``class_ids``
            (LongTensor, one id per label position), ``labels`` (List[str]),
            ``unique_name`` (str)
        """
        csv = pd.read_csv(data_csv)
        ids = csv['id'].tolist()
        audio_loader = LoadAudio(csv['wav_path'].tolist())
        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        label_encoder = EncodeCategories(
            [[label.strip() for label in multilabel.split(';')] for multilabel in csv['labels'].tolist()],
            encoder,
        )

        class Dataset:
            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {
                    'x': audio['wav'],
                    'x_len': audio['wav_len'],
                    'class_ids': label['class_ids'],
                    'labels': label['labels'],
                    'unique_name': ids[index],
                }

        return Dataset()

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset: Dataset):
        """Return the batch sampler for torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`; the
                ``train`` / ``valid`` / ``test`` sub-dicts are arguments for
                :obj:`FixedBatchSizeBatchSampler`
            target_dir (str): current experiment directory
            cache_dir (str): directory for temporary files shared across training sessions
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader
        """
        def _build_batch_sampler(train: dict=None, valid: dict=None, test: dict=None):
            # default omitted split sections to {} so `**None` never happens
            # (consistent with SuperbKS)
            train = train or {}
            valid = valid or {}
            test = test or {}
            if mode == 'train':
                return FixedBatchSizeBatchSampler(dataset, **train)
            elif mode == 'valid':
                return FixedBatchSizeBatchSampler(dataset, **valid)
            elif mode == 'test':
                return FixedBatchSizeBatchSampler(dataset, **test)

        return _build_batch_sampler(**build_batch_sampler)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Return the task-specific downstream model.

        By default build the :obj:`MeanPoolingLinear` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz); unused here

        Returns:
            :obj:`AbsUtteranceModel`
        """
        return MeanPoolingLinear(downstream_input_size, downstream_output_size, **build_downstream)

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame=None, test_df: pd.DataFrame=None):
        """Build the task defining the train/valid/test step logic and metric reduction.

        By default build :obj:`UtteranceMultiClassClassificationTask`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            valid_df (pd.DataFrame): metadata of the valid set (unused here)
            test_df (pd.DataFrame): metadata of the test set (unused here)

        Returns:
            Task
        """
        return UtteranceMultiClassClassificationTask(model, encoder)
|
def gsc1_for_classification(target_dir: str, cache_dir: str, gsc1: str, gsc1_test: str, get_path_only: bool=False):
    """
    Prepare Google Speech Command for classification task
    following :obj:`SuperbKS.prepare_data` format.

    Args:
        gsc1 (str): The root path of the Google Speech Command V1 training set
        gsc1_test (str): The root path of the Google Speech Command V1 test set
        **others: refer to :obj:`SuperbKS.prepare_data`
    """
    target_dir = Path(target_dir)
    train_path = target_dir / 'train.csv'
    valid_path = target_dir / 'valid.csv'
    test_paths = [target_dir / 'test.csv']
    if get_path_only:
        return train_path, valid_path, test_paths

    def format_fields(data: dict):
        import torchaudio

        formatted = OrderedDict()
        for key, value in data.items():
            point = {'wav_path': value['wav_path'], 'label': value['class_name'], 'start_sec': None, 'end_sec': None}
            if value['class_name'] != '_silence_':
                formatted[key] = point
                continue
            # chop each long background-noise recording into 1-second segments
            info = torchaudio.info(value['wav_path'])
            for start in range(0, info.num_frames, info.sample_rate):
                end = min(start + 1 * info.sample_rate, info.num_frames)
                seg = dict(point)
                seg['start_sec'] = start / info.sample_rate
                seg['end_sec'] = end / info.sample_rate
                formatted[f'{key}_{start}_{end}'] = seg
        return formatted

    corpus = SpeechCommandsV1(gsc1, gsc1_test)
    train_data, valid_data, test_data = (format_fields(s) for s in corpus.data_split)

    def write_csv(split: dict, csv_path):
        # csv column order: sorted data fields first, then the 'id' column
        keys = sorted(split.keys())
        fields = sorted(split[keys[0]].keys())
        columns = {field: [split[key][field] for key in keys] for field in fields}
        columns['id'] = keys
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    write_csv(train_data, train_path)
    write_csv(valid_data, valid_path)
    write_csv(test_data, test_paths[0])
    return train_path, valid_path, test_paths
|
class SuperbKS(SuperbSID):
    """Keyword spotting task (SUPERB KS) on Google Speech Commands V1, reusing the SuperbSID pipeline."""

    def default_config(self) -> dict:
        # default arguments for every stage of the pipeline; the docstring of
        # this method is generated from this dict at class-creation time
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(gsc1=MISSING, gsc1_test=MISSING),
            build_encoder=dict(),
            build_dataset=dict(
                train=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
                valid=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
                test=dict(sox_effects=[['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]),
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32),
                valid=dict(batch_size=32),
                test=dict(batch_size=32),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=5000,
                save_step=1000,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='accuracy',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default call :obj:`gsc1_for_classification` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                support arguments in :obj:`gsc1_for_classification`
            target_dir (str): parse your corpus and save the csv files into this directory
            cache_dir (str): directory for temporary files shared across training sessions
            get_path_only (bool): directly return the filepaths no matter they exist or not

        Returns:
            tuple: (train_path, valid_path, test_paths); each csv contains the
            columns ``id``, ``wav_path``, ``label`` and optionally
            ``start_sec`` / ``end_sec`` (NaN means load the full waveform).
        """
        return gsc1_for_classification(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool=False):
        """Build the label encoder from the data metadata and return the saved path.

        By default generate and save a :obj:`s3prl.dataio.encoder.CategoryEncoder`
        from the `label` column of all the csv files.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): save the encoder into this directory
            cache_dir (str): directory for temporary files shared across training sessions
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): directly return the filepath no matter it exists or not

        Returns:
            str: encoder_path, the encoder saved in the pickle format
        """
        encoder_path = Path(target_dir) / 'encoder.pkl'
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        encoder = CategoryEncoder(all_csv['label'].tolist())
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)
        # bugfix: return the saved path (as documented, and as the pipeline
        # expects -- it is later open()-ed by build_dataset), not the object
        return encoder_path

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset: Dataset):
        """Return the batch sampler for torch DataLoader.

        By default for train and valid, use :obj:`BalancedWeightedSampler`
        (to rebalance the skewed keyword classes); for test use
        :obj:`FixedBatchSizeBatchSampler`.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`;
                ``train`` / ``valid`` are arguments for :obj:`BalancedWeightedSampler`,
                ``test`` for :obj:`FixedBatchSizeBatchSampler`
            target_dir (str): current experiment directory
            cache_dir (str): directory for temporary files shared across training sessions
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader
        """
        def _build_batch_sampler(train: dict=None, valid: dict=None, test: dict=None):
            train = train or {}
            valid = valid or {}
            test = test or {}
            csv = pd.read_csv(data_csv)
            labels = csv['label'].tolist()
            if mode == 'train':
                return BalancedWeightedSampler(labels, **train)
            elif mode == 'valid':
                return BalancedWeightedSampler(labels, **valid)
            elif mode == 'test':
                # NOTE(review): the csv (same length as the dataset) is passed
                # instead of the dataset; presumably the sampler only needs a
                # sized object -- confirm before swapping in `dataset`
                return FixedBatchSizeBatchSampler(csv, **test)

        return _build_batch_sampler(**build_batch_sampler)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_downsample_rate: int):
        """Return the task-specific downstream model.

        By default build the :obj:`MeanPoolingLinear` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_downsample_rate (int): the input feature's downsample rate (from 16 KHz); unused here

        Returns:
            :obj:`AbsUtteranceModel`
        """
        return MeanPoolingLinear(downstream_input_size, downstream_output_size, **build_downstream)
|
def voxceleb1_for_sid(target_dir: str, cache_dir: str, dataset_root: str, n_jobs: int=6, get_path_only: bool=False):
    """
    Prepare VoxCeleb1 for SID following :obj:`SuperbSID.prepare_data` format.

    Args:
        dataset_root (str): The root path of VoxCeleb1
        n_jobs (int): to speed up the corpus parsing procedure
        **others: refer to :obj:`SuperbSID.prepare_data`
    """
    target_dir = Path(target_dir)
    train_path = target_dir / 'train.csv'
    valid_path = target_dir / 'valid.csv'
    test_paths = [target_dir / 'test.csv']
    if get_path_only:
        return train_path, valid_path, test_paths

    corpus = VoxCeleb1SID(dataset_root, n_jobs)

    def write_csv(split: dict, csv_path):
        # csv column order: sorted data fields first, then the 'id' column
        keys = sorted(split.keys())
        fields = sorted(split[keys[0]].keys())
        columns = {field: [split[key][field] for key in keys] for field in fields}
        columns['id'] = keys
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    for split, csv_path in zip(corpus.data_split, [train_path, valid_path, test_paths[0]]):
        write_csv(split, csv_path)
    return train_path, valid_path, test_paths
|
class SuperbSID(Common):
'\n The standard SUPERB SID task\n '
def default_config(self) -> dict:
    # default arguments for every stage of the pipeline; the docstring of
    # this method is generated from this dict at class-creation time
    return dict(
        start=0,
        stop=None,
        target_dir=MISSING,
        cache_dir=None,
        remove_all_cache=False,
        prepare_data=dict(dataset_root=MISSING),
        build_encoder=dict(),
        build_dataset=dict(train=dict(max_secs=8.0)),
        build_batch_sampler=dict(
            train=dict(batch_size=8, shuffle=True),
            valid=dict(batch_size=1),
            test=dict(batch_size=1),
        ),
        build_upstream=dict(name=MISSING),
        build_featurizer=dict(layer_selections=None, normalize=False),
        build_downstream=dict(hidden_size=256),
        build_model=dict(upstream_trainable=False),
        build_task=dict(),
        build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
        build_scheduler=dict(name='ExponentialLR', gamma=0.9),
        save_model=dict(),
        save_task=dict(),
        train=dict(
            total_steps=200000,
            log_step=500,
            eval_step=5000,
            save_step=1000,
            gradient_clipping=1.0,
            gradient_accumulate=4,
            valid_metric='accuracy',
            valid_higher_better=True,
            auto_resume=True,
            resume_ckpt_dir=None,
        ),
        evaluate=dict(),
    )
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
    """Prepare the task-specific data metadata (path, labels...).

    By default call :obj:`voxceleb1_for_sid` with :code:`**prepare_data`.

    Args:
        prepare_data (dict): same in :obj:`default_config`, support arguments in :obj:`voxceleb1_for_sid`
        target_dir (str): parse your corpus and save the csv files into this directory
        cache_dir (str): directory for temporary files shared across training sessions
        get_path_only (bool): directly return the filepaths no matter they exist or not

    Returns:
        tuple: (train_path, valid_path, test_paths); each csv contains the
        columns ``id``, ``wav_path``, ``label`` and optionally
        ``start_sec`` / ``end_sec`` (NaN means load the full waveform).
    """
    kwargs = self._get_current_arguments(flatten_dict='prepare_data')
    return voxceleb1_for_sid(**kwargs)
def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool=False):
    """Build the label encoder from the data metadata and return the saved path.

    By default generate and save a :obj:`s3prl.dataio.encoder.CategoryEncoder`
    from the `label` column of all the csv files.

    Args:
        build_encoder (dict): same in :obj:`default_config`, no argument supported for now
        target_dir (str): save the encoder into this directory
        cache_dir (str): directory for temporary files shared across training sessions
        train_csv_path (str): the train path from :obj:`prepare_data`
        valid_csv_path (str): the valid path from :obj:`prepare_data`
        test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
        get_path_only (bool): directly return the filepath no matter it exists or not

    Returns:
        str: encoder_path, the encoder saved in the pickle format
    """
    encoder_path = Path(target_dir) / 'encoder.pkl'
    if get_path_only:
        return encoder_path

    # gather the labels of every split so the encoder covers all classes
    csvs = [pd.read_csv(train_csv_path), pd.read_csv(valid_csv_path)]
    csvs.extend(pd.read_csv(path) for path in test_csv_paths)
    all_labels = pd.concat(csvs)['label'].tolist()

    encoder = CategoryEncoder(all_labels)
    with open(encoder_path, 'wb') as f:
        pickle.dump(encoder, f)
    return encoder_path
def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
    """Build the dataset for train/valid/test.

    Args:
        build_dataset (dict): same in :obj:`default_config`; has ``train`` /
            ``valid`` / ``test`` sections, each supporting:

            - max_secs (float): randomly crop waveforms longer than this many seconds
            - sox_effects (List[List[str]]): if not None, apply sox effects on each utterance

        target_dir (str): current experiment directory
        cache_dir (str): directory for temporary files shared across training sessions
        mode (str): train/valid/test
        data_csv (str): the metadata csv file for the specific :code:`mode`
        encoder_path (str): the pickled encoder path for encoding the labels
        frame_shift (int): unused here

    Returns:
        torch Dataset whose items are dictionaries with keys:
        ``x`` (FloatTensor waveform), ``x_len`` (int), ``label`` (str),
        ``class_id`` (int), ``unique_name`` (str)
    """
    # dataclasses are used (instead of dict.get) so unknown config keys
    # raise a TypeError instead of being silently ignored
    @dataclass
    class ModeConfigs:
        train: dict = None
        valid: dict = None
        test: dict = None

    mode_configs = ModeConfigs(**build_dataset)
    assert mode in ['train', 'valid', 'test']
    split_dict = getattr(mode_configs, mode) or {}

    @dataclass
    class SplitConfig:
        max_secs: float = None
        sox_effects: List[List[str]] = None

    split_conf = SplitConfig(**split_dict)

    csv = pd.read_csv(data_csv)

    def optional_secs(column: str):
        # NaN (missing) cells become None so the loader uses the full waveform
        if column not in csv.columns:
            return None
        return [None if math.isnan(sec) else sec for sec in csv[column].tolist()]

    start_secs = optional_secs('start_sec')
    end_secs = optional_secs('end_sec')

    audio_loader = LoadAudio(csv['wav_path'].tolist(), start_secs, end_secs, max_secs=split_conf.max_secs, sox_effects=split_conf.sox_effects)
    with open(encoder_path, 'rb') as f:
        encoder = pickle.load(f)
    label_encoder = EncodeCategory(csv['label'].tolist(), encoder)
    ids = csv['id'].tolist()

    class Dataset:
        def __len__(self):
            return len(ids)

        def __getitem__(self, index: int):
            audio = audio_loader[index]
            label = label_encoder[index]
            return {
                'x': audio['wav'],
                'x_len': audio['wav_len'],
                'label': label['label'],
                'class_id': label['class_id'],
                'unique_name': ids[index],
            }

    return Dataset()
def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
'\n Return the batch sampler for torch DataLoader.\n\n Args:\n build_batch_sampler (dict): same in :obj:`default_config`\n\n ==================== ====================\n key description\n ==================== ====================\n train (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`\n valid (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`\n test (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`\n ==================== ====================\n\n target_dir (str): Current experiment directory\n cache_dir (str): If the preprocessing takes too long time, save\n the temporary files into this directory. This directory is expected to be shared\n across different training sessions (different hypers and :code:`target_dir`)\n mode (str): train/valid/test\n data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`\n dataset: the dataset from :obj:`build_dataset`\n\n Returns:\n batch sampler for torch DataLoader\n '
def _superb_sid_batch_sampler(train: dict=None, valid: dict=None, test: dict=None):
train = (train or {})
valid = (valid or {})
test = (test or {})
if (mode == 'train'):
sampler = FixedBatchSizeBatchSampler(dataset, **train)
elif (mode == 'valid'):
sampler = FixedBatchSizeBatchSampler(dataset, **valid)
elif (mode == 'test'):
sampler = FixedBatchSizeBatchSampler(dataset, **test)
return sampler
return _superb_sid_batch_sampler(**build_batch_sampler)
def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
"\n Return the task-specific downstream model.\n By default build the :obj:`MeanPoolingLinear` model\n\n Args:\n build_downstream (dict): same in :obj:`default_config`, support arguments of :obj:`MeanPoolingLinear`\n downstream_input_size (int): the required input size of the model\n downstream_output_size (int): the required output size of the model\n downstream_input_stride (int): the input feature's stride (from 16 KHz)\n\n Returns:\n :obj:`s3prl.nn.interface.AbsUtteranceModel`\n "
model = MeanPoolingLinear(downstream_input_size, downstream_output_size, **build_downstream)
return model
|
class Diarization(Problem):
    """
    Multi-stage speaker-diarization recipe: stage 0 prepares Kaldi-style data,
    stage 1 trains the model, stage 2 dumps frame-level predictions, and
    stage 3 scores them with the external dscore toolkit.
    """

    def run(self, target_dir: str, cache_dir: str, remove_all_cache: bool=False, start: int=0, stop: int=None, num_workers: int=6, eval_batch: int=(-1), device: str='cuda', world_size: int=1, rank: int=0, test_ckpt_dir: str=None, num_speaker: int=2, prepare_data: dict=None, build_dataset: dict=None, build_batch_sampler: dict=None, build_collate_fn: dict=None, build_upstream: dict=None, build_featurizer: dict=None, build_downstream: dict=None, build_model: dict=None, build_task: dict=None, build_optimizer: dict=None, build_scheduler: dict=None, save_model: dict=None, save_task: dict=None, train: dict=None, evaluate: dict=None, scoring: dict=None):
        """
        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the Kaldi-style data directory for speaker diarization
        1        Train the model
        2        Inference the prediction
        3        Score the prediction
        ======== ====================

        Args:
            target_dir (str): The directory that stores the script result.
            cache_dir (str): The directory that caches the processed data.
                Default: ~/.cache/s3prl/data
            remove_all_cache (bool): Whether to remove all the cache stored
                under ``cache_dir``. Default: False
            start (int): The starting stage of the problem script. Default: 0
            stop (int): The stopping stage of the problem script; set ``None``
                to reach the final stage. Default: None
            num_workers (int): num_workers for all the torch DataLoader
            eval_batch (int): During evaluation (valid or test), limit the
                number of batches. Helpful for fast development to check
                everything won't crash. If -1, evaluate the entire epoch.
                Default: -1
            device (str): The device type for all torch-related operations:
                "cpu" or "cuda". Default: "cuda"
            world_size (int): How many processes are running this script
                simultaneously (in parallel); > 1 for distributed training.
                Default: 1
            rank (int): The process id (0 ~ ``world_size - 1``) of this
                process when running distributed.
            test_ckpt_dir (str): Specify the checkpoint path for testing.
                If not given, use the best validation checkpoint.
            num_speaker (int): How many speakers per utterance
            **others: The other arguments like :code:`prepare_data` and
                :code:`build_model` are method-specific arguments for the
                methods of the same names and are not used in the core
                :obj:`run` logic. See the specific method documentation for
                their supported arguments and meaning.
        """
        # Snapshot the fully-resolved arguments of this run for reproducibility.
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize all optional arguments to concrete defaults.
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        prepare_data: dict = (prepare_data or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate: dict = (evaluate or {})
        scoring: dict = (scoring or {})

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir, ignore_errors=True)

        # ---- Stage 0: prepare data ----
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=False)
        # Re-query only the csv paths so later stages still get them even when
        # stage 0 is skipped (start > 0).
        (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=True)

        def check_fn():
            # stage-0 sanity check: every split csv must exist before moving on
            assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        # Convert every split csv into a Kaldi-style data directory; later
        # stages consume these directories rather than the raw csvs.
        for csv in [train_csv, valid_csv, *test_csvs]:
            data_dir = ((target_dir / 'kaldi_data') / Path(csv).stem)
            csv_to_kaldi_dir(csv, data_dir)
        train_data = ((target_dir / 'kaldi_data') / Path(train_csv).stem)
        valid_data = ((target_dir / 'kaldi_data') / Path(valid_csv).stem)
        test_datas = [((target_dir / 'kaldi_data') / Path(csv).stem) for csv in test_csvs]

        # Ground-truth RTTM files, needed by the scoring stage.
        test_rttms = []
        for test_data in test_datas:
            logger.info(f'Prepare RTTM for {test_data}')
            test_rttm = (target_dir / f'{Path(test_data).stem}.rttm')
            kaldi_dir_to_rttm(test_data, test_rttm)
            test_rttms.append(test_rttm)

        # One output activity stream per speaker; the model's downsample rate
        # fixes the prediction frame shift used by the datasets and scoring.
        model_output_size = num_speaker
        model = self.build_model(build_model, model_output_size, build_upstream, build_featurizer, build_downstream)
        frame_shift = model.downsample_rate

        # ---- Stage 1: train ----
        stage_id = 1
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'train', train_csv, train_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'valid', valid_csv, valid_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
            build_model_all_args = dict(build_model=build_model, model_output_size=model_output_size, build_upstream=build_upstream, build_featurizer=build_featurizer, build_downstream=build_downstream)
            build_task_all_args_except_model = dict(build_task=build_task)
            self.train(train, train_dir, build_model_all_args, build_task_all_args_except_model, save_model, save_task, build_optimizer, build_scheduler, evaluate, train_ds, train_bs, self.build_collate_fn(build_collate_fn, 'train'), valid_ds, valid_bs, self.build_collate_fn(build_collate_fn, 'valid'), device=device, eval_batch=eval_batch, num_workers=num_workers, world_size=world_size, rank=rank)

        def check_fn():
            # training must have produced a best-validation checkpoint
            assert (train_dir / 'valid_best').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 2: inference / dump predictions ----
        stage_id = 2
        test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
        # Pre-compute the evaluation output directories (also needed by the
        # stage check and by stage 3 when this stage is skipped).
        test_dirs = []
        for (test_idx, test_data) in enumerate(test_datas):
            test_name = Path(test_data).stem
            test_dir: Path = (((target_dir / 'evaluate') / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-')) / test_name)
            test_dirs.append(test_dir)
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_data) in enumerate(test_datas):
                test_csv = test_csvs[test_idx]
                test_dir = test_dirs[test_idx]
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_dir} and dump prediction')
                (test_ds, test_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'test', test_csv, test_data, num_speaker, frame_shift, build_dataset, build_batch_sampler)
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir)
                logs: dict = self.evaluate(evaluate, 'test', valid_best_task, test_ds, test_bs, self.build_collate_fn(build_collate_fn, 'test'), eval_batch, test_dir, device, num_workers)
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

        def check_fn():
            # each test set must have a dumped 'prediction' directory
            for test_dir in test_dirs:
                assert (test_dir / 'prediction').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 3: score ----
        stage_id = 3
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Score model: {test_ckpt_dir}')
            self.scoring(scoring, stage_id, test_dirs, test_rttms, frame_shift)
        return stage_id

    def scoring(self, scoring: dict, stage_id: int, test_dirs: List[str], test_rttms: List[str], frame_shift: int):
        """
        Score the dumped predictions against the ground-truth RTTMs.

        Args:
            scoring (dict):

                ==================== ====================
                key                  description
                ==================== ====================
                thresholds           (List[int]) - hard-decision thresholds on the 0~1 soft prediction to sweep
                median_filters       (List[int]) - median-filter sizes (frames) to sweep for smoothing the hard prediction
                ==================== ====================

            stage_id (int): the current stage id, used only for log messages
            test_dirs (List[str]): per-test-set evaluation directories containing 'prediction'
            test_rttms (List[str]): per-test-set ground-truth RTTM files
            frame_shift (int): the prediction frame shift in samples

        This method is not designed to be overridden.
        """
        @dataclass
        class ScoreConfig():
            thresholds: List[int]
            median_filters: List[int]

        conf = ScoreConfig(**scoring)
        for (test_idx, test_dir) in enumerate(test_dirs):
            logger.info(f'Stage {stage_id}.{test_idx}: Make RTTM and Score from prediction')
            # sweep all (threshold, median filter) settings and keep the best DER
            (best_der, (best_th, best_med)) = make_rttm_and_score((test_dir / 'prediction'), (test_dir / 'score'), test_rttms[test_idx], frame_shift, conf.thresholds, conf.median_filters)
            logger.info(f'Best dscore DER: {best_der}')
            with (test_dir / 'dscore.yaml').open('w') as f:
                yaml.safe_dump(dict(der=best_der, threshold=best_th, median_filter=best_med), f)

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, num_speakers: int, frame_shift: int, build_dataset: dict, build_batch_sampler: dict):
        # Convenience wrapper: build the mode-specific dataset and its batch sampler.
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, data_dir, num_speakers, frame_shift)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, data_dir, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model):
        """
        Build the task, which defines the logics for every train/valid/test
        forward step for the :code:`model`, and the logics for how to reduce
        all the batch results from multiple train/valid/test steps into metrics.

        By default build :obj:`DiarizationPIT` (permutation-invariant training).

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`

        Returns:
            Task
        """
        task = DiarizationPIT(model)
        return task
|
class SuperbSD(Diarization):
    """
    SUPERB Speaker Diarization: the concrete corpus/dataset/model recipe
    plugged into the generic :obj:`Diarization` multi-stage pipeline.
    """

    def default_config(self):
        # Single source of truth for all the method-specific arguments.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(data_dir=MISSING), build_dataset=dict(chunk_size=2000, subsampling=1, rate=16000, use_last_samples=True, label_delay=0), build_batch_sampler=dict(train=dict(batch_size=8, shuffle=True), valid=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_size=512, rnn_layers=1), build_model=dict(upstream_trainable=False), build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')), save_task=dict(), train=dict(total_steps=30000, log_step=500, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate=4, valid_metric='der', valid_higher_better=False, auto_resume=True, resume_ckpt_dir=None), scoring=dict(thresholds=[0.3, 0.4, 0.5, 0.6, 0.7], median_filters=[1, 11]))

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
        """
        Prepare the task-specific data metadata (path, labels...).

        Args:
            prepare_data (dict): same in :obj:`default_config`

                ==================== ====================
                key                  description
                ==================== ====================
                data_dir             (str) - the standard Kaldi data directory with train/ dev/ test/ subdirectories
                ==================== ====================

            target_dir (str): Parse your corpus and save the csv files into this directory
            cache_dir (str): shared cache directory across training sessions
            get_path_only (bool): Directly return the filepaths no matter whether they exist

        Returns:
            tuple: (train_path (str), valid_path (str), test_paths (List[str]))

            Each csv is segment-level (one row per segment; a recording can have
            multiple overlapped segments from different speakers) with columns:
            record_id, duration, wav_path, utt_id, speaker, start_sec, end_sec.
        """
        @dataclass
        class Config():
            data_dir: str

        conf = Config(**prepare_data)
        target_dir: Path = Path(target_dir)
        train_csv = (target_dir / 'train.csv')
        valid_csv = (target_dir / 'valid.csv')
        test_csv = (target_dir / 'test.csv')
        if get_path_only:
            return (train_csv, valid_csv, [test_csv])
        # standard Kaldi layout: train/ dev/ test/ under data_dir
        kaldi_dir_to_csv((Path(conf.data_dir) / 'train'), train_csv)
        kaldi_dir_to_csv((Path(conf.data_dir) / 'dev'), valid_csv)
        kaldi_dir_to_csv((Path(conf.data_dir) / 'test'), test_csv)
        return (train_csv, valid_csv, [test_csv])

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, num_speakers: int, frame_shift: int):
        """
        Build the dataset for train/valid/test.

        Args:
            build_dataset (dict): same in :obj:`default_config`; forwarded as
                keyword arguments to :obj:`DiarizationDataset`
            target_dir (str): Current experiment directory
            cache_dir (str): shared cache directory across training sessions
            mode (str): train/valid/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            data_dir (str): The converted kaldi data directory from :code:`data_csv`
            num_speakers (int): The number of speakers per utterance
            frame_shift (int): The frame shift of the upstream model (downsample rate from 16 KHz)

        Returns:
            torch Dataset whose items are dicts with keys:
            ``x`` (FloatTensor waveform), ``x_len`` (int),
            ``label`` (LongTensor, per-upstream-frame binary labels),
            ``label_len`` (int), ``record_id`` (str),
            ``chunk_id`` (int, the chunk's original position in the recording,
            used to stitch predictions back together at test time)
        """
        dataset = DiarizationDataset(mode, data_dir, frame_shift=frame_shift, num_speakers=num_speakers, **build_dataset)
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, data_dir: str, dataset):
        """
        Return the batch sampler for torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`

                ==================== ====================
                key                  description
                ==================== ====================
                train                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                valid                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                test                 (dict) - accepted but currently unused; the testing stage always uses :obj:`GroupSameItemSampler`
                ==================== ====================

            target_dir (str): Current experiment directory
            cache_dir (str): shared cache directory across training sessions
            mode (str): train/valid/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            data_dir (str): The converted kaldi data directory from :code:`data_csv`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader

        Raises:
            ValueError: if :code:`mode` is not train/valid/test
        """
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            # BUGFIX: accept the documented (optional) `test` key; previously
            # providing it crashed with TypeError at Config(**build_batch_sampler).
            # Test mode always uses GroupSameItemSampler, so the value is unused.
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.train or {}))
        elif (mode == 'valid'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            # group all chunks of the same recording into one batch so the
            # prediction can be stitched back per recording
            record_ids = get_info(dataset, ['record_id'])
            return GroupSameItemSampler(record_ids)
        else:
            raise ValueError(f'Unsupported mode: {mode}')

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """
        Return the task-specific downstream model.
        By default build the :obj:`SuperbDiarizationModel` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`; forwarded as
                keyword arguments to :obj:`SuperbDiarizationModel`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz)

        Returns:
            :obj:`s3prl.nn.interface.AbsFrameModel`
        """
        return SuperbDiarizationModel(downstream_input_size, downstream_output_size, **build_downstream)
|
def kaldi_dir_to_rttm(data_dir: str, rttm_path: str):
    """
    Convert a Kaldi-style data directory (its ``segments`` and ``utt2spk``
    files) into a single RTTM file at ``rttm_path``.
    """
    data_dir: Path = Path(data_dir)
    segments_file = (data_dir / 'segments')
    utt2spk_file = (data_dir / 'utt2spk')
    assert segments_file.is_file()
    assert utt2spk_file.is_file()

    # utterance id -> speaker label
    utt2spk = {}
    with utt2spk_file.open() as utt2spk_f:
        for raw_line in utt2spk_f:
            parts = raw_line.strip().replace('\n', ' ').split()
            assert (len(parts) == 2)
            utt2spk[parts[0]] = parts[1]

    # one RTTM row per segment: recording, onset, duration, speaker
    with Path(rttm_path).open('w') as rttm_f:
        with segments_file.open() as seg_f:
            for raw_line in seg_f:
                parts = raw_line.strip().replace('\t', ' ').split()
                assert (len(parts) == 4)
                (utt, reco, start, end) = parts
                print(RTTM_FORMAT.format(reco, float(start), (float(end) - float(start)), utt2spk[utt]), file=rttm_f)
|
def make_rttm_and_score(prediction_dir: str, score_dir: str, gt_rttm: str, frame_shift: int, thresholds: List[int], medians: List[int], subsampling: int=1, sampling_rate: int=16000):
    """
    Sweep every (threshold, median-filter) combination: write one RTTM per
    setting, score each against ``gt_rttm`` with dscore, and return the best.

    Returns:
        tuple: (best_der, (best_threshold, best_median_filter))
    """
    Path(score_dir).mkdir(exist_ok=True, parents=True)
    score_root = Path(score_dir)
    dscore_dir = (score_root / 'dscore')
    rttm_dir = (score_root / 'rttm')
    result_dir = (score_root / 'result')

    sweep_results = []
    for th in thresholds:
        for med in medians:
            logger.info(f'Make RTTM with threshold {th}, median filter {med}')
            rttm_file = (rttm_dir / f'threshold-{th}_median-{med}.rttm')
            make_rttm(prediction_dir, rttm_file, th, med, frame_shift, subsampling, sampling_rate)
            logger.info(f'Scoring...')
            result_file = (result_dir / f'threshold-{th}_median-{med}.result')
            overall_der = score_with_dscore(dscore_dir, rttm_file, gt_rttm, result_file)
            logger.info(f'DER: {overall_der}')
            sweep_results.append(((th, med), overall_der))

    # lowest DER wins; ties resolve to the earliest setting, same as a stable sort
    ((best_th, best_med), best_der) = min(sweep_results, key=(lambda item: item[1]))
    return (best_der, (best_th, best_med))
|
def make_rttm(prediction_dir: str, out_rttm_path: str, threshold: int, median: int, frame_shift: int, subsampling: int, sampling_rate: int):
    """
    Convert dumped per-recording soft predictions into a single RTTM file.

    Each file under ``prediction_dir`` is a torch-saved soft activity matrix
    whose columns are speakers; its filename (without extension) is the
    session id. Frames above ``threshold`` become speech, optionally smoothed
    by a ``median``-sized median filter, and consecutive active frames are
    merged into RTTM segments.

    Args:
        prediction_dir (str): directory of torch-saved prediction tensors
        out_rttm_path (str): output RTTM file path (parent dirs are created)
        threshold (int): hard-decision threshold on the soft prediction
        median (int): median filter size in frames; <= 1 disables smoothing
        frame_shift (int): prediction frame shift in samples
        subsampling (int): dataset-level frame subsampling factor
        sampling_rate (int): waveform sampling rate in Hz
    """
    names = sorted(os.listdir(prediction_dir))
    filepaths = [(Path(prediction_dir) / name) for name in names]
    Path(out_rttm_path).parent.mkdir(exist_ok=True, parents=True)
    # seconds per prediction frame; invariant across files, so hoisted out of
    # the loop (it was previously recomputed for every file)
    factor = ((frame_shift * subsampling) / sampling_rate)
    with open(out_rttm_path, 'w') as wf:
        for filepath in filepaths:
            session = filepath.stem
            data = torch.load(filepath).numpy()
            activity = np.where((data > threshold), 1, 0)
            if (median > 1):
                activity = medfilt(activity, (median, 1))
            for (spkid, frames) in enumerate(activity.T):
                # pad with silence so diff() also marks runs that start or end
                # exactly at the recording boundary
                frames = np.pad(frames, (1, 1), 'constant')
                (changes,) = np.where((np.diff(frames, axis=0) != 0))
                # `changes` alternates start/end indices of active runs
                for (s, e) in zip(changes[::2], changes[1::2]):
                    print(RTTM_FORMAT.format(session, (s * factor), ((e - s) * factor), ((session + '_') + str(spkid))), file=wf)
|
def score_with_dscore(dscore_dir: str, hyp_rttm: str, gt_rttm: str, score_result: str) -> float:
    """
    Score ``hyp_rttm`` against ``gt_rttm`` with the external `dscore` toolkit
    and return the overall DER; the detailed scoring output is written to
    ``score_result``.

    The dscore repository is cloned into ``dscore_dir`` on first use (this
    requires network access and a ``git`` executable on PATH).

    Raises:
        subprocess.CalledProcessError: if cloning or scoring fails
    """
    dscore_dir: Path = Path(dscore_dir)
    Path(score_result).parent.mkdir(exist_ok=True, parents=True)
    if (not dscore_dir.is_dir()):
        logger.info(f'Cloning dscore into {dscore_dir}')
        # list-form argv (no shell) so unusual characters in the paths cannot
        # be interpreted by a shell
        subprocess.run(['git', 'clone', 'https://github.com/nryant/dscore.git', str(dscore_dir)], check=True)
    # capture stdout into the result file directly instead of a shell `>` redirect
    with open(score_result, 'w') as result_f:
        subprocess.run(['python3', str(dscore_dir / 'score.py'), '-r', str(gt_rttm), '-s', str(hyp_rttm)], check=True, stdout=result_f)
    return get_overall_der_from_dscore_file(score_result)
|
def get_overall_der_from_dscore_file(score_result: str):
    """
    Parse a dscore result file and return the overall DER.

    The file must contain exactly one line mentioning 'OVERALL'; the DER is
    taken as the fourth field of that line after collapsing tab/space runs.
    """
    with open(score_result) as result_f:
        overall_lines = [line for line in result_f if ('OVERALL' in line)]
    assert (len(overall_lines) == 1)
    collapsed = re.sub(' +', ' ', re.sub('\t+', ' ', overall_lines[0]))
    return float(collapsed.split(' ')[3])
|
def csv_to_kaldi_dir(csv: str, data_dir: str):
    """
    Convert a segment-level csv (one row per segment) into a Kaldi-style data
    directory containing wav.scp, reco2dur, utt2spk, spk2utt and segments.

    Args:
        csv (str): csv with columns record_id, wav_path, duration, utt_id,
            speaker, start_sec, end_sec
        data_dir (str): output Kaldi data directory (created if missing)
    """
    logger.info(f'Convert csv {csv} into kaldi data directory {data_dir}')
    data_dir: Path = Path(data_dir)
    data_dir.mkdir(exist_ok=True, parents=True)
    df = pd.read_csv(csv)
    # BUGFIX: 'duration' is read in the loop below, so validate it up-front;
    # it was previously missing from this check and surfaced as a KeyError
    # in the middle of the conversion loop.
    required = ['record_id', 'wav_path', 'duration', 'utt_id', 'speaker', 'start_sec', 'end_sec']
    for r in required:
        assert (r in df.columns), f'missing required column: {r}'
    reco2path = {}
    reco2dur = {}
    utt2spk = {}
    spk2utt = {}
    segments = []
    for (rowid, row) in tqdm(df.iterrows(), total=len(df)):
        (record_id, wav_path, duration, utt_id, speaker, start_sec, end_sec) = (row['record_id'], row['wav_path'], row['duration'], row['utt_id'], row['speaker'], row['start_sec'], row['end_sec'])
        # every recording must map to a single wav file and a single duration
        assert (reco2path.setdefault(record_id, wav_path) == wav_path)
        assert (reco2dur.setdefault(record_id, duration) == duration)
        # every utterance must have a single speaker
        assert (utt2spk.setdefault(utt_id, str(speaker)) == str(speaker))
        spk2utt.setdefault(speaker, []).append(utt_id)
        segments.append((utt_id, record_id, str(start_sec), str(end_sec)))
    with (data_dir / 'wav.scp').open('w') as f:
        f.writelines([f'{reco} {path}\n' for (reco, path) in reco2path.items()])
    with (data_dir / 'reco2dur').open('w') as f:
        f.writelines([f'{reco} {dur}\n' for (reco, dur) in reco2dur.items()])
    with (data_dir / 'utt2spk').open('w') as f:
        f.writelines([f'{utt} {spk}\n' for (utt, spk) in utt2spk.items()])
    with (data_dir / 'spk2utt').open('w') as f:
        f.writelines([f"{spk} {' '.join(utts)}\n" for (spk, utts) in spk2utt.items()])
    with (data_dir / 'segments').open('w') as f:
        f.writelines([f'{utt} {record} {start} {end}\n' for (utt, record, start, end) in segments])
|
def kaldi_dir_to_csv(data_dir: str, csv: str):
    """
    Convert a Kaldi-style data directory into a segment-level csv (one row per
    segment) with columns: record_id, wav_path, utt_id, speaker, start_sec,
    end_sec, duration.

    Kaldi files separate fields with arbitrary runs of whitespace, so fields
    are split on any whitespace (the previous single-space split broke on
    tabs, aligned columns, or wav paths containing spaces); blank lines are
    skipped.
    """
    logger.info(f'Convert kaldi data directory {data_dir} into csv {csv}')
    data_dir: Path = Path(data_dir)
    assert (data_dir / 'wav.scp').is_file()
    assert (data_dir / 'segments').is_file()
    assert (data_dir / 'utt2spk').is_file()
    assert (data_dir / 'reco2dur').is_file()

    def _read_two_column(path, value_fn=str):
        # generic "key value" Kaldi file reader; the value may itself contain
        # spaces (e.g. wav paths), so split only once
        mapping = {}
        with path.open() as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                (key, value) = line.split(maxsplit=1)
                mapping[key] = value_fn(value)
        return mapping

    reco2path = _read_two_column(data_dir / 'wav.scp')
    reco2dur = _read_two_column(data_dir / 'reco2dur', float)
    utt2spk = _read_two_column(data_dir / 'utt2spk')

    row = []
    with (data_dir / 'segments').open('r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            (utt, reco, start, end) = line.split()
            row.append((reco, reco2path[reco], reco2dur[reco], utt, utt2spk[utt], float(start), float(end)))
    (recos, wav_paths, durations, utts, spks, starts, ends) = zip(*row)
    pd.DataFrame(data=dict(record_id=recos, wav_path=wav_paths, utt_id=utts, speaker=spks, start_sec=starts, end_sec=ends, duration=durations)).to_csv(csv, index=False)
|
class BeijingOpera(HearScene):
    # HEAR Beijing Opera percussion classification: a k-fold scene-classification
    # task. All behavior is configured through the `default_cfg` decorators; each
    # method only overrides HearScene defaults and delegates to the parent.

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        # Prepare corpus/datasets/samplers/task with this task's overrides.
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        # Train, selecting checkpoints by top-1 accuracy (higher is better).
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Inference with the parent's default configuration, unchanged.
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Run setup -> train -> inference end-to-end in a single workspace.
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        # Repeat `run` once per fold; `test_fold` is injected per sub-workspace.
        super().cross_validation(**cfg)
|
class CremaD(HearScene):
    # HEAR CREMA-D emotion classification: a k-fold scene-classification task.
    # All behavior is configured through the `default_cfg` decorators; each
    # method only overrides HearScene defaults and delegates to the parent.

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        # Prepare corpus/datasets/samplers/task with this task's overrides.
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        # Train, selecting checkpoints by top-1 accuracy (higher is better).
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Inference with the parent's default configuration, unchanged.
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Run setup -> train -> inference end-to-end in a single workspace.
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        # Repeat `run` once per fold; `test_fold` is injected per sub-workspace.
        super().cross_validation(**cfg)
|
class Dcase2016Task2(HearTimestamp):
    # HEAR DCASE 2016 Task 2 sound-event detection: a timestamp (event
    # prediction) task. All behavior is configured through the `default_cfg`
    # decorators; each method overrides HearTimestamp defaults and delegates
    # to the parent implementation.

    @default_cfg(**HearTimestamp.setup.default_except(corpus=dict(CLS=field(dcase_2016_task2, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), output_size=11, hidden_layers=2), task=dict(CLS=field(EventPredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='multilabel', scores=['event_onset_200ms_fms', 'segment_1s_er'], postprocessing_grid={'median_filter_ms': [250], 'min_duration': [125, 250]})))
    @classmethod
    def setup(cls, **cfg):
        # Prepare corpus/datasets/samplers/task with this task's overrides.
        super().setup(**cfg)

    @default_cfg(**HearTimestamp.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='event_onset_200ms_fms', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """
        Train the setup problem with the train/valid datasets & samplers and
        the task object, selecting checkpoints by onset F-measure.
        """
        super().train(**cfg)

    @default_cfg(**HearTimestamp.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        # Inference with the parent's default configuration, unchanged.
        super().inference(**cfg)

    @default_cfg(**HearTimestamp.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        # Run setup -> train -> inference end-to-end in a single workspace.
        super().run(**cfg)
|
class ESC50(HearScene):
    """Scene-level classification problem configured for the ESC-50 corpus
    (5-fold cross validation, multiclass, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(total_steps=4000, log_step=100, eval_step=500, save_step=100, valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class FSD50k(HearScene):
    """Scene-level multilabel tagging problem configured for the FSD50k corpus
    (fixed train/valid/test splits, mAP as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=newdict(CLS=FixedBatchSizeBatchSampler, batch_size=10, shuffle=True), task=dict(prediction_type='multilabel', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(total_steps=40000, log_step=100, eval_step=1000, save_step=100, valid_metric='mAP', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)
|
class GSC5hr(HearScene):
    """Scene-level multiclass problem configured for the GSC-5hr corpus
    (fixed train/valid/test splits, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)
|
class Gtzan(HearScene):
    """Scene-level multiclass problem configured for the GTZAN corpus
    (k-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class GtzanMusicSpeech(HearScene):
    """Scene-level multiclass problem configured for the GTZAN music/speech
    corpus (k-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class Gunshot(HearScene):
    """Scene-level multiclass problem configured for the gunshot corpus
    (5-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class LibriCount(HearScene):
    """Scene-level multiclass problem configured for the LibriCount corpus
    (5-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class Maestro(HearTimestamp):
    """Timestamp-level (note onset/offset event prediction) problem configured
    for the ``maestro`` corpus, with 5-fold cross validation.

    NOTE(review): :obj:`cross_validation` below is an exact copy of
    :obj:`HearScene.cross_validation`; it is re-implemented here because this
    class inherits :obj:`HearTimestamp`, which does not define it. Consider
    hoisting the shared logic into a common base.
    """

    @default_cfg(**HearTimestamp.setup.default_except(corpus=dict(CLS=field(maestro, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]')), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), output_size=87, hidden_layers=2), task=dict(CLS=field(EventPredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='multilabel', scores=['event_onset_50ms_fms', 'event_onset_offset_50ms_20perc_fms'], postprocessing_grid={'median_filter_ms': [150], 'min_duration': [50]})))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the event-prediction task (see :obj:`HearTimestamp.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearTimestamp.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=15000, log_step=100, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='event_onset_50ms_fms', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers and the task object."""
        super().train(**cfg)

    @default_cfg(**HearTimestamp.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearTimestamp.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearTimestamp.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """
        Except 'num_fold', all other fields are for 'run' for every fold. That is, all folds shared the same
        config (training hypers, dataset root, etc) except 'workspace' and 'test_fold' are different
        """
        cfg = Container(cfg)
        # One sub-workspace per fold under the shared root workspace.
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        for (fold_id, workspace) in enumerate(workspaces):
            # Each fold reuses the full config, overriding only the workspace
            # and which fold is held out for testing.
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # Gather each fold's test metrics and average them per metric name.
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        # Persist the fold-averaged metrics in the root workspace.
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class Nsynth5hr(HearScene):
    """Scene-level multiclass pitch problem configured for the NSynth-5hr
    corpus (fixed train/valid/test splits, pitch accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['pitch_acc', 'chroma_acc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='pitch_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)
|
class HearScene(Problem, Trainer):
    """Base problem for HEAR scene-level (whole-clip) prediction tasks.

    Subclasses (ESC50, FSD50k, ...) only override the default configuration
    (corpus class, metrics, training hypers); the actual setup / train /
    inference / cross-validation machinery lives here.

    NOTE(review): this class is defined after the subclasses that reference it
    in this (apparently decompiled) source ordering — in the original module
    the definition order must differ; confirm before rearranging.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field(FixedBatchSizeBatchSampler, '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str), batch_size='???'), valid_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='hubert', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option'), legacy=True), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str), hidden_layers=2, pooling='mean'), task=dict(CLS=field(ScenePredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='???', scores='???'))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Build train/valid/test datasets & samplers and the task object, and
        persist them all (plus the raw data splits) into the workspace.

        NOTE(review): annotated ``-> Container`` but there is no return
        statement, so this actually returns ``None`` — confirm the intent.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        fix_random_seeds()
        # Build the upstream first: its downsample rate defines the feature
        # frame shift that the datapipes below need.
        upstream = cfg.upstream()
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = cfg.corpus().split(3)
        stats = corpus_stats.add(stats)
        logger.info('Preparing train data')
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        # Tools derived from the train set (e.g. encoders) are shared with the
        # valid/test pipes through the persisted workspace environ.
        stats.override(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **dict(workspace.environ))
        # Persist raw splits and all prepared objects under fixed keys so that
        # the later train/inference stages can reload them.
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task

    @default_cfg(**Trainer.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='???', valid_higher_better='???')))
    @classmethod
    def train(cls, **cfg):
        """Train the setup problem with the train/valid datasets & samplers and the task object."""
        super().train(**cfg)

    @default_cfg(**Trainer.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`Trainer.inference`)."""
        super().inference(**cfg)

    @default_cfg(**Problem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """
        Except 'num_fold', all other fields are for 'run' for every fold. That is, all folds shared the same
        config (training hypers, dataset root, etc) except 'workspace' and 'test_fold' are different
        """
        cfg = Container(cfg)
        # One sub-workspace per fold under the shared root workspace.
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        for (fold_id, workspace) in enumerate(workspaces):
            # Each fold reuses the full config, overriding only the workspace
            # and which fold is held out for testing.
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # Gather each fold's test metrics and average them per metric name.
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        # Persist the fold-averaged metrics in the root workspace.
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class Stroke(HearScene):
    """Scene-level multiclass problem configured for the stroke corpus
    (5-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class HearTimestamp(Problem, Trainer):
    """Base problem for HEAR timestamp-level (framewise event) prediction tasks.

    Differs from :obj:`HearScene` mainly in its default datapipe
    (:obj:`HearTimestampDatapipe`) and the valid/test samplers, which group
    chunks of the same unchunked utterance back together.

    NOTE(review): :obj:`setup` below mirrors :obj:`HearScene.setup` line for
    line; consider factoring the shared logic into one base class.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field(FixedBatchSizeBatchSampler, '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str), batch_size=5), valid_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), test_datapipe=dict(CLS=field(HearTimestampDatapipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='hubert', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option')), downstream=dict(CLS=field('???', '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str)), task=dict(CLS=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str)))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Build train/valid/test datasets & samplers and the task object, and
        persist them all (plus the raw data splits) into the workspace.

        NOTE(review): annotated ``-> Container`` but there is no return
        statement, so this actually returns ``None`` — confirm the intent.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        fix_random_seeds()
        # Build the upstream first: its downsample rate defines the feature
        # frame shift that the datapipes below need.
        upstream = cfg.upstream()
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = cfg.corpus().split(3)
        stats = corpus_stats.add(stats)
        logger.info('Preparing train data')
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        # Tools derived from the train set (e.g. encoders) are shared with the
        # valid/test pipes through the persisted workspace environ.
        stats.override(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **dict(workspace.environ))
        # Persist raw splits and all prepared objects under fixed keys so that
        # the later train/inference stages can reload them.
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
|
class Tonic(HearScene):
    """Scene-level multiclass problem configured for the tonic corpus
    (5-fold cross validation, top-1 accuracy as the valid metric).

    NOTE(review): this class uses ``N_FOLDS`` while every sibling task in this
    module uses ``NUM_FOLDS`` — confirm ``N_FOLDS`` is actually defined and
    intentionally different, otherwise this raises a NameError at import time.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=N_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(N_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class Vocal(HearScene):
    """Scene-level multiclass problem configured for the vocal corpus
    (k-fold cross validation, mAP as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold='???', num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='mAP', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class VoxLingua(HearScene):
    """Scene-level multiclass problem configured for the VoxLingua corpus
    (5-fold cross validation, top-1 accuracy as the valid metric).

    Thin wrapper around :obj:`HearScene`: every method only overrides defaults.
    """

    @default_cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    @classmethod
    def setup(cls, **cfg):
        """Set up datasets, samplers and the task object (see :obj:`HearScene.setup`)."""
        super().setup(**cfg)

    @default_cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the prepared datasets/samplers (see :obj:`HearScene.train`)."""
        super().train(**cfg)

    @default_cfg(**HearScene.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference on the test split (see :obj:`HearScene.inference`)."""
        super().inference(**cfg)

    @default_cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Execute the setup -> train -> inference stages in sequence."""
        super().run(**cfg)

    @default_cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run the full pipeline once per fold and average the test metrics."""
        super().cross_validation(**cfg)
|
class Apc(SslProblem):
    """APC (Autoregressive Predictive Coding) pre-train problem."""

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=RnnApc, input_size=_input_size, num_layers=3, hidden_size=512, dropout=0.1, residual=True), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=AutoregressiveReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the Apc problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream in the legacy APC checkpoint layout.

        Rebuilds the APC config from the saved setup_problem cfg (dropping the
        class and input_size entries, which the legacy format does not store).
        """
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        # NOTE(review): the checkpoint is written next to additional_dir (its
        # parent), not inside it — confirm this is the intended location.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class AudioAlbert(SslProblem):
    """AudioAlbert pre-train problem (transformer feature reconstruction)."""

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the AudioAlbert problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor (SpecHead) and upstream (Transformer) weights in the
        legacy all_states checkpoint layout."""
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # Propagate the normalization target level used by the pre-train datapipe.
        all_states['Upstream_Config']['audio']['target_level'] = _pretrain_task_pipe_config['target_level']
        # NOTE(review): checkpoint is written next to additional_dir (its parent),
        # not inside it — confirm this is the intended location.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class SslProblem(Problem, Trainer):
    """Generic self-supervised pre-training problem.

    Builds the corpus, datapipes, samplers, upstream/predictor models and the
    pre-training task from a declarative config, then stores every produced
    object into a workspace for the later train/inference stages.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(_cls=field('???', '\nThe corpus class. You can add the **kwargs right below this _cls key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), train_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), valid_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), valid_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), test_datapipe=dict(_cls=field('???', '\nThe datapipe class to be applied to the corpus. You can add the **kwargs right below this _cls key', str)), test_sampler=dict(_cls=field('???', '\nThe batch sampler class. You can add the **kwargs right below this _cls key', str)), upstream=dict(_cls=field(S3PRLUpstream, '\nThe class of the upstream NN model. You can add the **kwargs right below this _cls key', str)), predictor=dict(_cls=field('???', '\nThe class of the predictor NN model class for pre-train. You can add the **kwargs right below this _cls key', str)), task=dict(_cls=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this _cls key', str)))
    @classmethod
    def setup_problem(cls, **cfg):
        """Instantiate everything described by ``cfg`` and persist it to the workspace."""
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        # 'upstream' may already be an nn.Module instance, or a config dict
        # holding the class under '_cls' plus its kwargs.
        if (not isinstance(cfg.upstream, nn.Module)):
            upstream = cfg.upstream._cls(**cfg.upstream.kwds())
        else:
            upstream = cfg.upstream
        # 'stats' accumulates corpus/datapipe-derived tools shared across splits.
        stats = Container()
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = cfg.corpus._cls(**cfg.corpus.kwds()).split(3)
        stats.add(corpus_stats)
        logger.info('Preparing train data')
        train_dataset = AugmentedDynamicItemDataset(train_data, tools=stats)
        train_dataset = cfg.train_datapipe._cls(**cfg.train_datapipe.kwds())(train_dataset)
        train_sampler = cfg.train_sampler._cls(train_dataset, **cfg.train_sampler.kwds())
        # Tools built while preparing train data are added to 'stats' so the
        # valid/test splits reuse exactly the same preprocessing.
        stats.add(train_dataset.all_tools())
        logger.info('Preparing valid data')
        valid_dataset = AugmentedDynamicItemDataset(valid_data, tools=stats)
        valid_dataset = cfg.valid_datapipe._cls(**cfg.valid_datapipe.kwds())(valid_dataset)
        valid_sampler = cfg.valid_sampler._cls(valid_dataset, **cfg.valid_sampler.kwds())
        logger.info('Preparing test data')
        test_dataset = AugmentedDynamicItemDataset(test_data, tools=stats)
        test_dataset = cfg.test_datapipe._cls(**cfg.test_datapipe.kwds())(test_dataset)
        test_sampler = cfg.test_sampler._cls(test_dataset, **cfg.test_sampler.kwds())
        logger.info('Preparing model and task')
        predictor = cfg.predictor._cls(**stats, **cfg.predictor.kwds())
        task = cfg.task._cls(upstream, predictor, workspace=workspace, **stats, **cfg.task.kwds())
        # Persist all produced objects so the train/inference stages can load them.
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
        workspace.environ.update(stats)
|
class Mockingjay(SslProblem):
    """Mockingjay pre-train problem (transformer feature reconstruction)."""

    # NOTE(review): this class uses 'pretrain_task_pipe_config' (no leading
    # underscore) while sibling problems use '_pretrain_task_pipe_config' —
    # verify both names refer to the intended pipe config.
    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the Mockingjay problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor (SpecHead) and upstream (Transformer) weights in the
        legacy all_states checkpoint layout."""
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # NOTE(review): checkpoint is written next to additional_dir (its parent),
        # not inside it — confirm this is the intended location.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class Npc(SslProblem):
    """NPC (Non-autoregressive Predictive Coding) pre-train problem."""

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=CnnNpc, input_size=_input_size, kernel_size=15, mask_size=5, n_blocks=4, hidden_size=512, dropout=0.1, residual=True, batch_norm=True, activate='relu', disable_cross_layer=False, vq=dict(codebook_size=[64, 64, 64, 64], code_dim=[128, 128, 128, 128], gumbel_temperature=1.0)), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=FeatReconstructionTask, loss=L1Loss, loss_config=dict(reduction='mean')))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the Npc problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream in the legacy checkpoint layout.

        NOTE(review): this body mirrors Apc.save_additional verbatim (the local
        is even named 'apc_config') — confirm the APC-style layout is also the
        correct legacy format for NPC checkpoints.
        """
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class Tera(SslProblem):
    """TERA pre-train problem (transformer feature reconstruction)."""

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the Tera problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export predictor (SpecHead) and upstream (Transformer) weights in the
        legacy all_states checkpoint layout."""
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        # Propagate the normalization target level used by the pre-train datapipe.
        all_states['Upstream_Config']['audio']['target_level'] = _pretrain_task_pipe_config['target_level']
        # NOTE(review): checkpoint is written next to additional_dir (its parent),
        # not inside it — confirm this is the intended location.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class VqApc(SslProblem):
    """VQ-APC pre-train problem (APC with vector quantization)."""

    @override_parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=_pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=_pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=_pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=RnnApc, input_size=_input_size, num_layers=3, hidden_size=512, dropout=0.1, residual=True, vq=dict(codebook_size=[512], code_dim=[512], gumbel_temperature=0.5)), predictor=dict(_cls=PredictorIdentity), task=dict(_cls=AutoregressiveReconstructionTask, loss=L1Loss))
    @classmethod
    def setup_problem(cls, **cfg):
        """Setup the VqApc problem: train/valid/test datasets & samplers and the task object."""
        super().setup_problem(**cfg)

    @override_parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))
    @classmethod
    def train(cls, **cfg):
        """Train the problem set up by :obj:`setup_problem` (selection on lowest loss)."""
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default inference configuration."""
        super().inference(**cfg)

    @classmethod
    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        """Export the trained upstream in the legacy APC checkpoint layout
        (class and input_size entries are dropped before serialization)."""
        setup_problem_cfg = workspace.get_cfg(cls.setup_problem)
        setup_problem_cfg['upstream'].pop('_cls')
        setup_problem_cfg['upstream'].pop('input_size')
        apc_config = dict(model=dict(paras=setup_problem_cfg['upstream']), data=dict(audio=_audio_config))
        all_states = dict(config=apc_config, model=task.upstream.state_dict(), Upstream_Config=apc_config)
        # NOTE(review): checkpoint is written next to additional_dir (its parent),
        # not inside it — confirm this is the intended location.
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    @override_parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))
    @classmethod
    def run_stages(cls, **cfg):
        """Run setup_problem -> train -> inference as staged execution."""
        super().run_stages(**cfg)
|
class SuperbASR(SuperbProblem):
    """SUPERB Automatic Speech Recognition: LibriSpeech CTC speech-to-text
    with an LSTM downstream and SpecAugment."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=librispeech_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2TextPipe, generate_tokenizer=True), train_sampler=dict(CLS=SortedBucketingSampler, batch_size=32, max_length=2000, shuffle=True), valid_datapipe=dict(CLS=Speech2TextPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=Speech2TextPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=ModelWithSpecaug, model_cfg=dict(CLS=RNNEncoder, module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True), specaug_cfg=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2)), task=dict(CLS=Speech2TextCTCTask)))
    @classmethod
    def setup(cls, **cfg):
        """Build train/valid/test datasets & samplers and the CTC task object."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='wer', valid_higher_better=False)))
    @classmethod
    def train(cls, **cfg):
        """Train the ASR task; model selection on lowest word error rate."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the selected checkpoint on the test split."""
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in one call."""
        super().run(**cfg)
|
class SuperbProblem(Problem, Trainer):
    """Generic SUPERB downstream problem.

    Builds the corpus, datapipes, samplers, the (frozen or finetunable)
    upstream, the per-task downstream head and the task object from a
    declarative config, and stores them all into a workspace.
    """

    @default_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), valid_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), test_datapipe=dict(CLS=field('???', '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=field('???', '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str)), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='???', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option')), downstream=dict(CLS=field('???', '\nThe downstream model class for each task. You can add the **kwargs right below this CLS key', str)), task=dict(CLS=field('???', '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str)))
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Instantiate everything described by ``cfg`` and persist it to the workspace."""
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        # Seed everything for reproducible dataset/model construction.
        fix_random_seeds()
        upstream = cfg.upstream()
        # 'stats' carries derived values shared by the datapipes, e.g. the
        # upstream's feature frame shift.
        stats = Container(feat_frame_shift=upstream.downsample_rate)
        logger.info('Preparing corpus')
        (train_data, valid_data, test_data, corpus_stats) = Container(cfg.corpus()).split(3)
        stats.add(corpus_stats)
        logger.info('Preparing train data')
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        # Tools built on train data (tokenizers, encoders, ...) are published to
        # the workspace environ so valid/test reuse the same preprocessing.
        stats.add(train_dataset.all_tools())
        workspace.environ.update(stats)
        logger.info('Preparing valid data')
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
        valid_sampler = cfg.valid_sampler(valid_dataset)
        logger.info('Preparing test data')
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
        test_sampler = cfg.test_sampler(test_dataset)
        logger.info('Preparing model and task')
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **stats)
        # Persist all produced objects so the train/inference stages can load them.
        workspace['train_data'] = train_data
        workspace['valid_data'] = valid_data
        workspace['test_data'] = test_data
        workspace['train_dataset'] = train_dataset
        workspace['train_sampler'] = train_sampler
        workspace['valid_dataset'] = valid_dataset
        workspace['valid_sampler'] = valid_sampler
        workspace['test_dataset'] = test_dataset
        workspace['test_sampler'] = test_sampler
        workspace['task'] = task
|
class SuperbER(SuperbProblem):
    """SUPERB Emotion Recognition: IEMOCAP utterance classification with
    leave-one-session-out cross validation."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=iemocap_for_superb, dataset_root='???', test_fold=field('???', 'The session in IEMOCAP used for testing.\nThe other sessions will be used for training and validation.')), train_datapipe=dict(CLS=UtteranceClassificationPipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4, shuffle=True), valid_datapipe=dict(CLS=UtteranceClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4), test_datapipe=dict(CLS=UtteranceClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=4), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Build train/valid/test datasets & samplers and the classification task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=30000, log_step=500, eval_step=1000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=8, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train the ER task; model selection on highest accuracy."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the selected checkpoint on the test split."""
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in one call."""
        super().run(**cfg)

    @default_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    @classmethod
    def cross_validation(cls, **cfg):
        """Run :obj:`run` once per fold, then average the folds' test metrics.

        Except 'num_fold', all other fields are for 'run' for every fold. That
        is, all folds share the same config (training hypers, dataset root,
        etc.) except 'workspace' and 'test_fold' differ per fold.
        """
        cfg = Container(cfg)
        # One sub-workspace per fold under the root workspace.
        workspaces = [str((Workspace(cfg.workspace) / f'fold_{fold_id}')) for fold_id in range(cfg.num_fold)]
        for (fold_id, workspace) in enumerate(workspaces):
            # Clone so each fold gets an independent config; 'num_fold' is not
            # a 'run' option and must be dropped.
            fold_cfg = cfg.clone().deselect('num_fold')
            fold_cfg.workspace = workspace
            fold_cfg.setup.corpus.test_fold = fold_id
            cls.run(**fold_cfg)
        # Collect per-fold test metrics and average each metric across folds.
        metrics = defaultdict(list)
        for (fold_id, workspace) in enumerate(workspaces):
            workspace = Workspace(workspace)
            metric = workspace['test_metrics']
            for (key, value) in metric.items():
                metrics[key].append(value)
        avg_result = dict()
        for (key, values) in metrics.items():
            avg_score = (sum(values) / len(values))
            avg_result[key] = avg_score
            logger.info(f'Average {key}: {avg_score}')
        Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
|
class SuperbIC(SuperbProblem):
    """SUPERB Intent Classification: Fluent Speech Commands multi-slot
    utterance classification."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=fsc_for_multiple_classfication, dataset_root='???'), train_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32, shuffle=True), valid_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), test_datapipe=dict(CLS=UtteranceMultipleCategoryClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceMultiClassClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Build train/valid/test datasets & samplers and the multi-class task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=5000, save_step=250, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train the IC task; model selection on highest accuracy."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the selected checkpoint on the test split."""
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in one call."""
        super().run(**cfg)
|
class SuperbKS(SuperbProblem):
    """SUPERB Keyword Spotting: Google Speech Commands v1 utterance
    classification with class-balanced sampling."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=gsc_v1_for_superb, dataset_root='???'), train_datapipe=dict(CLS=UtteranceClassificationPipe, train_category_encoder=True, sox_effects=EFFECTS), train_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32), valid_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS), valid_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32), test_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """Build train/valid/test datasets & samplers and the classification task."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=5000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train the KS task; model selection on highest accuracy."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the selected checkpoint on the test split."""
        super().inference(**cfg)

    # NOTE(review): unlike the sibling problems, 'run' here uses a plain
    # default_cfg instead of **SuperbProblem.run.default_except(...), so it may
    # not inherit the parent's run defaults — confirm this is intentional.
    @default_cfg(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume'))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in one call."""
        super().run(**cfg)
|
class SuperbPR(SuperbProblem):
    """SUPERB Phoneme Recognition: LibriSpeech CTC speech-to-phoneme with a
    frame-level linear downstream."""

    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=librispeech_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2PhonemePipe), train_sampler=dict(CLS=SortedSliceSampler, batch_size=16, max_length=300000), valid_datapipe=dict(CLS=Speech2PhonemePipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8), test_datapipe=dict(CLS=Speech2PhonemePipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8), downstream=dict(CLS=FrameLevelLinear), task=dict(CLS=Speech2TextCTCTask, log_metrics=['per'])))
    @classmethod
    def setup(cls, **cfg):
        """Build train/valid/test datasets & samplers and the CTC task object."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.01), trainer=dict(total_steps=100000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate_steps=2, valid_metric='per', valid_higher_better=False)))
    @classmethod
    def train(cls, **cfg):
        """Train the PR task; model selection on lowest phoneme error rate."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Evaluate the selected checkpoint on the test split."""
        super().inference(**cfg)

    # NOTE(review): this 'run' additionally deselects 'dryrun', unlike the
    # sibling problems which only deselect 'workspace' and 'resume' — confirm
    # the difference is intentional.
    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference in one call."""
        super().run(**cfg)
|
class SuperbSDDatapipe(SequentialDataPipe):
    """Datapipe for SUPERB Speaker Diarization.

    Pipeline: unfold each recording into fixed 2000-frame chunks, build the
    frame-level multi-class speaker tagging target, load the audio, and expose
    the keys the diarization task consumes (x/x_len/label/label_len plus the
    recording id and chunk order for re-assembly at scoring time).
    """

    def __init__(self, feat_frame_shift: int, sample_rate: int=16000, **kwds):
        # feat_frame_shift: upstream feature frame shift (samples per frame),
        # used to align chunking and tagging with the feature rate.
        super().__init__(UnfoldChunkByFrame(min_chunk_frames=2000, max_chunk_frames=2000, step_frames=2000, feat_frame_shift=feat_frame_shift, sample_rate=sample_rate), BuildMultiClassTagging(sample_rate=sample_rate, feat_frame_shift=feat_frame_shift), LoadAudio(audio_sample_rate=sample_rate), SetOutputKeys(x='wav', x_len='wav_len', label='multiclass_tag', label_len='tag_len', rec_id='unchunked_id', order_in_rec='chunk_index'))
|
def prediction_numpy_to_segment_secs(prediction: np.ndarray, threshold: float=0.5, median_filter: int=1, frame_shift: int=160, subsampling: int=1, sampling_rate: int=16000):
    """Convert frame-level class probabilities into per-class segments in seconds.

    Args:
        prediction: (timestamps, class_num) array, all values in 0~1
        threshold: frames with probability strictly above this become active
        median_filter: kernel length for median smoothing along time (1 = off)
        frame_shift: samples per prediction frame
        subsampling: extra temporal subsampling factor of the prediction
        sampling_rate: audio sampling rate in Hz

    Returns:
        dict mapping str(class_id) -> list of (start_sec, end_sec) tuples;
        classes with no active frames are omitted.
    """
    binarized = (prediction > threshold).astype(int)
    if median_filter > 1:
        # Smooth spurious single-frame flips along the time axis only.
        binarized = medfilt(binarized, (median_filter, 1))
    sec_per_frame = (frame_shift * subsampling) / sampling_rate
    segments = {}
    for class_idx, class_frames in enumerate(binarized.T):
        # Zero-pad both ends so every active run yields a matched on/off pair.
        padded = np.pad(class_frames, (1, 1), 'constant')
        boundaries = np.nonzero(np.diff(padded) != 0)[0]
        if len(boundaries) == 0:
            continue
        # boundaries alternate onset, offset, onset, offset, ...
        segments[str(class_idx)] = [
            (onset * sec_per_frame, offset * sec_per_frame)
            for onset, offset in zip(boundaries[::2], boundaries[1::2])
        ]
    return segments
|
class SuperbSD(SuperbProblem):
'\n Superb Intent Classification problem\n '
@default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=kaldi_for_multiclass_tagging, dataset_root='???'), train_datapipe=dict(CLS=SuperbSDDatapipe, train_category_encoder=True), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8, shuffle=True), valid_datapipe=dict(CLS=SuperbSDDatapipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=SuperbSDDatapipe), test_sampler=dict(CLS=GroupSameItemSampler, item_name='unchunked_id', item_order_name='chunk_index'), downstream=dict(CLS=SuperbDiarizationModel, output_size=2, hidden_size=512, rnn_layers=1), task=dict(CLS=DiarizationPIT)))
@classmethod
def setup(cls, **cfg):
    # Build train/valid/test datasets & samplers and the permutation-invariant
    # diarization task; test batches group chunks of the same recording so
    # predictions can be re-assembled per recording.
    super().setup(**cfg)
@default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=30000, log_step=500, eval_step=500, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=4, valid_metric='der', valid_higher_better=False)))
@classmethod
def train(cls, **cfg):
'\n Train the setup problem with the train/valid datasets & samplers and the task object\n '
super().train(**cfg)
@default_cfg(**SuperbProblem.inference.default_cfg)
@classmethod
def inference(cls, **cfg):
super().inference(**cfg)
@default_cfg(workspace='???', prediction=field('prediction', 'The directory name under the workspace containing all the predicted numpy'), test_data=field('test_data', 'The testing data (in dict) under this workspace'), median_filters=field([1, 11], 'The median filter sizes to try when scoring'), thresholds=field([0.3, 0.4, 0.5, 0.6, 0.7], 'The threshold to try when determining 0/1 hard prediction.\nThe raw predictions are all between 0~1\n'), frame_shift=field(None, 'The frame shift of the prediction np.ndarray. Used to map the frame-level prediction back to seconds', int))
@classmethod
def scoring(cls, **cfg):
cfg = Container(cfg)
workspace = Workspace(cfg.workspace)
frame_shift = (cfg.frame_shift or workspace.environ['feat_frame_shift'])
test_data: dict = workspace[cfg.test_data]
test_segments = {reco: data_point['segments'] for (reco, data_point) in test_data.items()}
test_rttm = workspace.put(test_segments, 'test_rttm', 'rttm')
rttm_dir = (workspace / 'rttm')
scoring_dir = (workspace / 'scoring')
scoring_dir.mkdir(exist_ok=True, parents=True)
all_ders = []
reco2pred = {}
for p in tqdm((workspace / cfg.prediction).files(), desc='Load prediction'):
reco2pred[p] = (workspace / cfg.prediction)[p]
for median_filter in cfg.median_filters:
for threshold in cfg.thresholds:
logger.info(f'Decode prediction numpy array with the setting: median filter={median_filter}, threshold={threshold}')
all_segments = dict()
workspace = Workspace(workspace)
at_least_one_segment = False
for p in tqdm((workspace / cfg.prediction).files(), desc='prediction to seconds'):
segments = prediction_numpy_to_segment_secs(reco2pred[p], threshold, median_filter, frame_shift)
if (len(segments) > 0):
at_least_one_segment = True
all_segments[p] = segments
if (not at_least_one_segment):
logger.info('No segments found under this decoding setting')
continue
identifier = f'hyp_threshold-{threshold}_median-{median_filter}'
hyp_rttm = rttm_dir.put(all_segments, identifier, 'rttm')
overall_der = cls.score_with_dscore(dscore_dir=(workspace / 'dscore'), hyp_rttm=hyp_rttm, gt_rttm=test_rttm, score_file=Path((scoring_dir / identifier)))
logger.info(f'Overall DER with median_filter {median_filter} and threshold {threshold}: {overall_der}')
all_ders.append(overall_der)
all_ders.sort()
best_der = all_ders[0]
logger.info(f'Best DER on test data: {best_der}')
workspace.put(dict(der=best_der), 'test_metric', 'yaml')
@default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference', 'scoring'], start_stage='setup', final_stage='scoring', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun'), scoring=scoring.default_cfg.deselect('workspace')))
@classmethod
def run(cls, **cfg):
super().run(**cfg)
@default_cfg(dscore_dir=field('???', "The directory containing the 'dscore' repository"), hyp_rttm=field('???', 'The hypothesis rttm file'), gt_rttm=field('???', 'The ground truth rttm file'), score_file=field('???', 'The scored result file'))
@classmethod
def score_with_dscore(cls, **cfg) -> float:
"\n This function returns the overall DER score, and will also write the detailed scoring results\n to 'score_file'\n "
cfg = Container(cfg)
dscore_dir = Workspace(cfg.dscore_dir)
if ((not dscore_dir.is_dir()) or ('score' not in dscore_dir.files())):
subprocess.check_output(f'git clone https://github.com/nryant/dscore.git {dscore_dir}', shell=True).decode('utf-8')
result = subprocess.check_call(f'python3 {dscore_dir}/score.py -r {cfg.gt_rttm} -s {cfg.hyp_rttm} > {cfg.score_file}', shell=True)
assert (result == 0), 'The scoring step fail.'
with open(cfg.score_file) as file:
lines = file.readlines()
overall_lines = [line for line in lines if ('OVERALL' in line)]
assert (len(overall_lines) == 1)
overall_line = overall_lines[0]
overall_line = re.sub('\t+', ' ', overall_line)
overall_line = re.sub(' +', ' ', overall_line)
overall_der = float(overall_line.split(' ')[3])
return overall_der
|
class SuperbSF(SuperbProblem):
    """
    Superb Slot Filling (SF) problem

    Speech-to-text on the Snips corpus with slot tags, trained with a CTC task and
    evaluated with WER/CER plus slot type/value/edit F1 metrics.
    """
    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=snips_for_speech2text, dataset_root='???'), train_datapipe=dict(CLS=Speech2TextPipe, generate_tokenizer=True, vocab_type='character-slot', vocab_file=_urls_to_filepaths(VOCAB_URL), slots_file=_urls_to_filepaths(SLOTS_URL)), train_sampler=dict(CLS=SortedSliceSampler, batch_size=32, max_length=300000), valid_datapipe=dict(CLS=Speech2TextPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=Speech2TextPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=ModelWithSpecaug, model_cfg=dict(CLS=RNNEncoder, module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True), specaug_cfg=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2)), task=dict(CLS=Speech2TextCTCTask, log_metrics=['wer', 'cer', 'slot_type_f1', 'slot_value_cer', 'slot_value_wer', 'slot_edit_f1_full', 'slot_edit_f1_part'])))
    @classmethod
    def setup(cls, **cfg):
        """Setup the SF problem: train/valid/test datasets & samplers and the task object."""
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='slot_type_f1', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """Train with the setup datasets/samplers and task; selects by slot_type_f1."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the trained task; delegates to the parent problem."""
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference sequentially."""
        super().run(**cfg)
|
class SuperbSIDTrainPipe(DataPipe):
    """Training datapipe for Superb SID.

    Wraps utterance classification with a random crop (bounded by ``max_secs``)
    and exposes the cropped waveform as ``x`` / ``x_len``.
    """

    def __init__(self, train_category_encoder: bool=False, max_secs: float=None) -> None:
        self.pipes = SequentialDataPipe(
            UtteranceClassificationPipe(train_category_encoder=train_category_encoder),
            RandomCrop(max_secs=max_secs),
            SetOutputKeys(dict(x='wav_crop', x_len='wav_crop_len')),
        )

    def forward(self, dataset):
        # Apply the whole sub-pipeline and hand back the transformed dataset.
        return self.pipes(dataset)
|
class SuperbSID(SuperbProblem):
    """
    Superb Speaker Identification (SID) problem
    """
    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=voxceleb1_for_utt_classification, dataset_root='???'), train_datapipe=dict(CLS=SuperbSIDTrainPipe, train_category_encoder=True, max_secs=8.0), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=8, shuffle=True), valid_datapipe=dict(CLS=UtteranceClassificationPipe), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=UtteranceClassificationPipe), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=MeanPoolingLinear, hidden_size=256), task=dict(CLS=UtteranceClassificationTask)))
    @classmethod
    def setup(cls, **cfg):
        """
        This setups the SID problem, containing train/valid/test datasets & samplers and a task object
        """
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.0001), trainer=dict(total_steps=200000, log_step=500, eval_step=5000, save_step=1000, gradient_clipping=1.0, gradient_accumulate_steps=4, valid_metric='accuracy', valid_higher_better=True)))
    @classmethod
    def train(cls, **cfg):
        """
        Train the setup problem with the train/valid datasets & samplers and the task object
        """
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the trained task; delegates to the parent problem."""
        super().inference(**cfg)

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference sequentially."""
        # NOTE(review): unlike the other Superb problems in this file, 'dryrun' is
        # not deselected from the sub-configs here -- confirm whether intentional.
        super().run(**cfg)
|
class SuperbSV(SuperbProblem):
    """
    Superb Speaker Verification problem
    """
    @default_cfg(**SuperbProblem.setup.default_except(corpus=dict(CLS=voxceleb1_for_sv, dataset_root='???'), train_datapipe=dict(CLS=SpeakerVerificationPipe, random_crop_secs=8.0, sox_effects=EFFECTS), train_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=10, shuffle=True), valid_datapipe=dict(CLS=SpeakerVerificationPipe, sox_effects=EFFECTS), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=SpeakerVerificationPipe, sox_effects=EFFECTS), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), downstream=dict(CLS=SuperbXvector), task=dict(CLS=SpeakerVerification, loss_type='amsoftmax', loss_cfg=dict(margin=0.4, scale=30))))
    @classmethod
    def setup(cls, **cfg):
        """
        This setups the ASV problem, containing train/valid/test datasets & samplers and a task object
        """
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(optimizer=dict(CLS='torch.optim.AdamW', lr=0.0001), trainer=dict(total_steps=200000, log_step=500, eval_step=field(10000000000.0, 'ASV do not use validation set'), save_step=20000, gradient_clipping=1000.0, gradient_accumulate_steps=5, valid_metric='eer', valid_higher_better=False, max_keep=10)))
    @classmethod
    def train(cls, **cfg):
        """
        Train the setup problem with the train/valid datasets & samplers and the task object
        """
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_except(inference_steps=field([20000, 40000, 60000, 80000, 100000, 120000, 140000, 160000, 180000, 200000], 'The steps used for inference\n', 'egs: [900, 1000] - use the checkpoint of 90 and 100 steps for inference')))
    @classmethod
    def inference(cls, **cfg):
        """
        Evaluate the checkpoint saved at every step in ``inference_steps`` on the
        selected split, log all metrics per step, and record the minimum EER.
        """
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        dataset = workspace[f'{cfg.split_name}_dataset']
        sampler = workspace[f'{cfg.split_name}_sampler']
        # BUGFIX: `sampler` here is a batch sampler (FixedBatchSizeBatchSampler in
        # the default config). Passed positionally it would bind to DataLoader's
        # `batch_size` parameter, so pass it explicitly as `batch_sampler`.
        dataloader = DataLoader(dataset, batch_sampler=sampler, num_workers=cfg.n_jobs)
        with torch.no_grad():
            all_eers = []
            for step in cfg.inference_steps:
                step_dir = (workspace / f'step-{step}')
                task = step_dir['task']
                task = task.to(cfg.device)
                task.eval()
                test_results = []
                for (batch_idx, batch) in enumerate(tqdm(dataloader, desc='Test', total=len(dataloader))):
                    batch = batch.to(cfg.device)
                    result = task.test_step(**batch)
                    # cacheable() detaches/moves results so they can be accumulated.
                    test_results.append(result.cacheable())
                logs: Logs = task.test_reduction(test_results).logs
                logger.info(f'Step {step}')
                metrics = {key: value for (key, value) in logs.scalars()}
                step_dir.put(metrics, 'test_metrics', 'yaml')
                for (key, value) in metrics.items():
                    logger.info(f'{key}: {value}')
                all_eers.append(metrics['EER'])
        # The reported metric is the best (minimum) EER over all checkpoints.
        workspace.put({'minEER': min(all_eers)}, 'test_metrics', 'yaml')

    @default_cfg(**SuperbProblem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume', 'dryrun'), train=train.default_cfg.deselect('workspace', 'resume', 'dryrun'), inference=inference.default_cfg.deselect('workspace', 'resume', 'dryrun')))
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference sequentially."""
        super().run(**cfg)
|
def get_downstream_args():
    """Parse the downstream CLI arguments, resolve auto-resume/resume logic, and
    load the experiment config.

    Returns:
        (args, config, backup_files): the merged argparse Namespace, the loaded
        yaml config dict, and extra files to back up into the experiment dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['train', 'evaluate', 'inference'], required=True)
    parser.add_argument('-t', '--evaluate_split', default='test')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('--backend', default='nccl', help='The backend for distributed training')
    parser.add_argument('--local_rank', type=int, help='The GPU id this process should use while distributed training. None when not launched by torch.distributed.launch')
    parser.add_argument('-e', '--past_exp', metavar='{CKPT_PATH,CKPT_DIR}', help='Resume training from a checkpoint')
    parser.add_argument('-i', '--init_ckpt', metavar='CKPT_PATH', help='Load the checkpoint for evaluation')
    parser.add_argument('-c', '--config', help='The yaml file for configuring the whole experiment except the upstream model')
    parser.add_argument('-d', '--downstream', help=' Typically downstream dataset need manual preparation. Please check downstream/README.md for details')
    parser.add_argument('-v', '--downstream_variant', help='Downstream vairants given the same expert')
    parser.add_argument('--hub', default='torch', choices=['torch', 'huggingface'], help='The model Hub used to retrieve the upstream model.')
    upstreams = [attr for attr in dir(hub) if (attr[0] != '_')]
    parser.add_argument('-u', '--upstream', help=f'Upstreams with "_local" or "_url" postfix need local ckpt (-k) or config file (-g). Other upstreams download two files on-the-fly and cache them, so just -u is enough and -k/-g are not needed. Please check upstream/README.md for details. Available options in S3PRL: {upstreams}. ')
    parser.add_argument('-k', '--upstream_ckpt', metavar='{PATH,URL,GOOGLE_DRIVE_ID}', help='Only set when the specified upstream need it')
    parser.add_argument('-g', '--upstream_model_config', help='The config file for constructing the pretrained model')
    parser.add_argument('-r', '--upstream_refresh', action='store_true', help='Re-download cached ckpts for on-the-fly upstream variants')
    parser.add_argument('-f', '--upstream_trainable', action='store_true', help='Fine-tune, set upstream.train(). Default is upstream.eval()')
    parser.add_argument('-s', '--upstream_feature_selection', default='hidden_states', help='Specify the layer to be extracted as the representation')
    parser.add_argument('-l', '--upstream_layer_selection', type=int, help='Select a specific layer for the features selected by -s')
    parser.add_argument('--upstream_feature_normalize', action='store_true', help='Specify whether to normalize hidden features before weighted sum')
    parser.add_argument('--upstream_model_name', default='model.pt', help='The name of the model file in the HuggingFace Hub repo.')
    parser.add_argument('--upstream_revision', help='The commit hash of the specified HuggingFace Repository')
    parser.add_argument('-n', '--expname', help='Save experiment at result/downstream/expname')
    parser.add_argument('-p', '--expdir', help='Save experiment at expdir')
    parser.add_argument('-a', '--auto_resume', action='store_true', help='Auto-resume if the expdir contains checkpoints')
    parser.add_argument('--push_to_hf_hub', default=False, help='Push all files in experiment directory to the Hugging Face Hub. To use this feature you must set HF_USERNAME and HF_PASSWORD as environment variables in your shell')
    parser.add_argument('--hf_hub_org', help='The Hugging Face Hub organisation to push fine-tuned models to')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--device', default='cuda', help='model.to(device)')
    parser.add_argument('--cache_dir', help='The cache directory for pretrained model downloading')
    parser.add_argument('--verbose', action='store_true', help='Print model infomation')
    parser.add_argument('--disable_cudnn', action='store_true', help='Disable CUDNN')
    args = parser.parse_args()
    backup_files = []

    if args.expdir is None:
        args.expdir = f'result/downstream/{args.expname}'

    # Auto-resume: if the experiment dir already holds checkpoints, resume from it.
    if args.auto_resume:
        if os.path.isdir(args.expdir):
            ckpt_pths = glob.glob(f'{args.expdir}/states-*.ckpt')
            if len(ckpt_pths) > 0:
                args.past_exp = args.expdir

    if args.past_exp:
        # Resolve a directory into its latest checkpoint (largest step number).
        if os.path.isdir(args.past_exp):
            ckpt_pths = glob.glob(f'{args.past_exp}/states-*.ckpt')
            assert len(ckpt_pths) > 0
            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[-1].split('.')[0])))
            ckpt_pth = ckpt_pths[-1]
        else:
            ckpt_pth = args.past_exp
        print(f'[Runner] - Resume from {ckpt_pth}')
        ckpt = torch.load(ckpt_pth, map_location='cpu')

        def update_args(old, new, preserve_list=None):
            # Merge the checkpointed args into the current args. Keys listed in
            # preserve_list keep their current (CLI) value.
            # BUGFIX: the original crashed with `key in None` when preserve_list
            # was left as its None default; it also mutated both Namespaces.
            preserve = set(preserve_list or [])
            merged = dict(vars(old))
            merged.update({k: v for (k, v) in vars(new).items() if k not in preserve})
            return Namespace(**merged)

        # These options describe the *current* launch and must never be taken
        # from the checkpoint.
        cannot_overwrite_args = ['mode', 'evaluate_split', 'override', 'backend', 'local_rank', 'past_exp', 'device']
        args = update_args(args, ckpt['Args'], preserve_list=cannot_overwrite_args)
        os.makedirs(args.expdir, exist_ok=True)
        args.init_ckpt = ckpt_pth
        config = ckpt['Config']
    else:
        print('[Runner] - Start a new experiment')
        os.makedirs(args.expdir, exist_ok=True)
        if args.config is None:
            args.config = f'./downstream/{args.downstream}/config.yaml'
        with open(args.config, 'r') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)

    if (args.upstream_model_config is not None) and os.path.isfile(args.upstream_model_config):
        backup_files.append(args.upstream_model_config)

    # CLI override string has the highest priority over both args and config.
    if (args.override is not None) and (args.override.lower() != 'none'):
        override(args.override, args, config)
        os.makedirs(args.expdir, exist_ok=True)

    return (args, config, backup_files)
|
def main():
    """CLI entry point for the downstream train/evaluate/inference runner."""
    logging.basicConfig(level=logging.INFO)
    torch.multiprocessing.set_sharing_strategy('file_system')
    torchaudio.set_audio_backend('sox_io')
    hack_isinstance()
    (args, config, backup_files) = get_downstream_args()
    if args.cache_dir is not None:
        torch.hub.set_dir(args.cache_dir)
    # local_rank is set when launched by torch.distributed.launch: init DDP.
    if args.local_rank is not None:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(args.backend)
    # When resuming training, the distributed topology must match the original run.
    if (args.mode == 'train') and args.past_exp:
        ckpt = torch.load(args.init_ckpt, map_location='cpu')
        now_use_ddp = is_initialized()
        original_use_ddp = (ckpt['Args'].local_rank is not None)
        assert now_use_ddp == original_use_ddp, f'{now_use_ddp} != {original_use_ddp}'
        if now_use_ddp:
            now_world = get_world_size()
            original_world = ckpt['WorldSize']
            assert now_world == original_world, f'{now_world} != {original_world}'
    if args.hub == 'huggingface':
        args.from_hf_hub = True
        # NOTE(review): HfApi().login(username=..., password=...) is removed in
        # recent huggingface_hub releases -- confirm the pinned version supports it.
        hf_user = os.environ.get('HF_USERNAME')
        hf_password = os.environ.get('HF_PASSWORD')
        huggingface_token = HfApi().login(username=hf_user, password=hf_password)
        HfFolder.save_token(huggingface_token)
        print(f'Logged into Hugging Face Hub with user: {hf_user}')
    # Only the leader process snapshots args/config and backup files.
    if is_leader_process():
        with open(os.path.join(args.expdir, f'args_{get_time_tag()}.yaml'), 'w') as file:
            yaml.dump(vars(args), file)
        with open(os.path.join(args.expdir, f'config_{get_time_tag()}.yaml'), 'w') as file:
            yaml.dump(config, file)
        for file in backup_files:
            backup(file, args.expdir)
    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    if args.disable_cudnn:
        torch.backends.cudnn.enabled = False
    else:
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    runner = Runner(args, config)
    # args.mode is restricted by argparse `choices`; dispatch with getattr
    # instead of eval() (same behavior, no code evaluation).
    getattr(runner, args.mode)()
|
def get_pretrain_args():
    """Parse the pre-training CLI arguments, resolve auto-resume/resume logic,
    and load (and back up) the runner/model configs.

    Returns:
        (args, config): the merged argparse Namespace and the runner config dict.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--past_exp', metavar='{CKPT_PATH,CKPT_DIR}', help='Resume training from a checkpoint')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('-c', '--config', metavar='CONFIG_PATH', help='The yaml file for configuring the whole experiment, except the upstream model')
    parser.add_argument('-u', '--upstream', choices=os.listdir('pretrain/'))
    parser.add_argument('-g', '--upstream_config', metavar='CONFIG_PATH', help='The yaml file for configuring the upstream model')
    parser.add_argument('-n', '--expname', help='Save experiment at expdir/expname')
    parser.add_argument('-p', '--expdir', help='Save experiment at expdir')
    parser.add_argument('-a', '--auto_resume', action='store_true', help='Auto-resume if the expdir contains checkpoints')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--device', default='cuda', help='model.to(device)')
    parser.add_argument('--multi_gpu', action='store_true', help='Enables multi-GPU training')
    args = parser.parse_args()

    if args.expdir is None:
        args.expdir = f'result/pretrain/{args.expname}'

    # Auto-resume: if the experiment dir already holds checkpoints, resume from it.
    if args.auto_resume:
        if os.path.isdir(args.expdir):
            ckpt_pths = glob.glob(f'{args.expdir}/states-*.ckpt')
            if len(ckpt_pths) > 0:
                args.past_exp = args.expdir

    if args.past_exp:
        # Resolve a directory into its latest checkpoint (largest step number).
        if os.path.isdir(args.past_exp):
            ckpt_pths = glob.glob(f'{args.past_exp}/states-*.ckpt')
            assert len(ckpt_pths) > 0
            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[-1].split('.')[0])))
            ckpt_pth = ckpt_pths[-1]
        else:
            ckpt_pth = args.past_exp
        print(f'[Runner] - Resume from {ckpt_pth}')
        ckpt = torch.load(ckpt_pth, map_location='cpu')

        def update_args(old, new):
            # Checkpointed args override the current CLI args.
            old_dict = vars(old)
            old_dict.update(vars(new))
            return Namespace(**old_dict)

        args = update_args(args, ckpt['Args'])
        os.makedirs(args.expdir, exist_ok=True)
        args.init_ckpt = ckpt_pth
        config = ckpt['Config']
    else:
        print('[Runner] - Start a new experiment')
        args.init_ckpt = None
        assert args.expname is not None
        os.makedirs(args.expdir, exist_ok=True)
        # Match the upstream name against the pretrain/ recipe directories.
        upstream_dirs = [u for u in os.listdir('pretrain/') if re.search(f'^{u}_|^{u}$', args.upstream)]
        assert len(upstream_dirs) == 1
        if args.config is None:
            args.config = f'pretrain/{upstream_dirs[0]}/config_runner.yaml'
        # BUGFIX: check existence *before* opening the file, so this descriptive
        # error is actually reachable (open() used to raise its own error first).
        if not os.path.isfile(args.config):
            raise FileNotFoundError('Wrong file path for runner config.')
        with open(args.config, 'r') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
        copyfile(args.config, f'{args.expdir}/config_runner.yaml')
        if args.upstream_config is None:
            default_upstream_config = f'pretrain/{upstream_dirs[0]}/config_model.yaml'
            assert os.path.isfile(default_upstream_config)
            args.upstream_config = default_upstream_config
        if not os.path.isfile(args.upstream_config):
            raise FileNotFoundError('Wrong file path for model config.')
        copyfile(args.upstream_config, f'{args.expdir}/config_model.yaml')

    # CLI override string has the highest priority over both args and config.
    if (args.override is not None) and (args.override.lower() != 'none'):
        override(args.override, args, config)
        os.makedirs(args.expdir, exist_ok=True)

    return (args, config)
|
def main():
    """CLI entry point for upstream pre-training."""
    (args, config) = get_pretrain_args()
    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    runner = Runner(args, config)
    # Direct call instead of eval('runner.train')(): same behavior, no eval.
    runner.train()
    runner.logger.close()
|
def get_scheduler(optimizer, total_steps, scheduler_config):
    """Build an LR scheduler from a config dict.

    Args:
        optimizer: the torch optimizer to schedule.
        total_steps (int): total number of training steps, forwarded as
            ``num_training_steps``.
        scheduler_config (dict): must contain ``name`` (suffix of a
            ``get_<name>`` factory in this module); the remaining keys are
            forwarded to that factory.

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    # Deep-copy so popping 'name' does not mutate the caller's config.
    scheduler_config = copy.deepcopy(scheduler_config)
    scheduler_name = scheduler_config.pop('name')
    # Look the factory up in module globals instead of eval()-ing config input.
    factory = globals().get(f'get_{scheduler_name}')
    if factory is None:
        raise ValueError(f'Unsupported scheduler name: {scheduler_name}')
    return factory(optimizer, num_training_steps=total_steps, **scheduler_config)
|
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=(- 1)):
    """Cosine-annealed LR with hard restarts after a linear warmup.

    The multiplier ramps linearly from 0 to 1 over ``num_warmup_steps``, then
    follows ``num_cycles`` full cosine decays from 1 to 0 (each cycle restarting
    at 1), and stays at 0 once ``num_training_steps`` is reached.

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of hard restarts (default 1).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        span = max(1, num_training_steps - num_warmup_steps)
        progress = (current_step - num_warmup_steps) / span
        # Past the training horizon the multiplier is pinned to zero.
        if progress >= 1.0:
            return 0.0
        # Position inside the current restart cycle, in [0, 1).
        cycle_pos = (num_cycles * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycle_pos)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=(- 1)):
    """Cosine-annealed LR after a linear warmup.

    The multiplier ramps linearly from 0 to 1 over ``num_warmup_steps``, then
    traces ``num_cycles`` cosine waves down to 0 by ``num_training_steps``
    (the 0.5 default is a single half-cosine from max to 0).

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine waves (default 0.5).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        span = max(1, num_training_steps - num_warmup_steps)
        progress = (current_step - num_warmup_steps) / span
        wave = math.cos(math.pi * num_cycles * 2.0 * progress)
        # Clamp at zero so the multiplier never goes negative.
        return max(0.0, 0.5 * (1.0 + wave))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by linear decay to 0.

    The multiplier ramps linearly from 0 to 1 over ``num_warmup_steps``, then
    decays linearly from 1 down to 0 at ``num_training_steps`` (clamped at 0
    afterwards).

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step: int):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        remaining = num_training_steps - current_step
        span = max(1, num_training_steps - num_warmup_steps)
        # Clamp at zero once the training horizon is passed.
        return max(0.0, remaining / span)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_sqrt_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by inverse-square-root decay.

    After ``num_warmup_steps`` the multiplier is ``1 / sqrt(step)`` (with the
    step floored at ``num_warmup_steps`` so the curve is continuous).
    ``num_training_steps`` is accepted for interface uniformity but unused.

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps (unused).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step: int):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        effective_step = max(current_step, num_warmup_steps)
        return 1.0 / math.sqrt(effective_step)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_constant_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Linear warmup followed by a constant LR.

    The multiplier ramps linearly from 0 to 1 over ``num_warmup_steps`` and then
    stays at 1 forever. ``num_training_steps`` is accepted for interface
    uniformity but unused.

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps (unused).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step: int):
        # Hold at the base LR after warmup completes.
        if current_step >= num_warmup_steps:
            return 1.0
        # Linear warmup phase.
        return current_step / max(1, num_warmup_steps)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_noam_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """Noam-style LR schedule with linear warmup.

    After warmup the multiplier follows the Transformer recipe
    ``768**-0.5 * min(step**-0.5, step * num_warmup_steps**-1.5)`` (the model
    dimension is hard-coded to 768). ``num_training_steps`` is accepted for
    interface uniformity but unused.

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps (unused).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    def lr_lambda(current_step: int):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        model_scale = 768 ** (-0.5)
        inv_sqrt = current_step ** (-0.5)
        warmup_ramp = current_step * num_warmup_steps ** (-1.5)
        return model_scale * min(inv_sqrt, warmup_ramp)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-07, power=1.0, last_epoch=(- 1)):
    """Polynomial LR decay with linear warmup.

    The LR ramps linearly from 0 to the optimizer's initial LR over
    ``num_warmup_steps``, then decays polynomially down to ``lr_end`` at
    ``num_training_steps`` and stays there.

    Args:
        optimizer: the optimizer whose LR is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        lr_end: final learning rate (default 1e-7); must be below the initial LR.
        power: polynomial power (default 1.0, i.e. linear, matching the fairseq /
            original BERT implementation).
        last_epoch: index of the last epoch when resuming (default -1).

    Returns:
        torch.optim.lr_scheduler.LambdaLR
    """
    lr_init = optimizer.defaults['lr']
    # BUGFIX: error message typo ("must be be" -> "must be").
    assert lr_init > lr_end, f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})'

    def lr_lambda(current_step: int):
        # Linear warmup phase.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Past the horizon: hold at lr_end (expressed as a multiplier of lr_init,
        # since LambdaLR multiplies the returned value by the initial LR).
        elif current_step > num_training_steps:
            return lr_end / lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining ** power + lr_end
            return decay / lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def label_vocab_as_dict(df: pd.DataFrame, key: str, value: str) -> Dict:
    """
    Return the label vocabulary as a dict mapping one column to the other.

    Args:
        df: vocabulary frame with a 'label' and an 'idx' column.
        key: either 'label' or 'idx'; selects which column becomes the dict key.
            The other column becomes the value. NOTE: the `value` argument is
            ignored (it is always derived from `key`) and kept only for
            backward interface compatibility.

    Returns:
        Dict mapping the `key` column to the other column. When key='label',
        labels are cast to str first.

    Raises:
        AssertionError: if `key` is neither 'label' nor 'idx'.
    """
    if key == 'label':
        # Cast labels to str on a copy so the caller's DataFrame is not
        # mutated (the previous implementation wrote back into `df` in place).
        df = df.assign(label=df['label'].astype(str))
        value = 'idx'
    else:
        assert (key == 'idx'), "key argument must be either 'label' or 'idx'"
        value = 'label'
    return df.set_index(key).to_dict()[value]
|
def label_to_binary_vector(label: List, num_labels: int) -> torch.Tensor:
    """
    Convert a list of integer class indices into a multi-hot binary vector.

    Args:
        label: list of integer labels
        num_labels: total number of labels

    Returns:
        A float Tensor of shape (num_labels,) with 1.0 at each given index.
    """
    if len(label) == 0:
        # No active labels: return an all-zero vector.
        return torch.zeros((num_labels,), dtype=torch.float)
    multi_hot = torch.zeros((num_labels,)).scatter(0, torch.tensor(label), 1.0)
    # Sanity check: exactly the requested indices are hot.
    assert set(torch.where(multi_hot == 1.0)[0].numpy()) == set(label)
    return multi_hot
|
def validate_score_return_type(ret: Union[(Tuple[(Tuple[(str, float)], ...)], float)]):
    """
    Validate that a metric returned one of the two supported types.

    Valid return types are:
    - tuple(tuple(string: name of the subtype, float: the value)): the sed_eval
      case, e.g. (("f_measure", value), ("precision", value), ...), matching
      the `scores` attribute of the metric. The first entry is used as the
      optimisation criterion (e.g. for early stopping) wherever one is needed.
    - float: standard single-score behaviour.

    Raises:
        AssertionError: if a tuple return contains a non-(str, float) entry.
        ValueError: for any other return type.
    """
    if isinstance(ret, float):
        return
    if isinstance(ret, tuple):
        entries_ok = all(
            ((type(entry) == tuple) and (type(entry[0]) == str) and (type(entry[1]) == float))
            for entry in ret
        )
        assert entries_ok, 'If the return type of the score is a tuple, all the elements in the tuple should be tuple of type (string, float)'
        return
    raise ValueError(f'Return type {type(ret)} is unexpected. Return type of the score function should either be a tuple(tuple) or float. ')
|
class ScoreFunction():
    """
    A simple abstract base class for score functions.

    Subclasses implement `_compute` and are used as functors via `__call__`,
    which validates the return type.
    """

    def __init__(self, label_to_idx: Dict[(str, int)], name: Optional[str]=None, maximize: bool=True):
        """
        :param label_to_idx: Map from label string to integer index.
        :param name: Override the name of this scoring function.
        :param maximize: Maximize this score? (Otherwise, it's a loss or energy
            we want to minimize, and I guess technically isn't a score.)
        """
        self.label_to_idx = label_to_idx
        # Only override the (class-level) name when an explicit one is given.
        if name:
            self.name = name
        self.maximize = maximize

    def __call__(self, *args, **kwargs) -> Union[(Tuple[(Tuple[(str, float)], ...)], float)]:
        """
        Run the metric's `_compute`, validate the result shape, and return it.
        """
        score = self._compute(*args, **kwargs)
        validate_score_return_type(score)
        return score

    def _compute(self, predictions: Any, targets: Any, **kwargs) -> Union[(Tuple[(Tuple[(str, float)], ...)], float)]:
        """
        Compute the score from predictions and targets.

        Private hook: use the instance as a functor (`__call__`) instead, which
        also validates the return type.
        """
        raise NotImplementedError('Inheriting classes must implement this function')

    def __str__(self):
        return self.name
|
class Top1Accuracy(ScoreFunction):
    """Fraction of examples whose argmax prediction matches the argmax target."""

    name = 'top1_acc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Both inputs are (num_examples, num_classes); targets are one-hot-like.
        assert (predictions.ndim == 2)
        assert (targets.ndim == 2)
        n_correct = 0
        for (target_row, pred_row) in zip(targets, predictions):
            assert (pred_row.ndim == 1)
            assert (target_row.ndim == 1)
            if np.argmax(pred_row) == np.argmax(target_row):
                n_correct += 1
        return n_correct / len(targets)
|
class ChromaAccuracy(ScoreFunction):
    """
    Score specifically for pitch detection -- converts all pitches to chroma first.
    This score ignores octave errors in pitch classification.
    """

    name = 'chroma_acc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        n_correct = 0
        for (target_row, pred_row) in zip(targets, predictions):
            assert (pred_row.ndim == 1)
            assert (target_row.ndim == 1)
            # Compare pitch classes modulo 12 semitones so octave errors don't count.
            if (np.argmax(pred_row) % 12) == (np.argmax(target_row) % 12):
                n_correct += 1
        return n_correct / len(targets)
|
class SoundEventScore(ScoreFunction):
    """
    Scores for sound event detection tasks using sed_eval.
    """

    # Subclasses must bind this to a concrete sed_eval metrics class.
    score_class: sed_eval.sound_event.SoundEventMetrics = None

    def __init__(self, label_to_idx: Dict[(str, int)], scores: Tuple[str], params: Dict=None, name: Optional[str]=None, maximize: bool=True):
        """
        :param scores: Scores to use, from the list of overall SED eval scores.
            The first score in the tuple will be the primary score for this metric.
        :param params: Parameters to pass to the scoring function,
            see inheriting children for details.
        """
        super().__init__(label_to_idx=label_to_idx, name=name, maximize=maximize)
        self.scores = scores
        self.params = params if params is not None else {}
        assert (self.score_class is not None)

    def _compute(self, predictions: Dict, targets: Dict, **kwargs) -> Tuple[(Tuple[(str, float)], ...)]:
        reference_events = self.sed_eval_event_container(targets)
        estimated_events = self.sed_eval_event_container(predictions)
        metrics = self.score_class(event_label_list=list(self.label_to_idx.keys()), **self.params)
        # sed_eval accumulates statistics one file at a time.
        for filename in predictions:
            metrics.evaluate(
                reference_event_list=reference_events.filter(filename=filename),
                estimated_event_list=estimated_events.filter(filename=filename),
            )
        nested_overall_scores: Dict[(str, Dict[(str, float)])] = metrics.results_overall_metrics()
        # Flatten {category: {score: value}} into a single {score: value} map.
        overall_scores: Dict[(str, float)] = dict(ChainMap(*nested_overall_scores.values()))
        # Keep the order requested in self.scores; the first entry is primary.
        return tuple((score_name, overall_scores[score_name]) for score_name in self.scores)

    @staticmethod
    def sed_eval_event_container(x: Dict[(str, List[Dict[(str, Any)]])]) -> MetaDataContainer:
        # Event times arrive in milliseconds; sed_eval expects seconds.
        events = [
            {
                'event_label': str(event['label']),
                'event_onset': (event['start'] / 1000.0),
                'event_offset': (event['end'] / 1000.0),
                'file': filename,
            }
            for (filename, event_list) in x.items()
            for event in event_list
        ]
        return MetaDataContainer(events)
|
class SegmentBasedScore(SoundEventScore):
    """
    Segment-based SED scores: the ground truth and system output are compared
    on a fixed time grid, with each sound event marked as active or inactive
    in every segment.

    See https://tut-arg.github.io/sed_eval/sound_event.html#sed_eval.sound_event.SegmentBasedMetrics # noqa: E501
    for params.
    """

    score_class = sed_eval.sound_event.SegmentBasedMetrics
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.