def test_superb_pr():
with tempfile.TemporaryDirectory() as tempdir:
with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):
class TestPR(SuperbPR):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
from s3prl.dataio.encoder.g2p import G2P
all_wav_paths = wav_paths
all_text = ['hello how are you today', 'fine', 'oh', 'I think is good', 'maybe okay']
g2p = G2P()
all_text = [g2p.encode(text.strip()) for text in all_text]
ids = list(range(len(all_wav_paths)))
df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': all_text})
train_path = (Path(target_dir) / 'train.csv')
valid_path = (Path(target_dir) / 'valid.csv')
test_path = (Path(target_dir) / 'test.csv')
df.iloc[:3].to_csv(train_path, index=False)
df.iloc[3:4].to_csv(valid_path, index=False)
df.iloc[4:].to_csv(test_path, index=False)
return (train_path, valid_path, [test_path])
problem = TestPR()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = 2
config['train']['save_step'] = 2
config['eval_batch'] = 2
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
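# The SUPERB tests in this file rely on s3prl's `pseudo_audio` helper, used as a context
# manager that yields (wav_paths, num_samples) for the requested durations in seconds.
# A minimal, hedged stand-in (an assumption, not the actual s3prl implementation):
from contextlib import contextmanager
from pathlib import Path
import tempfile
import torch
import torchaudio

@contextmanager
def pseudo_audio_sketch(secs, sample_rate=16000):
    # Write one random mono wav per requested duration, then clean everything up on exit.
    with tempfile.TemporaryDirectory() as tmpdir:
        wav_paths, num_samples = [], []
        for idx, sec in enumerate(secs):
            samples = int(sec * sample_rate)
            path = str(Path(tmpdir) / f'{idx}.wav')
            torchaudio.save(path, torch.randn(1, samples), sample_rate)
            wav_paths.append(path)
            num_samples.append(samples)
        yield wav_paths, num_samples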
|
def test_superb_ic():
with tempfile.TemporaryDirectory() as tempdir:
with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):
class TestIC(SuperbIC):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
ids = [Path(path).stem for path in wav_paths]
labels1 = ['a', 'b', 'a', 'c', 'd']
labels2 = ['1', '2', '3', '4', '5']
df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'labels': [f'{label1} ; {label2}' for (label1, label2) in zip(labels1, labels2)]})
train_csv = (Path(target_dir) / 'train.csv')
valid_csv = (Path(target_dir) / 'valid.csv')
test_csv = (Path(target_dir) / 'test.csv')
df.to_csv(train_csv)
df.to_csv(valid_csv)
df.to_csv(test_csv)
return (train_csv, valid_csv, [test_csv])
problem = TestIC()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = 2
config['train']['save_step'] = 2
config['eval_batch'] = 2
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
|
def test_superb_sid():
with tempfile.TemporaryDirectory() as tempdir:
with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):
class TestSID(SuperbSID):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
ids = [Path(path).stem for path in wav_paths]
label = ['a', 'b', 'a', 'c', 'd']
start_secs = [0.0, 0.1, 0.2, None, 0.0]
end_secs = [5.2, 1.0, 0.3, None, 4.9]
df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': label, 'start_sec': start_secs, 'end_sec': end_secs})
train_csv = (Path(target_dir) / 'train.csv')
valid_csv = (Path(target_dir) / 'valid.csv')
test_csv = (Path(target_dir) / 'test.csv')
df.to_csv(train_csv)
df.to_csv(valid_csv)
df.to_csv(test_csv)
return (train_csv, valid_csv, [test_csv])
problem = TestSID()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = 2
config['train']['save_step'] = 2
config['eval_batch'] = 2
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
|
def test_superb_sd():
with tempfile.TemporaryDirectory() as tempdir:
secs = [10, 2, 1, 8, 5]
with pseudo_audio(secs) as (wav_paths, num_samples):
class TestSD(SuperbSD):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
record_id = [Path(path).stem for path in wav_paths]
durations = secs
speaker = ['a', 'b', 'a', 'a', 'b']
utt_id = record_id
start_secs = [0.0, 0.1, 0.2, 0.3, 0.0]
end_secs = [5.2, 1.0, 0.3, 5.4, 4.9]
df = pd.DataFrame(data={'record_id': record_id, 'wav_path': wav_paths, 'duration': durations, 'utt_id': utt_id, 'speaker': speaker, 'start_sec': start_secs, 'end_sec': end_secs})
train_csv = (Path(target_dir) / 'train.csv')
valid_csv = (Path(target_dir) / 'valid.csv')
test_csv = (Path(target_dir) / 'test.csv')
df.to_csv(train_csv)
df.to_csv(valid_csv)
df.to_csv(test_csv)
return (train_csv, valid_csv, [test_csv])
problem = TestSD()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = 2
config['train']['save_step'] = 2
config['eval_batch'] = 2
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
|
def test_superb_asv():
with tempfile.TemporaryDirectory() as tempdir:
secs = [10, 2, 1, 8, 5]
with pseudo_audio(secs) as (wav_paths, num_samples):
class TestASV(SuperbASV):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
train_csv = (Path(target_dir) / 'train.csv')
test_csv = (Path(target_dir) / 'test.csv')
ids = [Path(path).stem for path in wav_paths]
spk = ['a', 'b', 'c', 'a', 'b']
train_df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'spk': spk})
train_df.to_csv(train_csv)
id1 = [ids[0], ids[1], ids[2]]
id2 = [ids[1], ids[1], ids[2]]
wav_path1 = [wav_paths[0], wav_paths[1], wav_paths[2]]
wav_path2 = [wav_paths[1], wav_paths[1], wav_paths[2]]
labels = [0, 1, 1]
test_df = pd.DataFrame(data={'id1': id1, 'id2': id2, 'wav_path1': wav_path1, 'wav_path2': wav_path2, 'label': labels})
test_df.to_csv(test_csv)
return (train_csv, [test_csv])
problem = TestASV()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = math.inf
config['train']['save_step'] = 1
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
|
@pytest.mark.parametrize('vocab_type', ['subword', 'character'])
def test_superb_sf(vocab_type):
if (vocab_type == 'subword'):
vocab_args = {'vocab_size': 22}
else:
vocab_args = {}
with tempfile.TemporaryDirectory() as tempdir:
with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):
class TestSF(SuperbSF):
def default_config(self) -> dict:
config = super().default_config()
config['prepare_data'] = {}
return config
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
all_wav_paths = wav_paths
all_text_with_iob = [('hello how are you today', 'O O O O timeRange'), ('fine thank you', 'condition O O'), ('oh nice', 'O condition'), ('I think is good', 'O O O genre'), ('maybe okay', 'O genre')]
(text, iob) = zip(*all_text_with_iob)
ids = list(range(len(all_wav_paths)))
df = pd.DataFrame(data={'id': ids, 'wav_path': all_wav_paths, 'transcription': text, 'iob': iob})
train_path = (Path(target_dir) / 'train.csv')
valid_path = (Path(target_dir) / 'valid.csv')
test_path = (Path(target_dir) / 'test.csv')
df.iloc[:3].to_csv(train_path, index=False)
df.iloc[3:4].to_csv(valid_path, index=False)
df.iloc[4:].to_csv(test_path, index=False)
return (train_path, valid_path, [test_path])
problem = TestSF()
config = problem.default_config()
config['target_dir'] = tempdir
config['device'] = 'cpu'
config['train']['total_steps'] = 4
config['train']['log_step'] = 1
config['train']['eval_step'] = 2
config['train']['save_step'] = 2
config['eval_batch'] = 2
config['build_tokenizer'] = {'vocab_type': vocab_type, 'vocab_args': vocab_args}
config['build_upstream']['name'] = 'fbank'
problem.run(**config)
|
def test_audio_info():
with pseudo_audio([3.0, 4.1, 1.1]) as (paths, num_samples):
infos = get_audio_info(paths, [Path(path).stem for path in paths])
assert (infos[0]['num_frames'] == (3 * 16000))
|
@pytest.mark.parametrize('duplicate', [10000, 100000])
def test_balanced_weighted_sampler(duplicate: int):
labels = ['a', 'a', 'b', 'a']
batch_size = 5
prev_diff_ratio = 1.0
sampler = BalancedWeightedSampler(labels, batch_size=batch_size, duplicate=duplicate, seed=0)
indices = list(sampler)
assert (len(indices[0]) == batch_size)
counter = Counter()
for batch_indices in indices:
for idx in batch_indices:
counter.update(labels[idx])
diff_ratio = ((abs((counter['a'] - counter['b'])) / duplicate) * len(labels))
assert (diff_ratio < prev_diff_ratio)
prev_diff_ratio = diff_ratio
assert (diff_ratio < 0.05)
|
@pytest.mark.extra_dependency
def test_beam_decoder():
decoder = BeamDecoder()
emissions = torch.randn((4, 100, 31))
emissions = torch.log_softmax(emissions, dim=2)
hyps = decoder.decode(emissions)
|
def _download_with_timeout(timeout: float, num_process: int):
processes = []
for _ in range(num_process):
process = Process(target=_urls_to_filepaths, args=(URL,), kwargs=dict(refresh=True))
process.start()
processes.append(process)
exitcodes = []
for process in processes:
process.join(timeout=timeout)
exitcodes.append(process.exitcode)
assert (len(set(exitcodes)) == 1)
exitcode = exitcodes[0]
if (exitcode != 0):
for process in processes:
process.terminate()
|
def test_download():
filepath = Path(_urls_to_filepaths(URL, download=False))
if filepath.is_file():
os.remove(filepath)
logger.info('This should time out')
_download_with_timeout(0.1, 2)
assert (not filepath.is_file()), 'The download should fail due to the very short timeout (0.1 sec), so there should not be any corrupted (incomplete) file'
logger.info('This should succeed')
_download_with_timeout(None, 2)
torch.load(filepath, map_location='cpu')
|
@pytest.mark.corpus
@pytest.mark.parametrize('fold_id', [0, 1, 2, 3, 4])
def test_er_dataset(fold_id):
v3_er_folder = (((Path(__file__).parent.parent / 's3prl') / 'downstream') / 'emotion')
IEMOCAP = dotenv_values()['IEMOCAP']
with (v3_er_folder / 'config.yaml').open() as file:
config = yaml.load(file, Loader=yaml.FullLoader)['downstream_expert']
config['datarc']['root'] = IEMOCAP
config['datarc']['meta_data'] = (v3_er_folder / 'meta_data')
config['datarc']['test_fold'] = f'fold{(fold_id + 1)}'
with tempfile.TemporaryDirectory() as tempdir:
expert = DownstreamExpert(320, config, tempdir)
train_dataset_v3 = expert.get_dataloader('train').dataset
valid_dataset_v3 = expert.get_dataloader('dev').dataset
test_dataset_v3 = expert.get_dataloader('test').dataset
with tempfile.TemporaryDirectory() as tempdir:
default_config = SuperbER().default_config()
(train_csv, valid_csv, test_csvs) = SuperbER().prepare_data({'iemocap': IEMOCAP, 'test_fold': fold_id}, tempdir, tempdir)
encoder_path = SuperbER().build_encoder(default_config['build_encoder'], tempdir, tempdir, train_csv, valid_csv, test_csvs)
train_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'train', train_csv, encoder_path, None)
valid_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'valid', valid_csv, encoder_path, None)
test_dataset_v4 = SuperbER().build_dataset(default_config['build_dataset'], tempdir, tempdir, 'test', test_csvs[0], encoder_path, None)
def compare_dataset(v3, v4):
data_v3 = {}
for (wav, label, name) in tqdm(v3, desc='v3'):
if isinstance(v3, Subset):
v3 = v3.dataset
label_name = [k for (k, v) in v3.class_dict.items() if (v == label)][0]
data_v3[name] = label_name
data_v4 = {}
for batch in tqdm(v4, desc='v4'):
data_v4[batch['unique_name']] = batch['label']
assert (sorted(data_v3.keys()) == sorted(data_v4.keys()))
for key in data_v3:
value_v3 = data_v3[key]
value_v4 = data_v4[key]
assert (value_v3 == value_v4)
compare_dataset(train_dataset_v3, train_dataset_v4)
compare_dataset(valid_dataset_v3, valid_dataset_v4)
compare_dataset(test_dataset_v3, test_dataset_v4)
|
@pytest.mark.corpus
def test_fluent_commands():
config = dotenv_values()
dataset_root = config['FluentSpeechCommands']
dataset = FluentSpeechCommands(dataset_root)
dataset.data_split_ids
dataset.data_split
dataset.all_data
|
def test_chunking():
chunks = list(chunking(0.0, 8.5, 2.0, 1.0, False))
assert (len(chunks) == 7)
chunks = list(chunking(1.1, 8.5, 2.0, 1.0, True))
assert (len(chunks) == 8)
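# How the expected counts arise (assuming the arguments are start_sec, end_sec, chunk_secs,
# step_secs, use_partial_chunks): from 0.0 to 8.5 with 2.0-sec windows and a 1.0-sec step,
# the full windows start at 0.0 .. 6.0, giving 7 chunks; from 1.1 to 8.5 with partial chunks
# allowed, the starts are 1.1 .. 8.1, giving 8 chunks, the last two shorter than 2.0 sec.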
|
def test_frame_tensor_label():
labels = [(0, 3.0, 4.1), (1, 1.2, 3.2)]
label = chunk_labels_to_frame_tensor_label(1.5, 4.0, labels, 3, 160)
assert (label[((- 1), 0)] == 1)
assert (label[(0, 1)] == 1)
|
def test_g2p():
g2p = G2P()
char_sent = 'HELLO WORLD'
phn_sent = g2p.encode(char_sent)
logging.info(phn_sent)
|
@pytest.mark.corpus
def test_librispeech_dataset():
config = dotenv_values()
dataset_root = config['LibriSpeech']
dataset = LibriSpeech(dataset_root, train_split=['train-clean-100', 'train-clean-360'], valid_split=['dev-clean', 'dev-other'], test_split=['test-clean', 'test-other'])
data = dataset.all_data
assert (len(data) == (292367 - libri_stats['train-other-500']))
|
@pytest.mark.corpus
def test_librilight():
config = dotenv_values()
train_corpus = LibriLight(config['LibriLight'])
eval_corpus = LibriSpeech(config['LibriSpeech'], 4, [])
train_data = train_corpus.all_data
(_, valid_data, test_data) = eval_corpus.data_split
assert (len(train_data) == 48)
|
def test_FrameLevel(helpers):
module = FrameLevel(3, 4, [5, 6])
x = torch.randn(32, 10, 3)
x_len = (torch.ones(32) * 3).long()
(h, hl) = module(x, x_len)
|
def test_load_audio():
with pseudo_audio([3.0, 4.0, 5.2]) as (paths, num_frames):
dataset = LoadAudio(paths, [None, 1.0, 3.1], [None, 3.2, None], max_secs=4.2)
for item in dataset:
assert isinstance(item['wav'], torch.Tensor)
|
def isclose(x: float, y: float) -> bool:
return (abs((x - y)) < 1e-09)
|
def test_metric():
hyps = ['a ac abb d']
refs = ['a ab abc d']
assert isclose(cer(hyps, refs), 0.2)
assert isclose(wer(hyps, refs), 0.5)
assert isclose(per(hyps, refs), 0.5)
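# Why these numbers (edit distance over the reference length): with ref 'a ab abc d' vs
# hyp 'a ac abb d', two of the ten characters differ (b->c, c->b), so cer = 2/10 = 0.2;
# two of the four words differ ('ab'->'ac', 'abc'->'abb'), so wer = 2/4 = 0.5; per is
# presumably computed over space-separated tokens here, which for these inputs equals wer.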
|
@pytest.mark.parametrize('pooling_type', ['MeanPooling', 'TemporalStatisticsPooling', 'AttentiveStatisticsPooling', 'SelfAttentivePooling'])
def test_utterance_level_with_pooling(pooling_type: str):
model = UtteranceLevel(256, 64, [128], 'ReLU', None, pooling_type, None)
output = model(torch.randn(32, 100, 256), (torch.arange(32) + 1))
assert (output.shape == (32, 64))
|
@pytest.mark.corpus
def test_quesst14_for_qbe():
def quesst14_for_qbe(dataset_root: str):
corpus = Quesst14(dataset_root)
def path_to_dict(path: str):
return dict(wav_path=path)
return dict(all_data={Path(path).stem: path_to_dict(path) for path in ((corpus.valid_queries + corpus.test_queries) + corpus.docs)}, valid_keys=[Path(path).stem for path in corpus.valid_queries], test_keys=[Path(path).stem for path in corpus.test_queries], doc_keys=[Path(path).stem for path in corpus.docs])
quesst_root = dotenv_values()['Quesst14']
(all_data, valid_keys, test_keys, doc_keys) = quesst14_for_qbe(quesst_root).values()
assert (len(all_data) == 2714)
assert (((len(valid_keys) + len(test_keys)) + len(doc_keys)) == 2714)
|
def test_rnn(helpers):
modules = [RNNEncoder(input_size=8, output_size=6, module='LSTM', hidden_size=[10, 10, 10], dropout=[0.1, 0.1, 0.1], layer_norm=[True, True, True], proj=[True, True, True], sample_rate=[1, 2, 1], sample_style='drop', bidirectional=True), RNNEncoder(input_size=8, output_size=6, module='LSTM', hidden_size=[10, 10, 10], dropout=[0.1, 0.1, 0.1], layer_norm=[True, True, True], proj=[True, True, True], sample_rate=[1, 2, 1], sample_style='concat', bidirectional=True)]
for module in modules:
xs = torch.randn(32, 50, module.input_size)
xs_len = ((torch.arange(32) + (50 - 32)) + 1)
(out, out_len) = module(xs, xs_len)
assert (out.shape[1] == 25)
assert (out.shape[2] == module.output_size)
assert (out_len.max() == 25)
|
def _merge_batch_indices(batch_indices):
all_indices = []
for indices in batch_indices:
all_indices += indices
return all_indices
|
@pytest.mark.parametrize('world_size', [1, 2, 3, 4, 5, 6, 7, 8])
def test_distributed_sampler(world_size):
sampler = [[1, 2, 3], [4, 5, 6, 7], [8], [9, 10]]
ddp_indices = []
for rank in range(world_size):
ddp_sampler = DistributedBatchSamplerWrapper(sampler, world_size, rank)
ddp_indices += _merge_batch_indices(ddp_sampler)
assert (sorted(ddp_indices) == sorted(_merge_batch_indices(sampler)))
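# The invariant under test: whatever the world size, the union of batch indices seen across
# all ranks must equal the original sampler's indices exactly once, so no sample is dropped
# or duplicated by DistributedBatchSamplerWrapper.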
|
@pytest.mark.parametrize('batch_size', [1, 2, 3, len(data)])
def test_FixedBatchSizeBatchSampler(batch_size):
dataset = data
iter1 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=False)))
iter2 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=True)))
indices1 = sorted(_merge_batch_indices(iter1))
indices2 = sorted(_merge_batch_indices(iter2))
assert (indices1 == indices2 == list(range(len(timestamps))))
|
@pytest.mark.corpus
def test_snips():
config = dotenv_values()
dataset_root = config['SNIPS']
dataset = SNIPS(dataset_root, ['Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Matthew', 'Salli'], ['Aditi', 'Amy', 'Geraint', 'Nicole'], ['Brian', 'Emma', 'Raveena', 'Russell'])
(train_data, valid_data, test_data) = dataset.data_split
assert (len(train_data) == 104672)
assert (len(valid_data) == 2800)
assert (len(test_data) == 2800)
|
def test_sorted_slice_sampler():
batch_size = 16
max_length = (16000 * 5)
lengths = [random.randint((16000 * 3), (16000 * 8)) for index in range(1000)]
sampler = SortedSliceSampler(lengths, batch_size=batch_size, max_length=max_length)
for epoch in range(5):
sampler.set_epoch(epoch)
id2length = lengths
for batch_ids in sampler:
batch_lengths = [id2length[idx] for idx in batch_ids]
assert (sorted(batch_lengths, reverse=True) == batch_lengths)
if (batch_lengths[0] > max_length):
assert (len(batch_lengths) == (batch_size // 2))
other_batch_sizes = [len(batch) for batch in sampler if (len(batch) not in [batch_size, (batch_size // 2)])]
assert (len(set(other_batch_sizes)) == len(other_batch_sizes))
assert (len(sampler) == len(lengths))
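# Behaviors asserted above: each batch is sorted by descending length, a batch whose longest
# utterance exceeds max_length is halved to batch_size // 2, and the sampler's __len__
# reports the number of samples rather than the number of batches.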
|
def test_sorted_bucketing_sampler():
batch_size = 16
max_length = (16000 * 5)
lengths = [random.randint((16000 * 3), (16000 * 8)) for index in range(1000)]
sampler = SortedBucketingSampler(lengths, batch_size=batch_size, max_length=max_length, shuffle=False)
for epoch in range(5):
sampler.set_epoch(epoch)
id2length = lengths
for batch_ids in sampler:
batch_lengths = [id2length[idx] for idx in batch_ids]
assert (sorted(batch_lengths, reverse=True) == batch_lengths)
if (batch_lengths[0] > max_length):
assert (len(batch_lengths) == (batch_size // 2))
batch_sizes = [len(batch_indices) for batch_indices in sampler]
other_batch_sizes = [size for size in batch_sizes if (size not in [batch_size, (batch_size // 2)])]
assert (len(other_batch_sizes) <= 1)
assert ((len(lengths) / 16) < len(sampler) < (len(lengths) / 8))
|
def test_sox_effect():
effects = [['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]
with tempfile.NamedTemporaryFile() as file:
tensor = torch.randn(1, (16000 * 10))
filename = f'{file.name}.wav'
torchaudio.save(filename, tensor, SAMPLE_RATE)
(wav1, sr1) = torchaudio.sox_effects.apply_effects_file(filename, effects=effects)
(wav2, sr2) = torchaudio.sox_effects.apply_effects_tensor(tensor, SAMPLE_RATE, effects)
torch.allclose(wav1, wav2)
assert (sr1 == sr2)
|
def test_specaug_model():
model = FrameLevelLinear(input_size=13, output_size=25, hidden_size=32)
model = ModelWithSpecaug(model)
assert (model.specaug.apply_time_mask == True)
assert (model.specaug.apply_freq_mask == True)
|
def _class_counter(data_dict):
counter = Counter()
for (data_id, data) in data_dict.items():
counter.update([data['class_name']])
return counter
|
@pytest.mark.corpus
def test_speech_commands():
env = dotenv_values()
corpus = SpeechCommandsV1(env['GSC1'], env['GSC1_TEST'])
all_data = corpus.all_data
classes = set([value['class_name'] for (key, value) in all_data.items()])
assert (len(classes) == 12), f'{classes}'
(train, valid, test) = corpus.data_split
train_class_counter = _class_counter(train)
valid_class_counter = _class_counter(valid)
test_class_counter = _class_counter(test)
assert (train_class_counter == Counter({'_unknown_': 32550, 'stop': 1885, 'on': 1864, 'go': 1861, 'yes': 1860, 'no': 1853, 'right': 1852, 'up': 1843, 'down': 1842, 'left': 1839, 'off': 1839, '_silence_': 6}))
assert (valid_class_counter == Counter({'_unknown_': 4221, 'stop': 246, 'on': 257, 'go': 260, 'yes': 261, 'no': 270, 'right': 256, 'up': 260, 'down': 264, 'left': 247, 'off': 256, '_silence_': 6}))
assert (test_class_counter == Counter({'_unknown_': 257, 'stop': 249, 'on': 246, 'go': 251, 'yes': 256, 'no': 252, 'right': 259, 'up': 272, 'down': 253, 'left': 267, 'off': 262, '_silence_': 257}))
|
def test_tokenizer():
char_tokenizer = CharacterTokenizer()
phone_tokenizer = default_phoneme_tokenizer()
char_text = 'HELLO WORLD'
char_text_enc = char_tokenizer.encode(char_text)
char_text_dec = char_tokenizer.decode(char_text_enc)
assert isinstance(char_text_enc, list)
assert (char_text == char_text_dec)
|
def test_version():
s3prl.__version__
|
def is_same_vocab(vocabs_1, vocabs_2):
if (len(vocabs_1) != len(vocabs_2)):
return False
for (v1, v2) in zip(vocabs_1, vocabs_2):
if (v1 != v2):
return False
return True
|
@pytest.mark.corpus
def test_vocabulary():
config = dotenv_values()
corpus = LibriSpeech(config['LibriSpeech'])
text_list = corpus.data_dict['train-clean-100']['text_list']
with tempfile.TemporaryDirectory() as directory:
logging.info(directory)
text_file = os.path.join(directory, 'text.txt')
with open(text_file, 'w') as fp:
for text in text_list:
fp.write((text + '\n'))
char_vocabs_1 = generate_vocab('character', text_list=text_list)
char_vocabs_2 = generate_vocab('character', text_file=text_file)
assert isinstance(char_vocabs_1, list)
assert isinstance(char_vocabs_2, list)
assert is_same_vocab(char_vocabs_1, char_vocabs_2)
char_tokenizer = load_tokenizer('character', vocab_list=char_vocabs_1)
assert (char_tokenizer.decode(char_tokenizer.encode(SAMPLE)) == SAMPLE)
word_vocabs_1 = generate_vocab('word', text_list=text_list, vocab_size=5000)
word_vocabs_2 = generate_vocab('word', text_file=text_file, vocab_size=5000)
assert isinstance(word_vocabs_1, list)
assert isinstance(word_vocabs_2, list)
assert is_same_vocab(word_vocabs_1, word_vocabs_2)
word_tokenizer = load_tokenizer('word', vocab_list=word_vocabs_1)
assert (word_tokenizer.decode(word_tokenizer.encode(SAMPLE)) == SAMPLE)
vocab_file_1 = os.path.join(directory, 'subword_1')
vocab_file_2 = os.path.join(directory, 'subword_2')
subword_vocabs_1 = generate_vocab('subword', text_list=text_list, vocab_size=500, output_file=vocab_file_1)
subword_vocabs_2 = generate_vocab('subword', text_file=text_file, vocab_size=500, output_file=vocab_file_2)
subword_tokenizer_1 = load_tokenizer('subword', vocab_file=(vocab_file_1 + '.model'))
subword_tokenizer_2 = load_tokenizer('subword', vocab_file=(vocab_file_2 + '.model'))
assert (subword_tokenizer_1.decode(subword_tokenizer_1.encode(SAMPLE)) == SAMPLE)
assert (subword_tokenizer_2.decode(subword_tokenizer_2.encode(SAMPLE)) == SAMPLE)
assert (subword_tokenizer_1.encode(SAMPLE) == subword_tokenizer_2.encode(SAMPLE))
|
@pytest.mark.corpus
@pytest.mark.parametrize('use_cache', [False, True])
def test_voxceleb1sid(use_cache):
config = dotenv_values()
voxceleb1 = Path(config['VoxCeleb1'])
if voxceleb1.is_dir():
(train_data, valid_data, test_data) = VoxCeleb1SID(voxceleb1).data_split
else:
raise ValueError('Please set the VoxCeleb1 path in .env')
|
def extract_single_name(name: str, ckpt: str, legacy: bool, output_dir: str, device: str, refresh: bool=False):
output_dir: Path = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
output_path = str((output_dir / f'{name}.pt').resolve())
if (Path(output_path).is_file() and (not refresh)):
return
model = getattr(hub, name)(ckpt=ckpt, legacy=legacy).to(device)
model.eval()
with torch.no_grad():
hidden_states = model(get_pseudo_wavs(device=device))['hidden_states']
hs = [h.detach().cpu() for h in hidden_states]
torch.save(hs, output_path)
|
def load_valid_paths():
with open('./valid_paths.txt', 'r') as fp:
paths = [line.strip() for line in fp if (line.strip() != '')]
return paths
|
def get_third_party():
txt_files = list(Path('./requirements').rglob('*.txt'))
package_list = []
for file in txt_files:
with open(file, 'r') as fp:
for line in fp:
line = line.strip()
if (line == ''):
continue
package_list.append(line.split(' ')[0])
return package_list
|
def run_command(command: str):
try:
check_output(command.split(' '))
except CalledProcessError as e:
print(e.output.decode('utf-8'))
raise
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('files', type=str, nargs='*', default=[], help='If no file is given, use the files listed in ./valid_paths.txt')
parser.add_argument('--check', action='store_true', help='Only checks the files')
args = parser.parse_args()
if (len(args.files) == 0):
args.files = load_valid_paths()
print(f'Formatting files: {args.files}')
args.files = ' '.join(args.files)
print('Run flake8')
run_command(f'flake8 {args.files} --count --select=E9,F63,F7,F82 --show-source --statistics')
run_command(f'flake8 {args.files} --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics')
print('Run black')
if args.check:
run_command(f'black --check {args.files}')
else:
run_command(f'black {args.files}')
print('Run isort')
third_party = get_third_party()
third_party = ','.join(third_party)
if args.check:
run_command(f'isort --profile black --thirdparty {third_party} --check {args.files}')
else:
run_command(f'isort --profile black --thirdparty {third_party} {args.files}')
if args.check:
print('Successfully passed the format check!')
|
def linkcode_resolve(domain, info):
def find_source():
obj = sys.modules[info['module']]
for part in info['fullname'].split('.'):
obj = getattr(obj, part)
if isinstance(obj, property):
return None
file_parts = Path(inspect.getsourcefile(obj)).parts
reversed_parts = []
for part in reversed(file_parts):
if (part == 's3prl'):
reversed_parts.append(part)
break
else:
reversed_parts.append(part)
fn = '/'.join(reversed(reversed_parts))
(source, lineno) = inspect.getsourcelines(obj)
return (fn, lineno, ((lineno + len(source)) - 1))
if ((domain != 'py') or (not info['module'])):
return None
tag = ('master' if ('dev' in release) else ('v' + release))
try:
filename = ('%s#L%d-L%d' % find_source())
except Exception:
filename = (info['module'].replace('.', '/') + '.py')
return ('https://github.com/s3prl/s3prl/blob/%s/%s' % (tag, filename))
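# Hypothetical illustration: for info = {'module': 's3prl.hub', 'fullname': 'wav2vec2'} this
# would return something like https://github.com/s3prl/s3prl/blob/<tag>/s3prl/hub.py#L<start>-L<end>,
# where <tag> is 'master' for dev releases and 'v<release>' otherwise; the actual line numbers
# depend on the source file.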
|
class LowResourceLinearSuperbASR(SuperbASR):
def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
(train_path, valid_path, test_paths) = super().prepare_data(prepare_data, target_dir, cache_dir, get_path_only)
df = pd.read_csv(train_path)
df = df.iloc[:100]
df.to_csv(train_path, index=False)
return (train_path, valid_path, test_paths)
def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
import torch
class Model(torch.nn.Module):
def __init__(self, input_size, output_size) -> None:
super().__init__()
self.linear = torch.nn.Linear(input_size, output_size)
def forward(self, x, x_len):
return (self.linear(x), x_len)
return Model(downstream_input_size, downstream_output_size)
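# Hedged usage sketch (an assumption, mirroring the `problem.run(**config)` pattern from the
# SUPERB tests above; the prepare_data keys and the output directory below are hypothetical):
if __name__ == '__main__':
    problem = LowResourceLinearSuperbASR()
    config = problem.default_config()
    config['target_dir'] = './result/low_resource_linear_asr'  # hypothetical output directory
    config['device'] = 'cpu'
    config['build_upstream']['name'] = 'fbank'
    problem.run(**config)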
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('problem', help='The problem module. E.g. `s3prl.problem.ssl.tera.Tera`')
parser.add_argument('dataset_root', help='The dataset root for pretrain.')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--n_jobs', type=int, default=8)
parser.add_argument('--override', default=None, help='Override the default_config of the problem module. E.g. --override ValidSampler.batch_size=4,,TestSampler.batch_size=4')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--dryrun', action='store_true')
parser.add_argument('--seed', type=int, default=1337)
args = parser.parse_args()
fix_random_seeds(args.seed)
problem = qualname_to_cls(args.problem)
config = Container(deepcopy(problem.default_config))
for (key, value) in vars(args).items():
if (key not in ['override']):
config[key] = value
if args.dryrun:
config.override(DRYRUN_CONFIG)
if (isinstance(args.override, str) and (len(args.override) > 0)):
override_dict = parse_override(args.override)
config.override(override_dict)
return (problem, config)
|
def main():
logging.basicConfig(level=logging.INFO)
(problem, config) = parse_args()
save_to = Path(config.save_to)
save_to.mkdir(exist_ok=True, parents=True)
body = problem.Body(**config.Body)
head = problem.Head(**config.Head)
loss = problem.Loss(**config.Loss)
stats = Container()
logger.info('Preparing corpus')
corpus = problem.Corpus(config.dataset_root, **config.Corpus)
(train_data, valid_data, test_data, corpus_stats) = corpus().split(3)
stats.add(corpus_stats)
logger.info('Preparing train data')
train_dataset = AugmentedDynamicItemDataset(train_data, tools=stats)
train_dataset = problem.TrainData(**config.TrainData)(train_dataset)
assert (train_dataset.get_tool('feat_dim') == problem.input_size)
train_sampler = DistributedBatchSamplerWrapper(problem.TrainSampler(train_dataset, **config.TrainSampler), num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=config.n_jobs)
stats.add(train_dataset.all_tools())
logger.info('Preparing valid data')
valid_dataset = AugmentedDynamicItemDataset(valid_data, tools=stats)
valid_dataset = problem.ValidData(**config.ValidData)(valid_dataset)
valid_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(valid_dataset, **config.ValidSampler), num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=12)
logger.info('Preparing test data')
test_dataset = AugmentedDynamicItemDataset(test_data, tools=stats)
test_dataset = problem.TestData(**config.TestData)(test_dataset)
test_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(test_dataset, **config.TestSampler), num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=12)
sorted_ckpt_dirs = sorted([file for file in save_to.iterdir() if (file.is_dir() and str(file).endswith('.ckpts'))], key=os.path.getmtime)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint((sorted_ckpt_dirs[(- 1)] / 'task.ckpt')).to(device)
else:
logger.info('Create a new model')
task = problem.Task(body, head, loss, **stats)
task = task.to(device)
(opt_cls_qualname, opt_cfgs) = config.Optimizer.split(1)
optimizer = qualname_to_cls(opt_cls_qualname)(task.parameters(), **opt_cfgs)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
optimizer.load_state_dict(torch.load((sorted_ckpt_dirs[(- 1)] / 'optimizer.ckpt')))
if config.Trainer.use_valid:
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
valid_best_score = torch.load((sorted_ckpt_dirs[(- 1)] / 'valid_best_score.ckpt'))[config.Trainer.valid_metric]
else:
valid_best_score = ((- 100000) if config.Trainer.valid_higher_better else 100000)
def save_checkpoint(name):
ckpt_dir: Path = (save_to / f'{name}.ckpts')
ckpt_dir.mkdir(parents=True, exist_ok=True)
logger.info(f'Save checkpoint to: {ckpt_dir}')
if hasattr(problem, 'save_checkpoint'):
logger.info(f'Save upstream checkpoint to: {ckpt_dir}')
problem.save_checkpoint(config, body, head, (ckpt_dir / 'upstream.ckpt'))
task.save_checkpoint((ckpt_dir / 'task.ckpt'))
torch.save(optimizer.state_dict(), (ckpt_dir / 'optimizer.ckpt'))
torch.save({config.Trainer.valid_metric: valid_best_score}, (ckpt_dir / 'valid_best_score.ckpt'))
pbar = tqdm(total=config.Trainer.total_steps, desc='Total')
train_completed = False
accum_grad_steps = 0
while (not train_completed):
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss /= config.Trainer.gradient_accumulate_steps
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=config.Trainer.gradient_clipping)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
optimizer.zero_grad()
accum_grad_steps = 0
else:
accum_grad_steps += 1
if (accum_grad_steps == config.Trainer.gradient_accumulate_steps):
optimizer.step()
optimizer.zero_grad()
accum_grad_steps = 0
batch_results.append(result.cacheable())
if ((global_step % config.Trainer.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for (name, value) in logs.Scalar.items():
if (name == 'loss'):
value *= config.Trainer.gradient_accumulate_steps
logger.info(f'{name}: {value}')
batch_results = []
if ((global_step % config.Trainer.valid_step) == 0):
with torch.no_grad():
if config.Trainer.use_valid:
valid_results = []
for (batch_idx, batch) in enumerate(tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
task.eval()
result = task.valid_step(**batch)
valid_results.append(result.cacheable())
logs: Logs = task.valid_reduction(valid_results).slice(1)
logger.info(f'[Valid] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
if (name == config.Trainer.valid_metric):
cond1 = (config.Trainer.valid_higher_better and (value > valid_best_score))
cond2 = ((not config.Trainer.valid_higher_better) and (value < valid_best_score))
if (cond1 or cond2):
valid_best_score = value
save_checkpoint('valid_best')
if (((global_step % config.Trainer.save_step) == 0) or (global_step == config.Trainer.total_steps)):
save_checkpoint(f'global_step_{global_step}')
if (global_step == config.Trainer.total_steps):
train_completed = True
break
test_results = []
for (batch_idx, batch) in enumerate(tqdm(test_dataloader, desc='Test', total=len(test_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
result = task.test_step(**batch)
test_results.append(result.cacheable())
logs: Logs = task.test_reduction(test_results).slice(1)
logger.info(f'[Test] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
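# A minimal, standalone sketch of the gradient-accumulation pattern used in the training loop
# above (standard PyTorch only; the model, data, and accumulate_steps below are illustrative
# assumptions, not part of the s3prl script):
import torch

def accumulation_sketch(accumulate_steps=4, total_batches=16):
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer.zero_grad()
    for step in range(total_batches):
        x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
        loss = torch.nn.functional.cross_entropy(model(x), y)
        # Scale the loss so the accumulated gradient matches a single large batch.
        (loss / accumulate_steps).backward()
        if ((step + 1) % accumulate_steps) == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            optimizer.zero_grad()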
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('upstream', help='The upstream name. E.g. wav2vec2')
parser.add_argument('problem', help='The problem module. E.g. s3prl.problem.SuperbSID')
parser.add_argument('dataset_root', help='The dataset root of your problem.')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--feature_selection', default='hidden_states')
parser.add_argument('--n_jobs', type=int, default=6)
parser.add_argument('--override', default=None, help='Override the default_config of the problem module. E.g. --override ValidSampler.batch_size=4,,TestSampler.batch_size=4')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--dryrun', action='store_true')
parser.add_argument('--seed', type=int, default=1337)
args = parser.parse_args()
fix_random_seeds(args.seed)
problem = qualname_to_cls(args.problem)
config = Container(deepcopy(problem.default_config))
for (key, value) in vars(args).items():
if (key not in ['override']):
config[key] = value
if args.dryrun:
config.override(DRYRUN_CONFIG)
if (isinstance(args.override, str) and (len(args.override) > 0)):
override_dict = parse_override(args.override)
config.override(override_dict)
return (problem, config)
|
def main():
logging.basicConfig(level=logging.INFO)
(problem, config) = parse_args()
save_to = Path(config.save_to)
save_to.mkdir(exist_ok=True, parents=True)
upstream = S3PRLUpstream(config.upstream, config.feature_selection)
stats = Container(upstream_rate=upstream.downsample_rate)
logger.info('Preparing corpus')
corpus = problem.Corpus(config.dataset_root, **config.Corpus)
(train_data, valid_data, test_data, corpus_stats) = corpus().split(3)
stats.add(corpus_stats)
logger.info('Preparing train data')
train_dataset = AugmentedDynamicItemDataset(train_data, tools=stats)
train_dataset = problem.TrainData(**config.TrainData)(train_dataset)
train_sampler = DistributedBatchSamplerWrapper(problem.TrainSampler(train_dataset, **config.TrainSampler), num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=config.n_jobs)
stats.add(train_dataset.all_tools())
logger.info('Preparing valid data')
valid_dataset = AugmentedDynamicItemDataset(valid_data, tools=stats)
valid_dataset = problem.ValidData(**config.ValidData)(valid_dataset)
valid_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(valid_dataset, **config.ValidSampler), num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=12)
logger.info('Preparing test data')
test_dataset = AugmentedDynamicItemDataset(test_data, tools=stats)
test_dataset = problem.TestData(**config.TestData)(test_dataset)
test_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(test_dataset, **config.TestSampler), num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=12)
sorted_ckpt_dirs = sorted([file for file in save_to.iterdir() if (file.is_dir() and str(file).endswith('.ckpts'))], key=os.path.getmtime)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint((sorted_ckpt_dirs[(- 1)] / 'task.ckpt')).to(device)
else:
logger.info('Create a new model')
downstream = problem.Downstream(upstream.output_size, **stats)
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, **stats, **config.Task)
task = task.to(device)
(opt_cls_qualname, opt_cfgs) = config.Optimizer.split(1)
optimizer = qualname_to_cls(opt_cls_qualname)(task.parameters(), **opt_cfgs)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
optimizer.load_state_dict(torch.load((sorted_ckpt_dirs[(- 1)] / 'optimizer.ckpt')))
if config.Trainer.use_valid:
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
valid_best_score = torch.load((sorted_ckpt_dirs[(- 1)] / 'valid_best_score.ckpt'))[config.Trainer.valid_metric]
else:
valid_best_score = ((- 100000) if config.Trainer.valid_higher_better else 100000)
def save_checkpoint(name):
ckpt_dir: Path = (save_to / f'{name}.ckpts')
ckpt_dir.mkdir(parents=True, exist_ok=True)
logger.info(f'Save checkpoint to: {ckpt_dir}')
task.save_checkpoint((ckpt_dir / 'task.ckpt'))
torch.save(optimizer.state_dict(), (ckpt_dir / 'optimizer.ckpt'))
torch.save({config.Trainer.valid_metric: valid_best_score}, (ckpt_dir / 'valid_best_score.ckpt'))
pbar = tqdm(total=config.Trainer.total_steps, desc='Total')
train_completed = False
accum_grad_steps = 0
while (not train_completed):
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss /= config.Trainer.gradient_accumulate_steps
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=config.Trainer.gradient_clipping)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
optimizer.zero_grad()
accum_grad_steps = 0
else:
accum_grad_steps += 1
if (accum_grad_steps == config.Trainer.gradient_accumulate_steps):
optimizer.step()
optimizer.zero_grad()
accum_grad_steps = 0
batch_results.append(result.cacheable())
if ((global_step % config.Trainer.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for (name, value) in logs.Scalar.items():
if (name == 'loss'):
value *= config.Trainer.gradient_accumulate_steps
logger.info(f'{name}: {value}')
batch_results = []
if ((global_step % config.Trainer.valid_step) == 0):
with torch.no_grad():
if config.Trainer.use_valid:
valid_results = []
for (batch_idx, batch) in enumerate(tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
task.eval()
result = task.valid_step(**batch)
valid_results.append(result.cacheable())
logs: Logs = task.valid_reduction(valid_results).slice(1)
logger.info(f'[Valid] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
if (name == config.Trainer.valid_metric):
cond1 = (config.Trainer.valid_higher_better and (value > valid_best_score))
cond2 = ((not config.Trainer.valid_higher_better) and (value < valid_best_score))
if (cond1 or cond2):
valid_best_score = value
save_checkpoint('valid_best')
if (((global_step % config.Trainer.save_step) == 0) or (global_step == config.Trainer.total_steps)):
save_checkpoint(f'global_step_{global_step}')
if (global_step == config.Trainer.total_steps):
train_completed = True
break
test_results = []
for (batch_idx, batch) in enumerate(tqdm(test_dataloader, desc='Test', total=len(test_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
result = task.test_step(**batch)
test_results.append(result.cacheable())
logs: Logs = task.test_reduction(test_results).slice(1)
logger.info(f'[Test] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('load_from', help='The directory containing all the checkpoints')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
load_from = Path(args.load_from)
task: Task = Object.load_checkpoint((load_from / 'task.ckpt')).to(device)
task.eval()
test_dataset: Dataset = Object.load_checkpoint((load_from / 'test_dataset.ckpt'))
test_dataloader = test_dataset.to_dataloader(batch_size=1, num_workers=6)
with torch.no_grad():
for batch in test_dataloader:
batch: Output = batch.to(device)
result = task(**batch.subset('x', 'x_len', as_type='dict'))
for (name, prediction) in zip(batch.name, result.prediction):
print(name, prediction)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('librispeech', help='The root directory of LibriSpeech')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=5000)
parser.add_argument('--save_step', type=int, default=100)
args = parser.parse_args()
return args
|
def main():
logging.basicConfig()
logger.setLevel(logging.INFO)
args = parse_args()
librispeech = Path(args.librispeech)
assert librispeech.is_dir()
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(librispeech, splits=['train-clean-100', 'dev-clean', 'test-clean'])
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True)
train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=4, collate_fn=train_dataset.collate_fn)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
valid_sampler = problem.ValidSampler(valid_dataset, 8)
valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=4, collate_fn=valid_dataset.collate_fn)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
test_sampler = problem.TestSampler(test_dataset, 8)
test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=4, collate_fn=test_dataset.collate_fn)
latest_task = (save_to / 'task.ckpt')
if latest_task.is_file():
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint(latest_task).to(device)
else:
logger.info('No last checkpoint found. Create new model')
upstream = S3PRLUpstream('apc')
downstream = problem.DownstreamModel(upstream.output_size, preprocessor.statistics().output_size, hidden_size=[512], dropout=[0.2])
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, preprocessor.statistics().label_loader)
task = task.to(device)
optimizer = optim.Adam(task.parameters(), lr=0.001)
latest_optimizer = (save_to / 'optimizer.ckpt')
if latest_optimizer.is_file():
optimizer.load_state_dict(torch.load((save_to / 'optimizer.ckpt')))
else:
optimizer = optim.Adam(task.parameters(), lr=0.001)
pbar = tqdm(total=args.total_steps, desc='Total')
while True:
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
optimizer.zero_grad()
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
else:
optimizer.step()
cacheable_result = result.cacheable()
batch_results.append(cacheable_result)
if (((global_step + 1) % args.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
batch_results = []
if (((global_step + 1) % args.eval_step) == 0):
with torch.no_grad():
task.eval()
valid_results = []
for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
batch = batch.to(device)
result = task.valid_step(**batch)
cacheable_result = result.cacheable()
valid_results.append(cacheable_result)
logs: Logs = task.valid_reduction(valid_results).logs
logger.info(f'[Valid] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
if (((global_step + 1) % args.save_step) == 0):
task.save_checkpoint((save_to / 'task.ckpt'))
torch.save(optimizer.state_dict(), (save_to / 'optimizer.ckpt'))
with torch.no_grad():
test_results = []
for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
batch = batch.to(device)
result = task.test_step(**batch)
cacheable_result = result.cacheable()
test_results.append(cacheable_result)
logs: Logs = task.test_reduction(test_results).logs
logger.info('[Test] results')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('librispeech', help='The root directory of LibriSpeech')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=5000)
parser.add_argument('--save_step', type=int, default=100)
parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
parser.add_argument('--limit_train_batches', type=int)
parser.add_argument('--limit_val_batches', type=int)
parser.add_argument('--fast_dev_run', action='store_true')
args = parser.parse_args()
return args
|
def main():
logging.basicConfig(level=logging.INFO)
args = parse_args()
librispeech = Path(args.librispeech)
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(librispeech)
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_dataloader = train_dataset.to_dataloader(batch_size=8, num_workers=6, shuffle=True)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataloader = valid_dataset.to_dataloader(batch_size=8, num_workers=6)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataloader = test_dataset.to_dataloader(batch_size=8, num_workers=6)
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
upstream = S3PRLUpstream('apc')
downstream = problem.DownstreamModel(upstream.output_size, preprocessor.statistics().output_size)
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, preprocessor.statistics().label_loader)
optimizer = optim.Adam(task.parameters(), lr=0.001)
lightning_task = LightningModuleSimpleWrapper(task, optimizer)
checkpoint_callback = ModelCheckpoint(dirpath=str(save_to), filename='superb-asr-{step:02d}-{valid_0_wer:.2f}', monitor='valid_0_wer', save_last=True, save_top_k=3, mode='min', every_n_train_steps=args.save_step)
trainer = Trainer(callbacks=[checkpoint_callback], accelerator='gpu', gpus=1, max_steps=args.total_steps, log_every_n_steps=args.log_step, val_check_interval=args.eval_step, limit_val_batches=(args.limit_val_batches or 1.0), limit_train_batches=(args.limit_train_batches or 1.0), fast_dev_run=args.fast_dev_run)
last_ckpt = (save_to / 'last.ckpt')
if (args.not_resume or (not last_ckpt.is_file())):
last_ckpt = None
trainer.fit(lightning_task, train_dataloader, val_dataloaders=[valid_dataloader, test_dataloader], ckpt_path=last_ckpt)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('load_from', help='The directory containing all the checkpoints')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
load_from = Path(args.load_from)
task: Task = Object.load_checkpoint((load_from / 'task.ckpt')).to(device)
task.eval()
test_dataset: Dataset = Object.load_checkpoint((load_from / 'test_dataset.ckpt'))
test_dataloader = test_dataset.to_dataloader(batch_size=1, num_workers=6)
with torch.no_grad():
for batch in test_dataloader:
batch: Output = batch.to(device)
result = task(**batch.subset('x', 'x_len', as_type='dict'))
for (name, prediction) in zip(batch.name, result.prediction):
print(name, prediction)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('voxceleb1', help='The root directory of VoxCeleb1')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=5000)
parser.add_argument('--save_step', type=int, default=100)
parser.add_argument('--resume', action='store_true')
args = parser.parse_args()
return args
|
def main():
logging.basicConfig()
logger.setLevel(logging.INFO)
args = parse_args()
voxceleb1 = Path(args.voxceleb1)
assert voxceleb1.is_dir()
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(voxceleb1)
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 200), shuffle=True)
train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=12, collate_fn=train_dataset.collate_fn)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
valid_sampler = problem.ValidSampler(valid_dataset, 8)
valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=12, collate_fn=valid_dataset.collate_fn)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
test_sampler = problem.TestSampler(test_dataset, 8)
test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_size=8, num_workers=12, collate_fn=test_dataset.collate_fn)
latest_task = (save_to / 'task.ckpt')
if (args.resume and latest_task.is_file()):
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint(latest_task).to(device)
else:
logger.info('No last checkpoint found. Create new model')
upstream = S3PRLUpstream('wav2vec2')
downstream = problem.DownstreamModel(upstream.output_size, len(preprocessor.statistics().category))
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, preprocessor.statistics().category)
task = task.to(device)
optimizer = optim.Adam(task.parameters(), lr=0.001)
latest_optimizer = (save_to / 'optimizer.ckpt')
if (args.resume and latest_optimizer.is_file()):
optimizer.load_state_dict(torch.load((save_to / 'optimizer.ckpt')))
else:
optimizer = optim.Adam(task.parameters(), lr=0.001)
pbar = tqdm(total=args.total_steps, desc='Total')
while True:
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
optimizer.zero_grad()
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
else:
optimizer.step()
cacheable_result = result.cacheable()
batch_results.append(cacheable_result)
if (((global_step + 1) % args.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
batch_results = []
if (((global_step + 1) % args.eval_step) == 0):
with torch.no_grad():
task.eval()
valid_results = []
for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
batch = batch.to(device)
result = task.valid_step(**batch)
cacheable_result = result.cacheable()
valid_results.append(cacheable_result)
logs: Logs = task.valid_reduction(valid_results).logs
logger.info(f'[Valid] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
test_results = []
for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
batch = batch.to(device)
result = task.test_step(**batch)
cacheable_result = result.cacheable()
test_results.append(cacheable_result)
logs: Logs = task.test_reduction(test_results).logs
logger.info(f'[Test] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
if (((global_step + 1) % args.save_step) == 0):
task.save_checkpoint((save_to / 'task.ckpt'))
torch.save(optimizer.state_dict(), (save_to / 'optimizer.ckpt'))
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('voxceleb1', help='The root directory of VoxCeleb1')
parser.add_argument('save_to', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=5000)
parser.add_argument('--save_step', type=int, default=100)
parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
parser.add_argument('--limit_train_batches', type=int)
parser.add_argument('--limit_val_batches', type=int)
parser.add_argument('--fast_dev_run', action='store_true')
args = parser.parse_args()
return args
|
def main():
logging.basicConfig(level=logging.INFO)
args = parse_args()
voxceleb1 = Path(args.voxceleb1)
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(voxceleb1)
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_dataloader = train_dataset.to_dataloader(batch_size=8, num_workers=6, shuffle=True)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataloader = valid_dataset.to_dataloader(batch_size=8, num_workers=6)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataloader = test_dataset.to_dataloader(batch_size=8, num_workers=6)
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
upstream = S3PRLUpstream('apc')
downstream = problem.DownstreamModel(upstream.output_size, len(preprocessor.statistics().category))
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, preprocessor.statistics().category)
optimizer = optim.Adam(task.parameters(), lr=0.001)
lightning_task = LightningModuleSimpleWrapper(task, optimizer)
checkpoint_callback = ModelCheckpoint(dirpath=str(save_to), filename='superb-sid-{step:02d}-{valid_0_accuracy:.2f}', monitor='valid_0_accuracy', save_last=True, save_top_k=3, mode='max', every_n_train_steps=args.save_step)
trainer = Trainer(callbacks=[checkpoint_callback], accelerator='gpu', gpus=1, max_steps=args.total_steps, log_every_n_steps=args.log_step, val_check_interval=args.eval_step, limit_val_batches=(args.limit_val_batches or 1.0), limit_train_batches=(args.limit_train_batches or 1.0), fast_dev_run=args.fast_dev_run)
last_ckpt = (save_to / 'last.ckpt')
if (args.not_resume or (not last_ckpt.is_file())):
last_ckpt = None
trainer.fit(lightning_task, train_dataloader, val_dataloaders=[valid_dataloader, test_dataloader], ckpt_path=last_ckpt)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--load_from', type=str, default='result/sv', help='The directory containing all the checkpoints')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
load_from = Path(args.load_from)
task: Task = Object.load_checkpoint((load_from / 'task.ckpt')).to(device)
task.eval()
test_dataset: Dataset = Object.load_checkpoint((load_from / 'test_dataset.ckpt'))
test_dataloader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)
with torch.no_grad():
for batch in test_dataloader:
batch: Output = batch.to(device)
result = task(**batch.subset('x', 'x_len', as_type='dict'))
print(result.hidden_states.shape)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--voxceleb1', type=str, default='/work/jason410/PublicData/Voxceleb1', help='The root directory of VoxCeleb1')
parser.add_argument('--save_to', type=str, default='result/sv', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=200)
parser.add_argument('--save_step', type=int, default=100)
parser.add_argument('--backbone', type=str, default='XVector')
parser.add_argument('--pooling_type', type=str, default='TAP')
parser.add_argument('--loss_type', type=str, default='softmax')
parser.add_argument('--spk_embd_dim', type=int, default=1500)
args = parser.parse_args()
return args
|
def main():
logging.basicConfig()
logger.setLevel(logging.INFO)
args = parse_args()
voxceleb1 = Path(args.voxceleb1)
assert voxceleb1.is_dir()
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(voxceleb1)
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True)
train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=6, collate_fn=train_dataset.collate_fn)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
valid_sampler = problem.TrainSampler(valid_dataset, max_timestamp=(16000 * 1000), shuffle=True)
valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=6, collate_fn=valid_dataset.collate_fn)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
test_sampler = problem.TestSampler(test_dataset, 8)
test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)
latest_task = (save_to / 'task.ckpt')
if latest_task.is_file():
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint(latest_task).to(device)
else:
logger.info('No last checkpoint found. Create new model')
upstream = S3PRLUpstream('apc')
downstream = problem.speaker_embedding_extractor(backbone=args.backbone, pooling_type=args.pooling_type, input_size=upstream.output_size, output_size=args.spk_embd_dim)
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model=model, categories=preprocessor.statistics().category, loss_type=args.loss_type, trials=test_dataset.statistics().label)
task = task.to(device)
optimizer = optim.Adam(task.parameters(), lr=0.001)
latest_optimizer = (save_to / 'optimizer.ckpt')
if latest_optimizer.is_file():
optimizer.load_state_dict(torch.load((save_to / 'optimizer.ckpt')))
else:
optimizer = optim.Adam(task.parameters(), lr=0.001)
pbar = tqdm(total=args.total_steps, desc='Total')
while True:
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
optimizer.zero_grad()
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
else:
optimizer.step()
cacheable_result = result.cacheable()
batch_results.append(cacheable_result)
if (((global_step + 1) % args.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
batch_results = []
if (((global_step + 1) % args.eval_step) == 0):
with torch.no_grad():
task.eval()
valid_results = []
for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
batch = batch.to(device)
result = task.valid_step(**batch)
cacheable_result = result.cacheable()
valid_results.append(cacheable_result)
logs: Logs = task.valid_reduction(valid_results).logs
logger.info(f'[Valid] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
test_results = []
for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
batch = batch.to(device)
result = task.test_step(**batch)
test_results.append(result)
logs: Logs = task.test_reduction(batch_results=test_results).logs
logger.info(f'[Test] step {global_step}')
for log in logs.values():
logger.info(f'{log.name}: {log.data}')
if (((global_step + 1) % args.save_step) == 0):
task.save_checkpoint((save_to / 'task.ckpt'))
torch.save(optimizer.state_dict(), (save_to / 'optimizer.ckpt'))
if (global_step >= args.total_steps):
logger.info('Reached total_steps. Training finished.')
exit(0)
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--voxceleb1', type=str, default='/work/jason410/PublicData/Voxceleb1', help='The root directory of VoxCeleb1')
parser.add_argument('--save_to', type=str, default='lightning_result/sv', help='The directory to save checkpoint')
parser.add_argument('--total_steps', type=int, default=200000)
parser.add_argument('--log_step', type=int, default=100)
parser.add_argument('--eval_step', type=int, default=1000)
parser.add_argument('--save_step', type=int, default=100)
parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
parser.add_argument('--limit_train_batches', type=int)
parser.add_argument('--limit_val_batches', type=int)
parser.add_argument('--fast_dev_run', action='store_true')
parser.add_argument('--backbone', type=str, default='XVector')
parser.add_argument('--pooling_type', type=str, default='TAP')
parser.add_argument('--loss_type', type=str, default='softmax')
parser.add_argument('--spk_embd_dim', type=int, default=1500)
args = parser.parse_args()
return args
|
def main():
logging.basicConfig()
logger.setLevel(logging.INFO)
args = parse_args()
voxceleb1 = Path(args.voxceleb1)
assert voxceleb1.is_dir()
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preprocessor(voxceleb1)
logger.info('Preparing train dataloader')
train_dataset = problem.TrainDataset(**preprocessor.train_data())
train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True)
train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=6, collate_fn=train_dataset.collate_fn)
logger.info('Preparing valid dataloader')
valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
valid_dataset.save_checkpoint((save_to / 'valid_dataset.ckpt'))
valid_sampler = problem.TrainSampler(valid_dataset, max_timestamp=(16000 * 1000), shuffle=True)
valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=6, collate_fn=valid_dataset.collate_fn)
logger.info('Preparing test dataloader')
test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
test_dataset.save_checkpoint((save_to / 'test_dataset.ckpt'))
test_sampler = problem.TestSampler(test_dataset, 8)
test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)
upstream = S3PRLUpstream('apc')
downstream = problem.speaker_embedding_extractor(backbone=args.backbone, pooling_type=args.pooling_type, input_size=upstream.output_size, output_size=args.spk_embd_dim)
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model=model, categories=preprocessor.statistics().category, loss_type=args.loss_type, trials=test_dataset.statistics().label)
optimizer = optim.Adam(task.parameters(), lr=0.001)
lightning_task = LightningModuleSimpleWrapper(task, optimizer)
checkpoint_callback = ModelCheckpoint(dirpath=str(save_to), filename='superb-sv-{step:02d}-{valid_0_accuracy:.2f}', monitor='valid_0_accuracy', save_last=True, save_top_k=3, mode='max', every_n_train_steps=args.save_step)
trainer = Trainer(callbacks=[checkpoint_callback], accelerator='gpu', gpus=1, max_steps=args.total_steps, log_every_n_steps=args.log_step, val_check_interval=args.eval_step, limit_val_batches=(args.limit_val_batches or 1.0), limit_train_batches=(args.limit_train_batches or 1.0), fast_dev_run=args.fast_dev_run)
last_ckpt = (save_to / 'last.ckpt')
if (args.not_resume or (not last_ckpt.is_file())):
last_ckpt = None
trainer.fit(lightning_task, train_dataloader, val_dataloaders=valid_dataloader, ckpt_path=last_ckpt)
trainer.test(lightning_task, dataloaders=test_dataloader, ckpt_path=last_ckpt)
|
def default_collate_fn(samples, padding_value: int=0):
'\n Each item in **DynamicItemDataset** is a dict\n This function pads (or converts into a numpy list) a batch of dicts\n\n Args:\n samples (List[dict]): Suppose each Container is in\n\n .. code-block:: yaml\n\n wav: a single waveform\n label: a single string\n\n Return:\n dict\n\n .. code-block:: yaml\n\n wav: padded waveforms\n label: np.array([a list of string labels])\n '
assert isinstance(samples[0], dict)
keys = samples[0].keys()
padded_samples = dict()
for key in keys:
values = [sample[key] for sample in samples]
if isinstance(values[0], int):
values = torch.LongTensor(values)
elif isinstance(values[0], float):
values = torch.FloatTensor(values)
elif isinstance(values[0], np.ndarray):
values = [torch.from_numpy(value).float() for value in values]
values = pad_sequence(values, batch_first=True, padding_value=padding_value)
elif isinstance(values[0], torch.Tensor):
values = pad_sequence(values, batch_first=True, padding_value=padding_value)
else:
values = np.array(values, dtype='object')
padded_samples[key] = values
return padded_samples
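# A minimal runnable sketch of default_collate_fn (assumes the function above is in scope):
# variable-length tensors are padded, ints become a LongTensor, and strings fall back to an
# object-dtype numpy array.
import torch
_samples = [
    {'wav': torch.randn(16000), 'label': 'yes', 'length': 16000},
    {'wav': torch.randn(8000), 'label': 'no', 'length': 8000},
]
_batch = default_collate_fn(_samples)
assert _batch['wav'].shape == (2, 16000)
assert _batch['length'].tolist() == [16000, 8000]
assert list(_batch['label']) == ['yes', 'no']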
|
class Corpus():
@property
@abc.abstractmethod
def all_data(self) -> dict:
raise NotImplementedError
@property
@abc.abstractmethod
def data_split_ids(self):
raise NotImplementedError
@property
def data_split(self):
(train_ids, valid_ids, test_ids) = self.data_split_ids
all_data = self.all_data
train_data = {idx: all_data[idx] for idx in train_ids}
valid_data = {idx: all_data[idx] for idx in valid_ids}
test_data = {idx: all_data[idx] for idx in test_ids}
return (train_data, valid_data, test_data)
@staticmethod
def dataframe_to_datapoints(df: pd.DataFrame, unique_name_fn: callable):
data_points = {}
for (_, row) in df.iterrows():
data_point = dict()
for (name, value) in row.items():
data_point[name] = value
unique_name = unique_name_fn(data_point)
data_points[unique_name] = data_point
assert (len(data_points) == len(df)), f'{len(data_points)} != {len(df)}'
return data_points
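# A small runnable sketch of Corpus.dataframe_to_datapoints (assumes the Corpus class above
# is in scope): each dataframe row becomes one data point keyed by unique_name_fn.
import pandas as pd
from pathlib import Path
_df = pd.DataFrame({'path': ['a/x1.wav', 'a/x2.wav'], 'label': ['yes', 'no']})
_points = Corpus.dataframe_to_datapoints(_df, unique_name_fn=lambda d: Path(d['path']).stem)
assert set(_points) == {'x1', 'x2'}
assert _points['x1']['label'] == 'yes'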
|
class FluentSpeechCommands(Corpus):
'\n Parse the Fluent Speech Command dataset\n\n Args:\n dataset_root: (str) The dataset root of Fluent Speech Command\n '
def __init__(self, dataset_root: str, n_jobs: int=4) -> None:
self.dataset_root = Path(dataset_root)
self.train = self.dataframe_to_datapoints(pd.read_csv(((self.dataset_root / 'data') / 'train_data.csv')), self._get_unique_name)
self.valid = self.dataframe_to_datapoints(pd.read_csv(((self.dataset_root / 'data') / 'valid_data.csv')), self._get_unique_name)
self.test = self.dataframe_to_datapoints(pd.read_csv(((self.dataset_root / 'data') / 'test_data.csv')), self._get_unique_name)
data_points = OrderedDict()
data_points.update(self.train)
data_points.update(self.valid)
data_points.update(self.test)
data_points = {key: self._parse_data(data) for (key, data) in data_points.items()}
self._all_data = data_points
@staticmethod
def _get_unique_name(data_point):
return Path(data_point['path']).stem
def _parse_data(self, data):
return dict(path=(self.dataset_root / data['path']), speakerId=data['speakerId'], transcription=data['transcription'], action=data['action'], object=data['object'], location=data['location'])
@property
def all_data(self):
"\n Return all the data points in a dict of the format\n\n .. code-block:: yaml\n\n data_id1:\n path: (str) The waveform path\n speakerId: (str) The speaker name\n transcription: (str) The transcription\n action: (str) The action\n object: (str) The action's targeting object\n location: (str) The location where the action happens\n\n data_id2:\n ...\n "
return self._all_data
@property
def data_split(self):
'\n Return a list:\n\n :code:`train_data`, :code:`valid_data`, :code:`test_data`\n\n each is a dict following the format specified in :obj:`all_data`\n '
return super().data_split
@property
def data_split_ids(self):
'\n Return a list:\n\n :code:`train_ids`, :code:`valid_ids`, :code:`test_ids`\n\n Each is a list containing data_ids. data_ids can be used as the key to access the :obj:`all_data`\n '
return (list(self.train.keys()), list(self.valid.keys()), list(self.test.keys()))
@classmethod
def download_dataset(cls, tgt_dir: str) -> None:
'\n Download and unzip the dataset to :code:`tgt_dir`/fluent_speech_commands_dataset\n\n Args:\n tgt_dir (str): The root directory containing many different datasets\n '
import os
import tarfile
import requests
tgt_dir = Path(tgt_dir)
tgt_dir.mkdir(exist_ok=True, parents=True)
def unzip_targz_then_delete(filepath: str):
with tarfile.open(os.path.abspath(filepath)) as tar:
tar.extractall(path=os.path.abspath(tgt_dir))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(tgt_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {os.path.abspath(filepath)}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
unzip_targz_then_delete(filepath)
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
if (not (os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'fluent_speech_commands_dataset/wavs')) and os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'fluent_speech_commands_dataset/data/speakers')))):
download_from_url('http://140.112.21.28:9000/fluent.tar.gz')
logger.info(f'Fluent speech commands dataset downloaded. Located at {os.path.abspath(tgt_dir)}/fluent_speech_commands_dataset/')
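# Hypothetical usage sketch (the dataset root below is an assumption, not a real path; it
# requires a local copy of the Fluent Speech Commands corpus):
# corpus = FluentSpeechCommands('/path/to/fluent_speech_commands_dataset')
# train_data, valid_data, test_data = corpus.data_split
# sample = next(iter(train_data.values()))
# sample['path'], sample['action'], sample['object'], sample['location']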
|
class IEMOCAP(Corpus):
'\n Parse the IEMOCAP dataset\n\n Args:\n dataset_root: (str) The dataset root of IEMOCAP\n '
def __init__(self, dataset_root: str, n_jobs: int=4) -> None:
self.dataset_root = Path(dataset_root)
self.sessions = [self._preprocess_single_session(self.dataset_root, session_id) for session_id in range(1, (IEMOCAP_SESSION_NUM + 1))]
self._all_data = dict()
for session in self.sessions:
self._all_data.update(session['improvised'])
self._all_data.update(session['scripted'])
@staticmethod
def _preprocess_single_session(dataset_root: Path, session_id: int):
data = dict(improvised={}, scripted={})
session_dir = (dataset_root / f'Session{session_id}')
label_dir = (session_dir / LABEL_DIR_PATH)
wav_root_dir = (session_dir / WAV_DIR_PATH)
wav_paths = find_files(wav_root_dir)
for wav_path in wav_paths:
wav_path = Path(wav_path)
spk_and_act_and_scene = wav_path.parts[(- 2)]
label_file = (label_dir / f'{spk_and_act_and_scene}.txt')
with label_file.open() as file:
content = file.read()
result = re.search(f'{str(wav_path.stem)} (.+) ', content)
speaker = spk_and_act_and_scene.split('_')[0]
act = ('improvised' if ('impro' in spk_and_act_and_scene) else 'scripted')
emotion = result.groups()[0]
unique_id = wav_path.stem
data[act][unique_id] = dict(wav_path=str(wav_path), speaker=speaker, act=act, emotion=emotion, session_id=session_id)
return data
@property
def all_data(self):
'\n Return:\n dict\n\n all the data points of IEMOCAP in the format of\n\n .. code-block:: yaml\n\n data_id1:\n wav_path (str): The waveform path\n speaker (str): The speaker name\n act (str): improvised / scripted\n emotion (str): The emotion label\n session_id (int): The session\n\n data_id2:\n ...\n '
return deepcopy(self._all_data)
def get_whole_session(self, session_id: int):
'\n Args:\n session_id (int): The session index selected from 1, 2, 3, 4, 5\n\n Return:\n dict\n\n data points in a single session (containing improvised and scripted recordings) in the\n same format as :obj:`all_data`\n '
output = dict()
output.update(self.get_session_with_act(session_id, 'improvised'))
output.update(self.get_session_with_act(session_id, 'scripted'))
return deepcopy(output)
def get_session_with_act(self, session_id: int, act: str):
"\n Args:\n session_id (int): The session index selected from 1, 2, 3, 4, 5\n act (str): 'improvised' or 'scripted'\n\n Return:\n :obj:`s3prl.base.container.Container`\n\n data points in a single session with a specific act (either improvised or scripted) in the\n same format as :obj:`all_data`\n "
assert (act in ['improvised', 'scripted'])
return deepcopy(self.sessions[(session_id - 1)][act])
@classmethod
def download_dataset(cls, tgt_dir: str) -> None:
import os
import tarfile
import requests
assert os.path.exists(os.path.abspath(tgt_dir)), 'Target directory does not exist'
def unzip_targz_then_delete(filepath: str):
with tarfile.open(os.path.abspath(filepath)) as tar:
tar.extractall(path=os.path.abspath(tgt_dir))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(tgt_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {os.path.abspath(filepath)}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
unzip_targz_then_delete(filepath)
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
if (not os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'IEMOCAP/'))):
download_from_url('http://140.112.21.28:9000/IEMOCAP.tar.gz')
logger.info(f'IEMOCAP dataset downloaded. Located at {os.path.abspath(tgt_dir)}/IEMOCAP/')
|
def read_text(file: Path) -> str:
src_file = ('-'.join(str(file).split('-')[:(- 1)]) + '.trans.txt')
idx = file.stem.replace('.flac', '')
with open(src_file, 'r') as fp:
for line in fp:
if (idx == line.split(' ')[0]):
return line[:(- 1)].split(' ', 1)[1]
logging.warning(f'Transcription of {file} not found!')
|
def check_no_repeat(splits: List[str]) -> bool:
count = defaultdict(int)
for split in splits:
count[split] += 1
repeated = ''
for (key, val) in count.items():
if (val > 1):
repeated += f' {key} ({val} times)'
if (len(repeated) != 0):
logging.warning(f'Found repeated splits in corpus: {repeated}, which might cause unexpected behaviors.')
return False
return True
|
def _parse_spk_to_gender(speaker_file: Path) -> dict:
speaker_file = Path(speaker_file)
with speaker_file.open() as file:
lines = [line.strip() for line in file.readlines()]
for line_id in range(len(lines)):
line = lines[line_id]
if (('SEX' in line) and ('SUBSET' in line) and ('MINUTES' in line) and ('NAME' in line)):
break
line_id += 1
spk2gender = {}
for line_id in range(line_id, len(lines)):
line = lines[line_id]
line = re.sub('\t+', ' ', line)
line = re.sub(' +', ' ', line)
parts = line.split('|', maxsplit=4)
(ID, SEX, SUBSET, MINUTES, NAME) = parts
spk2gender[int(ID)] = SEX.strip()
return spk2gender
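# Runnable sketch of _parse_spk_to_gender with a tiny, made-up SPEAKERS.TXT excerpt
# (the two speaker rows below are illustrative placeholders, not real metadata):
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as _tmp:
    _speaker_file = Path(_tmp) / 'SPEAKERS.TXT'
    _speaker_file.write_text(
        ';ID  |SEX| SUBSET           |MINUTES| NAME\n'
        '14   | F | train-clean-360  | 25.03 | Speaker A\n'
        '16   | M | train-clean-360  | 25.11 | Speaker B\n'
    )
    _spk2gender = _parse_spk_to_gender(_speaker_file)
    assert (_spk2gender[14] == 'F') and (_spk2gender[16] == 'M')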
|
class LibriLight(Corpus):
def __init__(self, dataset_root: str, n_jobs: int=4, train_split: str='10m-fold0') -> None:
self.dataset_root = Path(dataset_root).resolve()
self.train_split = train_split
if (train_split == '10h'):
roots = [(self.dataset_root / '1h'), (self.dataset_root / '9h')]
elif (train_split == '1h'):
roots = [(self.dataset_root / '1h')]
elif train_split.startswith('10m'):
fold_id = int(train_split.split('-')[(- 1)].split('fold')[(- 1)])
roots = [((self.dataset_root / '1h') / str(fold_id))]
else:
raise ValueError(f'Unsupported split: {train_split}')
self._data = self._collect_data(roots, n_jobs)
@classmethod
def download_dataset(cls, dataset_root: str):
Path(dataset_root).mkdir(parents=True, exist_ok=True)
subprocess.check_call(['wget', 'https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz', '-O', str((Path(dataset_root) / 'librispeech_finetuning.tgz'))])
subprocess.check_call(['tar', 'zxvf', 'librispeech_finetuning.tgz', '-C', str(Path(dataset_root))])
@property
def all_data(self):
return self._data
@staticmethod
def _collect_data(roots: List[Path], n_jobs: int=4) -> Dict[(str, Dict[(str, List[Any])])]:
spkr_file = _urls_to_filepaths(LIBRISPEECH_SPKR_INFO)
spkr2gender = _parse_spk_to_gender(Path(spkr_file).resolve())
data_dict = {}
for split_dir in roots:
if (not os.path.exists(split_dir)):
logging.info(f'Split {split_dir} is not downloaded. Skip data collection.')
continue
wav_list = list(Path(split_dir).rglob('*.flac'))
name_list = [file.stem.replace('.flac', '') for file in wav_list]
text_list = Parallel(n_jobs=n_jobs)((delayed(read_text)(file) for file in wav_list))
spkr_list = [int(name.split('-')[0]) for name in name_list]
for wav_id in range(len(wav_list)):
wav = Path(wav_list[wav_id])
data_dict[wav.stem] = {'wav_path': str(wav.resolve()), 'transcription': text_list[wav_id], 'speaker': spkr_list[wav_id], 'gender': spkr2gender[spkr_list[wav_id]]}
return data_dict
|
def read_text(file: Path) -> str:
src_file = ('-'.join(str(file).split('-')[:(- 1)]) + '.trans.txt')
idx = file.stem.replace('.flac', '')
with open(src_file, 'r') as fp:
for line in fp:
if (idx == line.split(' ')[0]):
return line[:(- 1)].split(' ', 1)[1]
logger.warning(f'Transcription of {file} not found!')
|
def check_no_repeat(splits: List[str]) -> bool:
count = defaultdict(int)
for split in splits:
count[split] += 1
repeated = ''
for (key, val) in count.items():
if (val > 1):
repeated += f' {key} ({val} times)'
if (len(repeated) != 0):
logger.warning(f'Found repeated splits in corpus: {repeated}, which might cause unexpected behaviors.')
return False
return True
|
def _parse_spk_to_gender(speaker_file: Path) -> dict:
speaker_file = Path(speaker_file)
with speaker_file.open() as file:
lines = [line.strip() for line in file.readlines()]
for line_id in range(len(lines)):
line = lines[line_id]
if (('SEX' in line) and ('SUBSET' in line) and ('MINUTES' in line) and ('NAME' in line)):
break
line_id += 1
spk2gender = {}
for line_id in range(line_id, len(lines)):
line = lines[line_id]
line = re.sub('\t+', ' ', line)
line = re.sub(' +', ' ', line)
parts = line.split('|', maxsplit=4)
(ID, SEX, SUBSET, MINUTES, NAME) = parts
spk2gender[int(ID)] = SEX.strip()
return spk2gender
|
class LibriSpeech(Corpus):
'LibriSpeech Corpus\n Link: https://www.openslr.org/12\n\n Args:\n dataset_root (str): Path to LibriSpeech corpus directory.\n n_jobs (int, optional): Number of jobs. Defaults to 4.\n train_split (List[str], optional): Training splits. Defaults to ["train-clean-100"].\n valid_split (List[str], optional): Validation splits. Defaults to ["dev-clean"].\n test_split (List[str], optional): Testing splits. Defaults to ["test-clean"].\n '
def __init__(self, dataset_root: str, n_jobs: int=4, train_split: List[str]=['train-clean-100'], valid_split: List[str]=['dev-clean'], test_split: List[str]=['test-clean']) -> None:
self.dataset_root = Path(dataset_root).resolve()
self.train_split = train_split
self.valid_split = valid_split
self.test_split = test_split
self.all_splits = ((train_split + valid_split) + test_split)
assert check_no_repeat(self.all_splits)
self.data_dict = self._collect_data(dataset_root, self.all_splits, n_jobs)
self.train = self._data_to_dict(self.data_dict, train_split)
self.valid = self._data_to_dict(self.data_dict, valid_split)
self.test = self._data_to_dict(self.data_dict, test_split)
self._data = OrderedDict()
self._data.update(self.train)
self._data.update(self.valid)
self._data.update(self.test)
def get_corpus_splits(self, splits: List[str]):
return self._data_to_dict(self.data_dict, splits)
@property
def all_data(self):
"\n Return all the data points in a dict of the format\n\n .. code-block:: yaml\n\n data_id1:\n wav_path: (str) The waveform path\n transcription: (str) The transcription\n speaker: (str) The speaker name\n gender: (str) The speaker's gender\n corpus_split: (str) The split of corpus this sample belongs to\n\n data_id2:\n ...\n "
return self._data
@property
def data_split_ids(self):
return (list(self.train.keys()), list(self.valid.keys()), list(self.test.keys()))
@staticmethod
def _collect_data(dataset_root: str, splits: List[str], n_jobs: int=4) -> Dict[(str, Dict[(str, List[Any])])]:
spkr2gender = _parse_spk_to_gender((Path(dataset_root) / 'SPEAKERS.TXT'))
data_dict = {}
for split in splits:
split_dir = os.path.join(dataset_root, split)
if (not os.path.exists(split_dir)):
logger.info(f'Split {split} is not downloaded. Skip data collection.')
continue
wav_list = list(Path(split_dir).rglob('*.flac'))
name_list = [file.stem.replace('.flac', '') for file in wav_list]
text_list = Parallel(n_jobs=n_jobs)((delayed(read_text)(file) for file in wav_list))
spkr_list = [int(name.split('-')[0]) for name in name_list]
(wav_list, name_list, text_list, spkr_list) = zip(*[(wav, name, text, spkr) for (wav, name, text, spkr) in sorted(zip(wav_list, name_list, text_list, spkr_list), key=(lambda x: x[1]))])
data_dict[split] = {'name_list': list(name_list), 'wav_list': list(wav_list), 'text_list': list(text_list), 'spkr_list': list(spkr_list), 'gender_list': [spkr2gender[spkr] for spkr in spkr_list]}
return data_dict
@staticmethod
def _data_to_dict(data_dict: Dict[(str, Dict[(str, List[Any])])], splits: List[str]) -> dict:
data = dict({name: {'wav_path': data_dict[split]['wav_list'][i], 'transcription': data_dict[split]['text_list'][i], 'speaker': data_dict[split]['spkr_list'][i], 'gender': data_dict[split]['gender_list'][i], 'corpus_split': split} for split in splits for (i, name) in enumerate(data_dict[split]['name_list'])})
return data
@classmethod
def download_dataset(cls, target_dir: str, splits: List[str]=['train-clean-100', 'dev-clean', 'test-clean']) -> None:
import os
import tarfile
import requests
target_dir = Path(target_dir)
target_dir.mkdir(exist_ok=True, parents=True)
def unzip_targz_then_delete(filepath: str):
with tarfile.open(os.path.abspath(filepath)) as tar:
tar.extractall(path=os.path.abspath(target_dir))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(target_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {os.path.abspath(filepath)}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
unzip_targz_then_delete(filepath)
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
for split in splits:
if (not os.path.exists(os.path.join(os.path.abspath(target_dir), ('Librispeech/' + split)))):
download_from_url((('https://www.openslr.org/resources/12/' + split) + '.tar.gz'))
logger.info((', '.join(splits) + f' downloaded. Located at {os.path.abspath(target_dir)}/Librispeech/'))
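# Hypothetical usage sketch (the dataset path is an assumption and requires a downloaded
# LibriSpeech copy):
# corpus = LibriSpeech('/path/to/LibriSpeech', train_split=['train-clean-100'])
# train_ids, valid_ids, test_ids = corpus.data_split_ids
# corpus.all_data[train_ids[0]]  # -> wav_path / transcription / speaker / gender / corpus_split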
|
class Quesst14():
def __init__(self, dataset_root: str):
dataset_root = Path(dataset_root)
self.doc_paths = self._english_audio_paths(dataset_root, 'language_key_utterances.lst')
self.dev_query_paths = self._english_audio_paths(dataset_root, f'language_key_dev.lst')
self.eval_query_paths = self._english_audio_paths(dataset_root, f'language_key_eval.lst')
self.n_dev_queries = len(self.dev_query_paths)
self.n_eval_queries = len(self.eval_query_paths)
self.n_docs = len(self.doc_paths)
@staticmethod
def _english_audio_paths(dataset_root_path, lst_name):
'Extract English audio paths.'
audio_paths = []
with open(((dataset_root_path / 'scoring') / lst_name)) as f:
for line in f:
(audio_path, lang) = tuple(line.strip().split())
if (lang != 'nnenglish'):
continue
audio_path = re.sub('^.*?\\/', '', audio_path)
audio_paths.append((dataset_root_path / audio_path))
return audio_paths
@property
def valid_queries(self):
return self.dev_query_paths
@property
def test_queries(self):
return self.eval_query_paths
@property
def docs(self):
'\n Valid and Test share the same document database\n '
return self.doc_paths
@classmethod
def download_dataset(cls, tgt_dir: str) -> None:
import os
import tarfile
import requests
assert os.path.exists(os.path.abspath(tgt_dir)), 'Target directory does not exist'
def unzip_targz_then_delete(filepath: str):
with tarfile.open(os.path.abspath(filepath)) as tar:
tar.extractall(path=os.path.abspath(tgt_dir))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(tgt_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {os.path.abspath(filepath)}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
unzip_targz_then_delete(filepath)
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
if (not os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'quesst14Database/'))):
download_from_url('https://speech.fit.vutbr.cz/files/quesst14Database.tgz')
logger.info(f'Quesst14 dataset downloaded. Located at {os.path.abspath(tgt_dir)}/quesst14Database/')
|
class SNIPS(Corpus):
def __init__(self, dataset_root: str, train_speakers: List[str], valid_speakers: List[str], test_speakers: List[str]) -> None:
self.dataset_root = Path(dataset_root)
self.train_speakers = train_speakers
self.valid_speakers = valid_speakers
self.test_speakers = test_speakers
self.data_dict = self._collect_data(self.dataset_root, train_speakers, valid_speakers, test_speakers)
self.train = self._data_to_dict(self.data_dict, ['train'])
self.valid = self._data_to_dict(self.data_dict, ['valid'])
self.test = self._data_to_dict(self.data_dict, ['test'])
self._data = OrderedDict()
self._data.update(self.train)
self._data.update(self.valid)
self._data.update(self.test)
@property
def all_data(self):
return self._data
@property
def data_split_ids(self):
return (list(self.train.keys()), list(self.valid.keys()), list(self.test.keys()))
@staticmethod
def _collect_data(dataset_root: str, train_speakers: List[str], valid_speakers: List[str], test_speakers: List[str]) -> Dict[(str, Dict[(str, Any)])]:
transcripts_file = open((dataset_root / 'all.iob.snips.txt')).readlines()
transcripts = {}
for line in transcripts_file:
line = line.strip().split(' ')
index = line[0]
sent = ' '.join(line[1:])
transcripts[index] = sent
data_dict = {}
for (split, speaker_list) in [('train', train_speakers), ('valid', valid_speakers), ('test', test_speakers)]:
wav_list = list((dataset_root / split).rglob('*.wav'))
(new_wav_list, name_list, spkr_list) = ([], [], [])
uf = 0
for i in trange(len(wav_list), desc='checking files'):
uid = wav_list[i].stem
if (uid in transcripts):
spkr = uid.split('-')[0]
if (spkr in speaker_list):
new_wav_list.append(str(wav_list[i]))
name_list.append(uid)
spkr_list.append(spkr)
else:
logging.info(f'{wav_list[i]} Not Found')
uf += 1
logging.info(('%d wav files with labels not found in the text file!' % uf))
wav_list = new_wav_list
logging.info(f'loaded audio from {len(speaker_list)} speakers {str(speaker_list)} with {len(wav_list)} examples.')
assert (len(wav_list) > 0), 'No data found @ {}'.format((dataset_root / split))
text_list = [transcripts[name] for name in name_list]
(wav_list, name_list, text_list, spkr_list) = zip(*[(wav, name, text, spkr) for (wav, name, text, spkr) in sorted(zip(wav_list, name_list, text_list, spkr_list), key=(lambda x: x[1]))])
data_dict[split] = {'name_list': name_list, 'wav_list': wav_list, 'text_list': text_list, 'spkr_list': spkr_list}
return data_dict
@staticmethod
def _data_to_dict(data_dict: Dict[(str, Dict[(str, List[Any])])], splits: List[str]) -> dict:
data = dict({name: {'wav_path': data_dict[split]['wav_list'][i], 'transcription': ' '.join(data_dict[split]['text_list'][i].split('\t')[0].strip().split(' ')[1:(- 1)]), 'iob': ' '.join(data_dict[split]['text_list'][i].split('\t')[1].strip().split(' ')[1:(- 1)]), 'intent': data_dict[split]['text_list'][i].split('\t')[1].strip().split(' ')[(- 1)], 'speaker': data_dict[split]['spkr_list'][i], 'corpus_split': split} for split in splits for (i, name) in enumerate(data_dict[split]['name_list'])})
return data
|
class SpeechCommandsV1(Corpus):
"\n Args:\n dataset_root (str): should contain a 'dev' sub-folder for the training/validation set\n and a 'test' sub-folder for the testing set\n "
def __init__(self, gsc1: str, gsc1_test: str, n_jobs: int=4) -> None:
train_dataset_root = Path(gsc1)
test_dataset_root = Path(gsc1_test)
(train_list, valid_list) = self.split_dataset(train_dataset_root)
train_list = self.parse_train_valid_data_list(train_list, train_dataset_root)
valid_list = self.parse_train_valid_data_list(valid_list, train_dataset_root)
test_list = self.parse_test_data_list(test_dataset_root)
self.train = self.list_to_dict(train_list)
self.valid = self.list_to_dict(valid_list)
self.test = self.list_to_dict(test_list)
self._data = OrderedDict()
self._data.update(self.train)
self._data.update(self.valid)
self._data.update(self.test)
@staticmethod
def split_dataset(root_dir: Union[(str, Path)], max_uttr_per_class=((2 ** 27) - 1)) -> Tuple[(List[Tuple[(str, str)]], List[Tuple[(str, str)]])]:
'Split Speech Commands into train and valid lists via hashing.\n\n Args:\n root_dir: speech commands dataset root dir\n max_uttr_per_class: predefined value in the original paper\n\n Return:\n train_list: [(class_name, audio_path), ...]\n valid_list: as above\n '
(train_list, valid_list) = ([], [])
for entry in Path(root_dir).iterdir():
if ((not entry.is_dir()) or (entry.name == '_background_noise_')):
continue
for audio_path in entry.glob('*.wav'):
speaker_hashed = re.sub('_nohash_.*$', '', audio_path.name)
hashed_again = hashlib.sha1(speaker_hashed.encode('utf-8')).hexdigest()
percentage_hash = ((int(hashed_again, 16) % (max_uttr_per_class + 1)) * (100.0 / max_uttr_per_class))
if (percentage_hash < 10):
valid_list.append((entry.name, audio_path))
elif (percentage_hash < 20):
pass
else:
train_list.append((entry.name, audio_path))
return (train_list, valid_list)
@staticmethod
def parse_train_valid_data_list(data_list, train_dataset_root: Path):
data = [((class_name, audio_path) if (class_name in CLASSES) else ('_unknown_', audio_path)) for (class_name, audio_path) in data_list]
data += [('_silence_', audio_path) for audio_path in Path(train_dataset_root, '_background_noise_').glob('*.wav')]
return data
@staticmethod
def parse_test_data_list(test_dataset_root: Path):
data = [(class_dir.name, audio_path) for class_dir in Path(test_dataset_root).iterdir() if class_dir.is_dir() for audio_path in class_dir.glob('*.wav')]
return data
@staticmethod
def path_to_unique_name(path: str):
return '/'.join(Path(path).parts[(- 2):])
@classmethod
def list_to_dict(cls, data_list):
data = dict({cls.path_to_unique_name(audio_path): {'wav_path': audio_path, 'class_name': class_name} for (class_name, audio_path) in data_list})
return data
@property
def all_data(self):
'\n Return:\n Container: id (str)\n wav_path (str)\n class_name (str)\n '
return self._data
@property
def data_split_ids(self):
return (list(self.train.keys()), list(self.valid.keys()), list(self.test.keys()))
@classmethod
def download_dataset(cls, tgt_dir: str) -> None:
import os
import tarfile
import requests
assert os.path.exists(os.path.abspath(tgt_dir)), 'Target directory does not exist'
def unzip_targz_then_delete(filepath: str, filename: str):
file_path = os.path.join(os.path.abspath(tgt_dir), 'CORPORA_DIR', filename.replace('.tar.gz', ''))
os.makedirs(file_path)
with tarfile.open(os.path.abspath(filepath)) as tar:
tar.extractall(path=os.path.abspath(file_path))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(tgt_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {os.path.abspath(filepath)}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
unzip_targz_then_delete(filepath, filename)
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
if (not os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'CORPORA_DIR/speech_commands_v0.01'))):
download_from_url('http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz')
if (not os.path.exists(os.path.join(os.path.abspath(tgt_dir), 'CORPORA_DIR/speech_commands_test_set_v0.01'))):
download_from_url('http://download.tensorflow.org/data/speech_commands_test_set_v0.01.tar.gz')
logger.info(f'Speech commands dataset downloaded. Located at {os.path.abspath(tgt_dir)}/CORPORA_DIR/')
|
class VoxCeleb1SID(Corpus):
def __init__(self, dataset_root: str, n_jobs: int=4, cache_root: str=CACHE_ROOT) -> None:
self.dataset_root = Path(dataset_root).resolve()
uid2split = self._get_standard_usage(self.dataset_root, cache_root)
self._split2uids = defaultdict(list)
for (uid, split) in uid2split.items():
self._split2uids[split].append(Path(uid.replace('/', '-')).stem)
uid2wavpath = self._find_wavs_with_uids(self.dataset_root, sorted(uid2split.keys()), n_jobs=n_jobs)
self._data = {Path(uid.replace('/', '-')).stem: {'wav_path': uid2wavpath[uid], 'label': self._build_label(uid)} for uid in uid2split.keys()}
@property
def all_data(self):
return self._data
@property
def data_split_ids(self):
return (self._split2uids['train'], self._split2uids['valid'], self._split2uids['test'])
@staticmethod
def _get_standard_usage(dataset_root: Path, cache_root: Path):
split_filename = SPLIT_FILE_URL.split('/')[(- 1)]
split_filepath = (Path(cache_root) / split_filename)
if (not split_filepath.is_file()):
with FileLock((str(split_filepath) + '.lock')):
os.system(f'wget {SPLIT_FILE_URL} -O {str(split_filepath)}')
standard_usage = [line.strip().split(' ') for line in open(split_filepath, 'r').readlines()]
def code2split(code: int):
splits = ['train', 'valid', 'test']
return splits[(code - 1)]
standard_usage = {uid: code2split(int(split)) for (split, uid) in standard_usage}
return standard_usage
@staticmethod
def _find_wavs_with_uids(dataset_root, uids, n_jobs=4):
def find_wav_with_uid(uid):
found_wavs = list(dataset_root.glob(f'*/wav/{uid}'))
assert (len(found_wavs) == 1)
return (uid, found_wavs[0])
uids_with_wavs = Parallel(n_jobs=n_jobs)((delayed(find_wav_with_uid)(uid) for uid in tqdm(uids, desc='Search wavs')))
uids2wav = {uid: wav for (uid, wav) in uids_with_wavs}
return uids2wav
@staticmethod
def _build_label(uid):
id_string = uid.split('/')[0]
label = f'speaker_{(int(id_string[2:]) - 10001)}'
return label
@classmethod
def download_dataset(cls, target_dir: str, splits: List[str]=['dev', 'test']) -> None:
tgt_dir = os.path.abspath(target_dir)
assert os.path.exists(tgt_dir), 'Target directory does not exist'
from zipfile import ZipFile
import requests
def unzip_then_delete(filepath: str, split: str):
assert os.path.exists(filepath), 'File not found!'
with ZipFile(filepath) as zipf:
zipf.extractall(path=os.path.join(tgt_dir, 'Voxceleb1', split))
os.remove(os.path.abspath(filepath))
def download_from_url(url: str, split: str):
filename = url.split('/')[(- 1)].replace(' ', '_')
filepath = os.path.join(tgt_dir, filename)
r = requests.get(url, stream=True)
if r.ok:
logger.info(f'Saving {filename} to {filepath}')
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=((1024 * 1024) * 10)):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
logger.info(f'{filename} successfully downloaded')
else:
logger.info(f'''Download failed: status code {r.status_code}
{r.text}''')
return filepath
def download_dev():
partpaths = []
for part in ['a', 'b', 'c', 'd']:
if os.path.exists(os.path.join(tgt_dir, f'vox1_dev_wav_parta{part}')):
logger.info(f'vox1_dev_wav_parta{part} exists, skip download')
partpaths.append(os.path.join(tgt_dir, f'vox1_dev_wav_parta{part}'))
continue
fp = download_from_url(f'https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_parta{part}', 'dev')
partpaths.append(fp)
zippath = os.path.join(tgt_dir, 'vox1_dev_wav.zip')
with open(zippath, 'wb') as outfile:
for f in partpaths:
with open(f, 'rb') as infile:
for line in infile:
outfile.write(line)
for f in partpaths:
os.remove(f)
unzip_then_delete(zippath, 'dev')
for split in splits:
if (not os.path.exists(os.path.join(tgt_dir, (('Voxceleb1/' + split) + '/wav')))):
if (split == 'dev'):
download_dev()
else:
filepath = download_from_url('https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip', 'test')
unzip_then_delete(filepath, 'test')
logger.info(f'Voxceleb1 dataset downloaded. Located at {tgt_dir}/Voxceleb1/')
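# Worked example of the label convention in _build_label above: VoxCeleb1 speaker ids start
# at id10001, so they map onto speaker_0, speaker_1, ... (the utterance paths are illustrative).
assert VoxCeleb1SID._build_label('id10001/xxxxxxxxxxx/00001.wav') == 'speaker_0'
assert VoxCeleb1SID._build_label('id10309/xxxxxxxxxxx/00002.wav') == 'speaker_308'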
|
class VoxCeleb1SV(Corpus):
def __init__(self, dataset_root: str, download_dir: str, force_download: bool=True) -> None:
self.dataset_root = Path(dataset_root).resolve()
(train_path, valid_path, test_path, speakerid2label) = self.format_path(self.dataset_root, download_dir, force_download)
self.categories = speakerid2label
self.train_data = self.path2data(train_path, speakerid2label)
self.valid_data = self.path2data(valid_path, speakerid2label)
self.test_data = {self.path2uid(path): {'wav_path': path, 'label': None} for path in test_path}
self.test_trials = self.format_test_trials(download_dir, force_download)
@classmethod
def path2uid(cls, path):
return '-'.join(Path(path).parts[(- 3):])
@classmethod
def path2data(cls, paths, speakerid2label):
data = {cls.path2uid(path): {'wav_path': path, 'label': speakerid2label[Path(path).parts[(- 3)]]} for path in paths}
return data
@staticmethod
def format_path(dataset_root, download_dir, force_download: bool):
split_filename = SPLIT_FILE_URL.split('/')[(- 1)]
split_filepath = (Path(download_dir) / split_filename)
_download(split_filepath, SPLIT_FILE_URL, refresh=force_download)
usage_list = open(split_filepath, 'r').readlines()
(train, valid, test) = ([], [], [])
test_list = [item for item in usage_list if (int(item.split(' ')[1].split('/')[0][2:]) in range(10270, 10310))]
usage_list = list(set(usage_list).difference(set(test_list)))
test_list = [item.split(' ')[1] for item in test_list]
logging.info('search specified wav name for each split')
speakerids = []
for string in tqdm(usage_list, desc='Search train, dev wavs'):
pair = string.split()
index = pair[0]
x = list(dataset_root.glob(('dev/wav/' + pair[1])))
speakerStr = pair[1].split('/')[0]
if (speakerStr not in speakerids):
speakerids.append(speakerStr)
if ((int(index) == 1) or (int(index) == 3)):
train.append(str(x[0]))
elif (int(index) == 2):
valid.append(str(x[0]))
else:
raise ValueError
speakerids = sorted(speakerids)
speakerid2label = {}
for (idx, spk) in enumerate(speakerids):
speakerid2label[spk] = idx
for string in tqdm(test_list, desc='Search test wavs'):
x = list(dataset_root.glob(('test/wav/' + string.strip())))
test.append(str(x[0]))
logging.info(f'finish searching wav: train {len(train)}; valid {len(valid)}; test {len(test)} files found')
return (train, valid, test, speakerid2label)
@classmethod
def format_test_trials(cls, download_dir: str, force_download: bool):
trial_filename = TRIAL_FILE_URL.split('/')[(- 1)]
trial_filepath = (Path(download_dir) / trial_filename)
_download(trial_filepath, TRIAL_FILE_URL, refresh=force_download)
trial_list = open(trial_filepath, 'r').readlines()
test_trials = []
for string in tqdm(trial_list, desc='Prepare testing trials'):
pair = string.split()
test_trials.append((int(pair[0]), cls.path2uid(pair[1]), cls.path2uid(pair[2])))
return test_trials
@property
def all_data(self):
return (self.train_data, self.valid_data, self.test_data, self.test_trials)
@property
def data_split_ids(self):
return None
|
class Dataset(data.Dataset):
def __len__(self) -> int:
raise NotImplementedError
def __getitem__(self, index: int):
raise NotImplementedError
def getinfo(self, index: int):
raise NotImplementedError
|
class EncodeCategory(Dataset):
def __init__(self, labels: List[str], encoder: CategoryEncoder) -> None:
super().__init__()
self.labels = labels
self.encoder = encoder
def __len__(self):
return len(self.labels)
def __getitem__(self, index: int):
label = self.labels[index]
return {'label': label, 'class_id': self.encoder.encode(label)}
|
class EncodeCategories(Dataset):
def __init__(self, labels: List[List[str]], encoders: CategoryEncoders) -> None:
super().__init__()
self.labels = labels
self.encoders = encoders
def __len__(self):
return len(self.labels)
def __getitem__(self, index: int):
labels = self.labels[index]
return {'labels': labels, 'class_ids': torch.LongTensor(self.encoders.encode(labels))}
|
class EncodeMultiLabel(Dataset):
def __init__(self, labels: List[List[str]], encoder: CategoryEncoder) -> None:
super().__init__()
self.labels = labels
self.encoder = encoder
def __len__(self):
return len(self.labels)
@staticmethod
def label_to_binary_vector(label_ids: List[int], num_labels: int) -> torch.Tensor:
if (len(label_ids) == 0):
binary_labels = torch.zeros((num_labels,), dtype=torch.float)
else:
binary_labels = torch.zeros((num_labels,)).scatter(0, torch.tensor(label_ids), 1.0)
assert (set(torch.where((binary_labels == 1.0))[0].numpy()) == set(label_ids))
return binary_labels
def __getitem__(self, index: int):
labels = self.labels[index]
label_ids = [self.encoder.encode(label) for label in labels]
binary_labels = self.label_to_binary_vector(label_ids, len(self.encoder))
return {'labels': labels, 'binary_labels': binary_labels}
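# Runnable sketch of label_to_binary_vector above: class ids become a multi-hot vector.
_vec = EncodeMultiLabel.label_to_binary_vector([0, 3], num_labels=5)
assert _vec.tolist() == [1.0, 0.0, 0.0, 1.0, 0.0]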
|
class EncodeText(Dataset):
def __init__(self, text: List[str], tokenizer: Tokenizer, iob: List[str]=None) -> None:
super().__init__()
self.text = text
self.iob = iob
if (iob is not None):
assert (len(text) == len(iob))
self.tokenizer = tokenizer
def __len__(self):
return len(self.text)
def __getitem__(self, index: int):
text = self.text[index]
if (self.iob is not None):
iob = self.iob[index]
tokenized_ids = self.tokenizer.encode(text, iob)
text = self.tokenizer.decode(tokenized_ids)
else:
tokenized_ids = self.tokenizer.encode(text)
return {'labels': text, 'class_ids': torch.LongTensor(tokenized_ids)}
|
def get_info(dataset, names: List[str], cache_dir: str=None, n_jobs: int=6):
logger.info(f"Getting info from dataset {dataset.__class__.__qualname__}: {' '.join(names)}")
if isinstance(cache_dir, (str, Path)):
logger.info(f'Using cached info in {cache_dir}')
cache_dir: Path = Path(cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)
try:
data = dataset.getinfo(0)
for name in names:
assert (name in data)
except:
fn = dataset.__getitem__
else:
fn = dataset.getinfo
def _get(idx):
if isinstance(cache_dir, (str, Path)):
cache_path: Path = (Path(cache_dir) / f'{idx}.json')
if cache_path.is_file():
with cache_path.open() as f:
cached = json.load(f)
all_presented = True
for name in names:
if (name not in cached):
all_presented = False
if all_presented:
return cached
data = fn(idx)
info = {}
for name in names:
info[name] = data[name]
if isinstance(cache_dir, (str, Path)):
cache_path: Path = (Path(cache_dir) / f'{idx}.json')
with cache_path.open('w') as f:
json.dump(info, f)
return info
infos = Parallel(n_jobs=n_jobs, backend='threading')((delayed(_get)(idx) for idx in tqdm(range(len(dataset)))))
organized_info = defaultdict(list)
for info in infos:
for (k, v) in info.items():
organized_info[k].append(v)
output = []
for name in names:
output.append(organized_info[name])
if (len(output) == 1):
return output[0]
else:
return output
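# A minimal runnable sketch of get_info above (no cache_dir, threading backend): any object
# with __len__ plus getinfo/__getitem__ returning dicts works; _ToyInfoSource is a made-up stand-in.
class _ToyInfoSource:
    def __init__(self, labels):
        self.labels = labels
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        return {'wav': [0.0], 'label': self.labels[index]}
    def getinfo(self, index):
        return {'label': self.labels[index]}

_labels = get_info(_ToyInfoSource(['yes', 'no']), names=['label'], n_jobs=1)
assert _labels == ['yes', 'no']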
|
class CategoryEncoder():
def __init__(self, category: List[str]) -> None:
self.category = list(sorted(set(category)))
def __len__(self) -> int:
return len(self.category)
def encode(self, label: str) -> int:
return self.category.index(label)
def decode(self, index: int) -> str:
return self.category[index]
|
class CategoryEncoders():
def __init__(self, categories: List[List[str]]) -> None:
self.categories = [CategoryEncoder(c) for c in categories]
def __len__(self) -> int:
return sum([len(c) for c in self.categories])
def __iter__(self):
for category in self.categories:
(yield category)
def encode(self, labels: List[str]) -> List[int]:
assert (len(labels) == len(self.categories))
return [encoder.encode(label) for (label, encoder) in zip(labels, self.categories)]
def decode(self, indices: List[int]) -> List[str]:
return [encoder.decode(index) for (index, encoder) in zip(indices, self.categories)]
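# Runnable sketch of CategoryEncoder / CategoryEncoders above: categories are de-duplicated
# and sorted, and encode/decode are plain index lookups.
_encoder = CategoryEncoder(['dog', 'cat', 'dog'])
assert _encoder.category == ['cat', 'dog']
assert (_encoder.encode('dog') == 1) and (_encoder.decode(0) == 'cat')
_encoders = CategoryEncoders([['a', 'b'], ['1', '2', '3']])
assert _encoders.encode(['b', '3']) == [1, 2]
assert _encoders.decode([1, 2]) == ['b', '3']
assert len(_encoders) == 5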
|
def parse_lexicon(line: str) -> Tuple[(str, List[str])]:
line = line.replace('\t', ' ')
(word, *phonemes) = line.split()
return (word, phonemes)
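# Quick runnable sketch of parse_lexicon above: a lexicon line becomes (word, phoneme list).
assert parse_lexicon('HELLO HH AH L OW') == ('HELLO', ['HH', 'AH', 'L', 'OW'])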
|
def read_lexicon_files(file_list: List[str]) -> Dict[(str, List[str])]:
w2p_dict = defaultdict(list)
for file in file_list:
with open(file, 'r') as fp:
lines = [line.strip() for line in fp]
for line in lines:
(word, phonemes) = parse_lexicon(line)
w2p_dict[word].append(phonemes)
w2p = {}
for (word, phonemes_all) in w2p_dict.items():
if (len(phonemes_all) > 1):
logging.info(f'{len(phonemes_all)} phoneme sequences found for {word}.')
for (i, phonemes) in enumerate(phonemes_all):
logging.info(f'{i}. {phonemes}')
w2p[word] = phonemes_all[0]
logging.info('Taking the first phoneme sequence for deterministic behavior.')
return w2p
|
class G2P():
'Grapheme-to-phoneme\n\n Args:\n file_list (List[str], optional): List of lexicon files. Defaults to None.\n allow_unk (bool): If false, raise Error when a word can not be recognized by this basic G2P\n '
def __init__(self, file_list: List[str]=None, allow_unk: bool=False):
self.allow_unk = allow_unk
if (file_list is None):
file_list = _urls_to_filepaths(*DEFAULT_LEXICON_URL)
self.word2phone = read_lexicon_files(file_list)
def encode(self, text: str) -> str:
'Converts grapheme-based sentences to phonemes\n\n Args:\n text (str): Sentence\n\n Returns:\n str: Phonemized sentence\n '
word_list = text.strip().upper().split(' ')
phonemes = []
for word in word_list:
if (not self.allow_unk):
assert (word in self.word2phone)
phonemes += self.word2phone.get(word, ['<UNK>'])
return ' '.join(phonemes)
|
class Tokenizer():
def __init__(self):
super().__init__()
@abc.abstractmethod
def encode(self, text: str, iob: str=None) -> List[int]:
raise NotImplementedError
@abc.abstractmethod
def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
raise NotImplementedError
def __len__(self):
return self.vocab_size
@abc.abstractproperty
def vocab_size(self) -> int:
raise NotImplementedError
@abc.abstractproperty
def token_type(self) -> str:
raise NotImplementedError
@abc.abstractclassmethod
def load_from_file(cls, vocab_file: str):
raise NotImplementedError
@property
def pad_idx(self) -> int:
return 0
@property
def eos_idx(self) -> int:
return 1
@property
def unk_idx(self) -> int:
return 2
def __repr__(self) -> str:
return '<{} vocab_size={}>'.format(type(self).__name__, self.vocab_size)
|
class CharacterTokenizer(Tokenizer):
'Character tokenizer.'
def __init__(self, vocab_list: List[str]=None):
super().__init__()
if (vocab_list is None):
vocab_list = CHARACTER_VOCAB
for tok in ['<pad>', '<eos>', '<unk>']:
assert (tok not in vocab_list)
self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
def encode(self, s: str) -> List[int]:
s = s.strip('\r\n ')
return ([self.vocab_to_idx(v) for v in s] + [self.eos_idx])
def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
vocabs = []
for (t, idx) in enumerate(idxs):
v = self.idx_to_vocab(idx)
if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
continue
elif (idx == self.eos_idx):
break
else:
vocabs.append(v)
return ''.join(vocabs)
@classmethod
def load_from_file(cls, vocab_file: str=None, vocab_list: List[str]=None):
if (vocab_file is not None):
with open(vocab_file, 'r') as f:
vocab_list = [line.strip('\r\n') for line in f]
elif (vocab_list is not None):
pass
else:
raise ValueError('No vocabulary information given, please specify either vocab_file or vocab_list.')
return cls(vocab_list)
@property
def vocab_size(self) -> int:
return len(self._vocab_list)
@property
def token_type(self) -> str:
return 'character'
def vocab_to_idx(self, vocab):
return self._vocab2idx.get(vocab, self.unk_idx)
def idx_to_vocab(self, idx):
return self._vocab_list[idx]
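# Runnable sketch of CharacterTokenizer above: indices 0/1/2 are reserved for
# <pad>/<eos>/<unk>, encode appends <eos>, and decode stops at the first <eos>.
_tok = CharacterTokenizer(vocab_list=list(' abcdefghijklmnopqrstuvwxyz'))
_ids = _tok.encode('hi')
assert _ids[-1] == _tok.eos_idx
assert _tok.decode(_ids) == 'hi'
assert _tok.decode([_tok.unk_idx]) == '<unk>'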
|
class CharacterSlotTokenizer(Tokenizer):
'Character tokenizer with slots.'
def __init__(self, vocab_list: List[str], slots: List[str]):
super().__init__()
for tok in ['<pad>', '<eos>', '<unk>']:
assert (tok not in vocab_list)
self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
self.space_idx = self.vocab_to_idx(' ')
self.slots = slots
self.slot2id = {self.slots[i]: (i + len(self._vocab_list)) for i in range(len(self.slots))}
self.id2slot = {(i + len(self._vocab_list)): self.slots[i] for i in range(len(self.slots))}
def encode(self, sent: str, iobs: str) -> List[int]:
sent = sent.strip('\r\n ')
iobs = iobs.strip('\r\n ')
sent = re.sub(' +', ' ', sent).strip(' ')
sent = sent.split(' ')
iobs = iobs.split(' ')
assert (len(sent) == len(iobs)), f'transcription and iobs should have same number of words (split by space)'
if (sent[0] == 'BOS'):
sent = sent[1:]
iobs = iobs[1:]
if (sent[(- 1)] == 'EOS'):
sent = sent[:(- 1)]
iobs = iobs[:(- 1)]
tokens = []
for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
tokens.append(self.slot2id[('B-' + iob)])
tokens += [self.vocab_to_idx(v) for v in wrd]
if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
tokens.append(self.slot2id[('E-' + iob)])
if (i == (len(sent) - 1)):
tokens.append(self.eos_idx)
elif ((len(tokens) > 0) and (tokens[(- 1)] != self.space_idx)):
tokens.append(self.space_idx)
assert (tokens[(- 1)] == self.eos_idx)
return tokens
def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
vocabs = []
for (t, idx) in enumerate(idxs):
v = self.idx_to_vocab(idx)
if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
continue
elif (idx == self.eos_idx):
break
else:
vocabs.append(v)
return ''.join(vocabs)
@classmethod
def load_from_file(cls, vocab_file: str, slots_file: str):
with open(vocab_file, 'r') as f:
vocab_list = [line.strip('\r\n') for line in f]
org_slots = open(slots_file).read().split('\n')
slots = []
for slot in [slot for slot in org_slots if (slot != 'O')]:
slots.append(('B-' + slot))
slots.append(('E-' + slot))
return cls(vocab_list, slots)
@property
def vocab_size(self) -> int:
return (len(self._vocab_list) + len(self.slots))
@property
def token_type(self) -> str:
return 'character-slot'
def vocab_to_idx(self, vocab):
return self._vocab2idx.get(vocab, self.unk_idx)
def idx_to_vocab(self, idx):
idx = int(idx)
if (idx < len(self._vocab_list)):
return self._vocab_list[idx]
else:
token = self.id2slot[idx]
if (token[0] == 'B'):
return (token + ' ')
elif (token[0] == 'E'):
return (' ' + token)
else:
raise ValueError(f'id2slot got an unexpected token: {token}')
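# Runnable sketch of CharacterSlotTokenizer above: slot boundaries are emitted as extra
# B-/E- tokens around the characters of slot-valued words (the slot name below is made up).
_slot_tok = CharacterSlotTokenizer(vocab_list=list(' abcdefghijklmnopqrstuvwxyz'), slots=['B-music_genre', 'E-music_genre'])
_ids = _slot_tok.encode('play jazz music', 'O music_genre O')
assert _slot_tok.decode(_ids) == 'play B-music_genre jazz E-music_genre music'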
|