code
stringlengths
17
6.64M
def boolean_string(s):
    """Parse the exact strings 'True'/'False' into a bool.

    Intended as an argparse ``type=`` callback; any other input is rejected.

    Raises:
        ValueError: if `s` is neither 'True' nor 'False'.
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def get_preprocess_args():
    """Parse command-line options for LibriSpeech acoustic-feature preprocessing.

    Returns:
        argparse.Namespace with data/output paths, feature settings, delta
        options, CMVN flag, job count and output-directory name.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/LibriSpeech', type=str, help='Path to raw LibriSpeech dataset')
    parser.add_argument('--output_path', default='./data/', type=str, help='Path to store output', required=False)
    parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    # boolean_string (defined in this file) enforces literal 'True'/'False' values.
    parser.add_argument('--delta', default=True, type=boolean_string, help='Append Delta', required=False)
    parser.add_argument('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    # The string 'None' (default) means: auto-name the output dir from feature type/dim.
    parser.add_argument('--name', default='None', type=str, help='Name of the output directory', required=False)
    args = parser.parse_args()
    return args
def acoustic_preprocess(args, tr_set, dim):
    """Extract acoustic features for each LibriSpeech split in `tr_set`.

    For every split, finds all .flac files, extracts features in parallel
    (saving one .npy per utterance), and writes a '<split>.csv' table of
    file paths sorted by descending length.

    Args:
        args: parsed CLI namespace from get_preprocess_args().
        tr_set: list of split names (subdirectories of args.data_path).
        dim: feature dimension, used only to auto-name the output directory.
    """
    for s in tr_set:
        print('')
        print('Preprocessing', s, 'data...', end='')
        todo = list(Path(os.path.join(args.data_path, s)).rglob('*.flac'))
        print(len(todo), 'audio files found in', s)
        # Output dir: auto-named 'libri_<type><dim>' unless --name was given.
        if (args.name == 'None'):
            output_dir = os.path.join(args.output_path, '_'.join(['libri', (str(args.feature_type) + str(dim))]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        cur_path = os.path.join(output_dir, s)
        if (not os.path.exists(cur_path)):
            os.makedirs(cur_path)
        print('Extracting acoustic feature...', flush=True)
        # extract_feature (defined elsewhere in this file) saves each npy via
        # save_feature= and returns the utterance length collected into tr_x.
        tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, delta=args.delta, delta_delta=args.delta_delta, cmvn=args.apply_cmvn, save_feature=os.path.join(cur_path, str(file).split('/')[(- 1)].replace('.flac', ''))) for file in tqdm(todo)))
        # csv rows ordered by descending length (bucket-friendly ordering).
        sorted_todo = [os.path.join(s, str(todo[idx]).split('/')[(- 1)].replace('.flac', '.npy')) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(reversed(sorted(tr_x))), 'label': 'None'})
        df.to_csv(os.path.join(output_dir, (s + '.csv')))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Interactive driver: compute the feature dimension, list the LibriSpeech
    splits, ask the user which to process, then run acoustic_preprocess."""
    args = get_preprocess_args()
    # Dimension grows once per appended delta order.
    # num_mels / num_mfcc / num_freq are module-level constants (defined elsewhere).
    mel_dim = (num_mels * ((1 + int(args.delta)) + int(args.delta_delta)))
    mfcc_dim = (num_mfcc * ((1 + int(args.delta)) + int(args.delta_delta)))
    dim = (num_freq if (args.feature_type == 'linear') else (mfcc_dim if (args.feature_type == 'mfcc') else mel_dim))
    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)
    print('Data sets :')
    sets = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    for (idx, s) in enumerate(sets):
        print('\t', idx, ':', s)
    tr_set = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [sets[int(t)] for t in tr_set.split(' ')]
    acoustic_preprocess(args, tr_set, dim)
def boolean_string(s):
    """argparse helper: map the literals 'True'/'False' to bool, reject anything else."""
    valid = {'True': True, 'False': False}
    if s not in valid:
        raise ValueError('Not a valid boolean string')
    return valid[s]
def bracket_underscore(string):
    """Convert an SDK-style segment id 'name[i]' to its underscore form 'name_{i+1}'.

    The bracketed index is 0-based; the underscore form is 1-based.
    """
    name, _, rest = string.partition('[')
    index = int(rest.partition(']')[0])
    return '{}_{}'.format(name, index + 1)
def underscore_bracket(string):
    """Convert 'name_{i}<4-char extension>' back to the SDK form 'name[{i-1}]'.

    Inverse of bracket_underscore for filenames carrying a 4-character
    extension such as '.npy'.
    """
    parts = string.split('_')
    index = int(parts[(- 1)][:(- 4)]) - 1
    return '_'.join(parts[:(- 1)]) + '[' + str(index) + ']'
def get_preprocess_args():
    """Parse command-line options for MOSEI segmented-flac feature extraction.

    Returns:
        argparse.Namespace with the flac/output paths, feature type, CMVN
        flag and parallel job count.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--flac_path', default='../../data/mosei/flac', type=str, help='Path to MOSEI segmented FLAC files')
    parser.add_argument('--output_path', default='../../data/mosei', type=str, help='Path to store segmented npys', required=False)
    parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    # boolean_string (defined in this file) enforces literal 'True'/'False' values.
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    args = parser.parse_args()
    return args
def extract_mosei(args, dim):
    """Extract acoustic features for every segmented MOSEI flac file.

    Saves one .npy per utterance under <output_path>/<feature_type><dim>/.
    Interactively asks before removing a pre-existing feature directory.
    """
    assert os.path.exists(args.flac_path), f'{args.flac_path} not exists'
    todo = list(Path(args.flac_path).glob('*.flac'))
    print(len(todo), 'audio files found in MOSEI')
    assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
    if (not os.path.exists(args.output_path)):
        os.makedirs(args.output_path)
    npy_dir = os.path.join(args.output_path, (str(args.feature_type) + str(dim)))
    # Refuse to silently overwrite an existing feature directory.
    for target_dir in [npy_dir]:
        if os.path.exists(target_dir):
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if (decision.upper() == 'Y'):
                print(f'Removing {target_dir}')
                shutil.rmtree(target_dir)
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)
    print('Extracting acoustic feature...', flush=True)
    # extract_feature (defined elsewhere in this file) saves each npy and
    # returns its length; results are collected (but unused here).
    tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, cmvn=args.apply_cmvn, save_feature=os.path.join(npy_dir, str(file).split('/')[(- 1)].replace('.flac', ''))) for file in tqdm(todo)))
def main():
    """Entry point: pick the feature dimension and run the MOSEI extraction.

    num_freq / mel_dim are module-level constants (defined elsewhere in the file).
    """
    args = get_preprocess_args()
    dim = (num_freq if (args.feature_type == 'linear') else mel_dim)
    extract_mosei(args, dim)
def get_preprocess_args():
    """Parse CLI options locating the MOSEI feature npys and the summary csv.

    Returns:
        argparse.Namespace with npy_path and csv_path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--npy_path', default='../../data/mosei/mel160', type=str, help='Path to MOSEI segmented NPY files')
    parser.add_argument('--csv_path', default='../../data/mosei/mosei_no_semi.csv', type=str, help='Path to mosei_no_semi.csv', required=False)
    args = parser.parse_args()
    return args
def add_length(args):
    """Append a 'length' column to the MOSEI summary csv.

    For each row, the length is the first dimension (frame count) of the
    utterance's feature npy located at <npy_path>/<key>.npy. The csv at
    args.csv_path is rewritten in place.
    """
    table = pd.read_csv(args.csv_path)
    table['length'] = [
        np.load(os.path.join(args.npy_path, (row.key + '.npy'))).shape[0]
        for (_, row) in table.iterrows()
    ]
    table.to_csv(args.csv_path, index=False)
def main():
    """Entry point: parse arguments, then fill in the utterance lengths."""
    add_length(get_preprocess_args())
def bracket_underscore(string):
    """'name[i]' -> 'name_{i+1}': shift the 0-based bracketed index to the
    1-based underscore naming used for segment files."""
    head, tail = string.split('[', 1)
    number = int(tail.split(']')[0])
    return head + '_' + str(number + 1)
def underscore_bracket(string):
    """'name_{i}<4-char extension>' -> 'name[{i-1}]': inverse of
    bracket_underscore for filenames with a 4-char extension like '.npy'."""
    *name_parts, last = string.split('_')
    return '%s[%d]' % ('_'.join(name_parts), int(last[:(- 4)]) - 1)
def get_preprocess_args():
    """Parse CLI options for segmenting the raw MOSEI wav recordings.

    Returns:
        argparse.Namespace with the raw-wav data path and the output path
        (which must already contain mosei_no_semi.csv).
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='/home/leo/d/datasets/MOSEI/Raw/Audio/Full/WAV_16000', type=str, help='Path to MOSEI non-segmented WAV files')
    parser.add_argument('--output_path', default='../../data/mosei', type=str, help='Path to store segmented flac and npys. Should already contains mosei_no_semi.csv', required=False)
    args = parser.parse_args()
    return args
def segment_mosei(args):
    """Cut full MOSEI wav recordings into per-utterance flac segments.

    Segment boundaries come from <output_path>/mosei_no_semi.csv (columns
    key, filename, start, end). Each segment is exported as 16 kHz mono
    16-bit flac named '<key>.flac' under <output_path>/flac/.
    """
    output_dir = args.output_path
    mosei_summary = os.path.join(output_dir, 'mosei_no_semi.csv')
    flac_dir = os.path.join(output_dir, 'flac')
    assert os.path.exists(mosei_summary), 'Output path should already be created with a mosei_no_semi.csv inside it'
    # Refuse to silently overwrite an existing segment directory.
    for target_dir in [flac_dir]:
        if os.path.exists(target_dir):
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if (decision.upper() == 'Y'):
                shutil.rmtree(target_dir)
                print(f'{target_dir} removed')
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)
    df = pd.read_csv(mosei_summary)
    for (index, row) in df.iterrows():
        underscore = row.key
        wavname = f'{row.filename}.wav'
        wavpath = os.path.join(args.data_path, wavname)
        assert os.path.exists(wavpath), f'wav not exists: {wavpath}'
        wav = AudioSegment.from_wav(wavpath)
        # row.start / row.end are in seconds; AudioSegment slices in milliseconds.
        start = int((row.start * 1000))
        end = int((row.end * 1000))
        assert (start >= 0), f'{underscore} has negative start time'
        assert (end >= 0), f'{underscore} has negative end time'
        seg_wav = wav[start:end]
        seg_flacpath = os.path.join(flac_dir, f'{underscore}.flac')
        seg_wav.export(seg_flacpath, format='flac', parameters=['-ac', '1', '-sample_fmt', 's16', '-ar', '16000'])
def main():
    """Entry point: parse arguments and segment the MOSEI recordings."""
    segment_mosei(get_preprocess_args())
def boolean_string(s):
    """Strict bool parser for argparse: only the strings 'True' or 'False' are accepted."""
    if s != 'True' and s != 'False':
        raise ValueError('Not a valid boolean string')
    return s == 'True'
def sdk2npy(string):
    """Map an SDK segment id 'name[i]' (0-based) to its npy filename 'name_{i+1}.npy' (1-based)."""
    name, bracketed = string.split('[', 1)
    segment = int(bracketed.split(']')[0]) + 1
    return '{}_{}.npy'.format(name, segment)
def npy2sdk(string):
    """Inverse of sdk2npy: 'name_{i}.npy' (1-based) -> 'name[{i-1}]' (0-based)."""
    pieces = string.split('_')
    return '{}[{}]'.format('_'.join(pieces[:(- 1)]), int(pieces[(- 1)][:(- 4)]) - 1)
def get_preprocess_args():
    """Parse command-line options for MOSI segmented-audio preprocessing.

    Returns:
        argparse.Namespace with data/output paths, feature type, CMVN flag,
        job count and target vocabulary size.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='/home/leo/d/datasets/MOSI/Raw/Audio/WAV_16000/Segmented', type=str, help='Path to raw MOSI segmented audio dataset')
    parser.add_argument('--output_path', default='../data/', type=str, help='Path to store output', required=False)
    parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    # boolean_string (defined in this file) enforces literal 'True'/'False' values.
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    parser.add_argument('--n_tokens', default=5000, type=int, help='Vocabulary size of target', required=False)
    args = parser.parse_args()
    return args
def acoustic_preprocess(args, dim):
    """Full MOSI preprocessing: feature extraction, label alignment, split csvs.

    Steps: (1) extract features from all segmented wavs; (2) download/load the
    CMU-MOSI opinion labels through the CMU SDK (`md`); (3) match each npy to
    its labelled SDK segment; (4) write all/train/dev/test csvs with lengths
    and sentiment labels; (5) delete unlabeled npys.
    """
    todo = list(Path(args.data_path).glob('*.wav'))
    print(len(todo), 'audio files found in MOSI')
    assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
    output_dir = os.path.join(args.output_path, '_'.join(['mosi', (str(args.feature_type) + str(dim))]))
    if (not os.path.exists(output_dir)):
        os.makedirs(output_dir)
    print('Extracting acoustic feature...', flush=True)
    # extract_feature (defined elsewhere) saves one npy per wav and returns its length.
    tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, cmvn=args.apply_cmvn, save_feature=os.path.join(output_dir, str(file).split('/')[(- 1)].replace('.wav', ''))) for file in tqdm(todo)))
    DATASET = md.cmu_mosi
    # Downloading raises RuntimeError when the label csd already exists locally.
    try:
        md.mmdataset(DATASET.labels, args.data_path)
    except RuntimeError:
        print('Labels have been downloaded previously.')
    label_field = 'CMU_MOSI_Opinion_Labels'
    features = [label_field]
    recipe = {feat: (os.path.join(args.data_path, feat) + '.csd') for feat in features}
    dataset = md.mmdataset(recipe)
    dataset.align(label_field)
    utterances = os.listdir(output_dir)
    # Sanity-check the label<->npy naming round trip; failures are only reported.
    for segment_sdk in dataset[label_field].keys():
        segment_npy = sdk2npy(segment_sdk)
        try:
            assert (segment_npy in utterances)
        except AssertionError:
            print('AssertionError: Cannot find corresponding utterance for given label')
        try:
            assert (npy2sdk(segment_npy) == segment_sdk)
        except AssertionError:
            print('AssertionError: npt2sdk funtion has bug')
    # Keep only labelled utterances, ordered by descending feature length.
    sorted_xlen = []
    sorted_y = []
    sorted_todo = []
    for idx in reversed(np.argsort(tr_x)):
        filename = str(todo[idx]).split('/')[(- 1)].replace('.wav', '.npy')
        sdkname = npy2sdk(filename)
        if (sdkname in dataset[label_field].keys()):
            sorted_xlen.append(tr_x[idx])
            # Scalar sentiment score is the first element of the aligned label tensor.
            sorted_y.append(dataset[label_field][sdkname]['features'].reshape((- 1))[0])
            sorted_todo.append(filename)
    df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(sorted_xlen), 'label': sorted_y})
    # Standard CMU-MOSI speaker folds decide train/dev/test membership.
    train_split = DATASET.standard_folds.standard_train_fold
    dev_split = DATASET.standard_folds.standard_valid_fold
    test_split = DATASET.standard_folds.standard_test_fold
    npy_dir = os.path.join(output_dir, 'npy')
    if (not os.path.exists(npy_dir)):
        os.mkdir(npy_dir)

    def classify(file_name):
        # Side effect: moves the npy into npy_dir while classifying its split.
        file_name = file_name[0]
        prefix = '_'.join(file_name.split('_')[:(- 1)])
        shutil.move(os.path.join(output_dir, file_name), os.path.join(npy_dir, file_name))
        if (prefix in train_split):
            return 'train'
        elif (prefix in dev_split):
            return 'dev'
        elif (prefix in test_split):
            return 'test'
        else:
            assert 0, 'Error in preprocess_mosi.py:146'
    belong = np.apply_along_axis((lambda file_name: classify(file_name)), 1, df['file_path'].values.reshape((- 1), 1))
    df.insert(len(df.columns), 'set', belong)
    train_frame = df[(df.set == 'train')]
    dev_frame = df[(df.set == 'dev')]
    test_frame = df[(df.set == 'test')]
    df.to_csv(os.path.join(output_dir, 'all.csv'))
    train_frame.to_csv(os.path.join(output_dir, 'train.csv'))
    dev_frame.to_csv(os.path.join(output_dir, 'dev.csv'))
    test_frame.to_csv(os.path.join(output_dir, 'test.csv'))
    # Anything left at the top level had no label (classify moved labelled ones).
    remain_npy = glob.glob(os.path.join(output_dir, '*.npy'))
    print((('Delete ' + str(len(remain_npy))) + ' unlabeled npy files:'))
    for npy in remain_npy:
        print(('delete ' + npy))
        os.remove(npy)
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Entry point: choose the feature dimension and preprocess MOSI audio.

    num_freq / mel_dim are module-level constants (defined elsewhere in the file).
    """
    args = get_preprocess_args()
    dim = (num_freq if (args.feature_type == 'linear') else mel_dim)
    acoustic_preprocess(args, dim)
def boolean_string(s):
    """Convert the exact strings 'True'/'False' to booleans; anything else raises ValueError."""
    mapping = {'False': False, 'True': True}
    try:
        return mapping[s]
    except KeyError:
        raise ValueError('Not a valid boolean string')
def get_preprocess_args():
    """Parse command-line options for TIMIT acoustic-feature preprocessing.

    Returns:
        argparse.Namespace with data/output paths, feature settings, delta
        options, CMVN flag, job count and output-directory name.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/timit', type=str, help='Path to raw TIMIT dataset')
    parser.add_argument('--output_path', default='./data/', type=str, help='Path to store output', required=False)
    parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    # boolean_string (defined in this file) enforces literal 'True'/'False' values.
    parser.add_argument('--delta', default=True, type=boolean_string, help='Append Delta', required=False)
    parser.add_argument('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    # The string 'None' (default) means: auto-name the output dir from feature type/dim.
    parser.add_argument('--name', default='None', type=str, help='Name of the output directory', required=False)
    args = parser.parse_args()
    return args
def preprocess(args, dim):
    """Extract acoustic features for the TIMIT train/dev/test splits.

    Saves one .npy per utterance and a '<split>.csv' listing file paths
    sorted by descending length (labels left empty).
    """
    for s in ('train', 'dev', 'test'):
        print('')
        print(f'Preprocessing {s} data...', end='')
        # TIMIT split dirs may be upper- or lower-case; '[wW][aA][vV]' matches either extension case.
        todo = list(Path(os.path.join(args.data_path, s.upper())).rglob('*.[wW][aA][vV]'))
        if (len(todo) == 0):
            todo = list(Path(os.path.join(args.data_path, s)).rglob('*.[wW][aA][vV]'))
        print(len(todo), f'audio files found in {s} set')
        # Output dir: auto-named 'timit_<type><dim>' unless --name was given.
        if (args.name == 'None'):
            output_dir = os.path.join(args.output_path, '_'.join(['timit', (str(args.feature_type) + str(dim))]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        cur_path = os.path.join(output_dir, s)
        if (not os.path.exists(cur_path)):
            os.makedirs(cur_path)
        print('Extracting acoustic feature...', flush=True)
        # extract_feature (defined elsewhere) saves each npy and returns the length.
        tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, delta=args.delta, delta_delta=args.delta_delta, cmvn=args.apply_cmvn, save_feature=os.path.join(cur_path, str(file).split('/')[(- 1)].split('.')[0])) for file in tqdm(todo)))
        # csv rows ordered by descending utterance length.
        sorted_todo = [os.path.join(s, (str(todo[idx]).split('/')[(- 1)].split('.')[0] + '.npy')) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(reversed(sorted(tr_x))), 'label': None})
        df.to_csv(os.path.join(output_dir, (s + '.csv')))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Entry point: compute the TIMIT feature dimension and run preprocessing.

    num_mels / num_mfcc / num_freq are module-level constants (defined elsewhere).
    """
    args = get_preprocess_args()
    # Dimension grows once per appended delta order.
    mel_dim = (num_mels * ((1 + int(args.delta)) + int(args.delta_delta)))
    mfcc_dim = (num_mfcc * ((1 + int(args.delta)) + int(args.delta_delta)))
    dim = (num_freq if (args.feature_type == 'linear') else (mfcc_dim if (args.feature_type == 'mfcc') else mel_dim))
    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)
    preprocess(args, dim)
def word_normalise(words):
    """Normalise a list of word tokens for SNIPS text processing.

    Expands month abbreviations, applies word replacements, strips the
    configured vocab regexes and removes sentence punctuation.
    months / replace_words / replace_vocab are module-level tables (defined elsewhere).
    """
    ret = []
    for word in words:
        if (word.lower() in months):
            word = months[word.lower()]
        if (word.lower() in replace_words):
            word = replace_words[word.lower()]
        for regex in replace_vocab:
            word = re.sub(regex, '', word)
        # Drop sentence punctuation characters anywhere in the token.
        word = re.sub('[\\.\\,\\!\\?;\\/]', '', word)
        ret.append(word)
    return ret
def sent_normalise(text, slots_split=None):
    """Text-normalise a sentence while keeping slot labels aligned per word.

    Args:
        text: space-separated sentence.
        slots_split: per-word slot labels; defaults to all 'O'.

    Returns:
        (norm_slots, norm_texts): parallel lists, one slot per normalised word.
    """
    (norm_slots, norm_texts) = ([], [])
    text_split = text.split(' ')
    if (slots_split is None):
        slots_split = (['O'] * len(text_split))
    for idx in range(len(text_split)):
        # NOTE(review): this is substring membership on '.,!?;/]', so it skips
        # single punctuation tokens AND empty tokens ('' in any string is True)
        # — presumably both are intended; confirm.
        if (text_split[idx] in '.,!?;/]'):
            continue
        # Reserved tokens expand to a fixed word sequence, each word keeping the slot.
        if (text_split[idx] in reservations):
            for word in reservations[text_split[idx]].split(' '):
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
            continue
        # normalise() (external text-normalisation library) may expand one token
        # into several phrases; the original slot label is replicated per word.
        norm_text = normalise(word_normalise([text_split[idx]]), variety='AmE', verbose=False)
        for phrase in norm_text:
            if (phrase == ''):
                continue
            for word in re.split(' |\\-', phrase):
                word = re.sub('[\\.\\,\\!\\?;\\/]', '', word)
                if (word == ''):
                    continue
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
    return (norm_slots, norm_texts)
def process_raw_snips_file(file, out_f):
    """Reformat a raw snips file into indexed IOB lines.

    Input lines look like 'word:slot word:slot ... <=> intent'; each becomes
    'cnt | BOS words EOS | O slots | intent' in the output file.
    """
    with open(file) as src:
        lines = [raw.strip() for raw in src.readlines()]
    with open(out_f, 'w') as dst:
        for cnt, line in enumerate(lines):
            text = line.split(' <=> ')[0]
            intent = line.split(' <=> ')[1]
            tokens, slots = [], []
            for chunk in text.split():
                # '::' marks a literal ':' inside the word; collapse before splitting.
                pair = chunk.replace('::', ':').split(':')
                tokens.append(pair[0] if len(pair) == 2 else ' ')
                slots.append(pair[1])
            assert len(tokens) == len(slots), (tokens, slots)
            dst.write('%d | BOS %s EOS | O %s | %s\n' % (cnt, ' '.join(tokens), ' '.join(slots), intent))
def remove_IBO_from_snipt_vocab_slot(in_f, out_f):
    """Strip the 2-char IOB prefixes ('B-'/'I-') from a slot-vocab file and
    write the de-duplicated slot names (one per line, set order)."""
    with open(in_f) as handle:
        entries = [ln.strip() for ln in handle.readlines()]
    stripped = {e if e == 'O' else e[len('B-'):] for e in entries}
    with open(out_f, 'w') as handle:
        for entry in stripped:
            handle.write('%s\n' % entry)
def process_daniel_snips_file(content):
    """Parse the combined snips tsv lines into per-split (text, slots, intent) dicts.

    Args:
        content: iterable of tab-separated lines 'uttid\\ttext\\tslots\\tintent',
            with uttids shaped 'snips-<split>-<n>'.

    Returns:
        ((train_utt2text, train_utt2slots, train_utt2intent),
         (valid_...), (test_...)) — one triple of dicts per split.
    """
    content = [x.strip() for x in content]
    utt_ids = [x.split('\t', 1)[0] for x in content]
    # The middle dash-field of the uttid names the split.
    valid_uttids = [x for x in utt_ids if (x.split('-')[1] == 'valid')]
    test_uttids = [x for x in utt_ids if (x.split('-')[1] == 'test')]
    train_uttids = [x for x in utt_ids if (x.split('-')[1] == 'train')]
    (utt2text, utt2slots, utt2intent) = ({}, {}, {})
    assert (len(utt_ids) == len(set(utt_ids)))
    for line in content:
        (uttid, text, slots, intent) = line.split('\t')
        # Repair lines where an extra space in the text produced a spurious slot.
        # NOTE(review): the ' ' separator below looks like it may originally have
        # been a double space ('  ') before whitespace was collapsed — confirm
        # against the upstream script.
        if (len(text.split()) != len(slots.split())):
            assert (len(text.split(' ')) == 2)
            empty_idx = (text.split().index(text.split(' ')[0].split()[(- 1)]) + 1)
            slots_list = slots.split()
            del slots_list[empty_idx]
            cleaned_slots = ' '.join(slots_list)
            assert (len(text.split()) == len(slots_list))
            cleaned_text = ' '.join(text.split())
        else:
            (cleaned_text, cleaned_slots) = (text, slots)
        # Keep only the slot name after the 'prefix/slot' form (the 'O' label stays).
        cleaned_slots = ' '.join([(x.split('/')[1] if (x != 'O') else x) for x in cleaned_slots.split()])
        utt2text[uttid] = cleaned_text
        utt2slots[uttid] = cleaned_slots
        utt2intent[uttid] = intent
    (test_utt2text, test_utt2slots, test_utt2intent) = ({}, {}, {})
    (valid_utt2text, valid_utt2slots, valid_utt2intent) = ({}, {}, {})
    (train_utt2text, train_utt2slots, train_utt2intent) = ({}, {}, {})
    for utt in valid_uttids:
        valid_utt2text[utt] = utt2text[utt]
        valid_utt2slots[utt] = utt2slots[utt]
        valid_utt2intent[utt] = utt2intent[utt]
    for utt in test_uttids:
        test_utt2text[utt] = utt2text[utt]
        test_utt2slots[utt] = utt2slots[utt]
        test_utt2intent[utt] = utt2intent[utt]
    for utt in train_uttids:
        train_utt2text[utt] = utt2text[utt]
        train_utt2slots[utt] = utt2slots[utt]
        train_utt2intent[utt] = utt2intent[utt]
    # Sanity checks against the published SNIPS statistics:
    # 7 intents, 700 valid / 700 test / 13084 train utterances, 40 slot labels.
    assert (len(set(valid_utt2intent.values())) == len(set(test_utt2intent.values())) == len(set(train_utt2intent.values())) == 7)
    assert (len(valid_utt2intent.keys()) == len(test_utt2intent.keys()) == 700)
    assert (len(train_utt2intent.keys()) == 13084)

    def __return_set_of_slots(utt2slots):
        # Collect the unique slot labels used by one split.
        all_slots = []
        for slot in utt2slots.values():
            all_slots.extend(slot.split())
        unique_slots = set(all_slots)
        return unique_slots
    assert (len(__return_set_of_slots(valid_utt2slots)) == len(__return_set_of_slots(test_utt2slots)) == len(__return_set_of_slots(train_utt2slots)) == 40)
    return ((train_utt2text, train_utt2slots, train_utt2intent), (valid_utt2text, valid_utt2slots, valid_utt2intent), (test_utt2text, test_utt2slots, test_utt2intent))
def map_and_link_snips_audio(snips_audio_dir, link_dir):
    """Copy snips mp3 utterances into <link_dir>/<split>/, renamed with a speaker prefix.

    NOTE(review): `path.split('/')[8]` hard-codes the directory depth of the
    '..._<speaker>' folder — verify against the actual on-disk layout.
    """
    result = [y for x in os.walk(snips_audio_dir) for y in glob(os.path.join(x[0], '*.mp3'))]
    for path in result:
        person = path.split('/')[8].split('_')[1]
        filename = path.split('/')[(- 1)]
        # Only files named 'snips-...' are utterances; skip everything else.
        if (filename[:5] != 'snips'):
            continue
        uttid = filename.split('.')[0]
        new_uttid = ((person + '-') + filename)
        # uttid is 'snips-<split>-<n>'; route the copy into its split directory.
        partition = uttid.split('-')[1]
        destination = os.path.join(link_dir, partition, new_uttid)
        shutil.copyfile(path, destination)
def create_multispk_for_snips(output_dir):
    """Expand the single-speaker snips transcripts to all 16 synthetic speakers.

    For each split, every normalised utterance line is duplicated once per
    speaker with a '<speaker>-' prefixed uttid and appended to
    all.iob.snips.txt as 'uttid BOS TEXT EOS<TAB>O slots intent'.
    """
    speakers = 'Aditi Amy Brian Emma Geraint Ivy Joanna Joey Justin Kendra Kimberly Matthew Nicole Raveena Russell Salli'.split(' ')
    # Published split sizes: 700 test / 700 valid / 13084 train utterances.
    dataset_info = [{'split': 'test', 'num_utts': 700}, {'split': 'valid', 'num_utts': 700}, {'split': 'train', 'num_utts': 13084}]
    test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w')
    for data in dataset_info:
        num_utts = data['num_utts']
        split = data['split']
        with open(os.path.join(output_dir, ('single-matched-snips.%s.w-intent' % split))) as f:
            content = f.readlines()
        # Index each transcript line by its (speaker-less) utterance id.
        utt2line = {x.strip().split()[0]: x.strip() for x in content}
        for spk in speakers:
            for num in range(num_utts):
                uttid = ('%s-snips-%s-%d' % (spk, split, num))
                line = utt2line[('snips-%s-%d' % (split, num))]
                text = line.split('\t')[1].upper()
                slots = line.split('\t')[2]
                intent = line.split('\t')[3]
                test_out_f.write(('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent)))
    test_out_f.close()
def _normalise_split(dirs, out_path, desc, vocab_slot):
    """Normalise one split's utterances into `out_path`, counting slot labels.

    Args:
        dirs: (utt2text, utt2slots, utt2intent) dicts for one split.
        out_path: output file, one 'uttid\\tTEXT\\tslots\\tintent' line per utt.
        desc: tqdm progress description.
        vocab_slot: shared {slot: count} accumulator, updated in place.
    """
    with open(out_path, 'w') as out_file:
        for uttid in tqdm.tqdm(dirs[0].keys(), desc=desc):
            text = dirs[0][uttid]
            slots = dirs[1][uttid]
            intent = dirs[2][uttid]
            slots_split = slots.split()
            # Count slot labels before normalisation (expansion replicates slots).
            for s in slots_split:
                vocab_slot.setdefault(s, 0)
                vocab_slot[s] += 1
            (norm_slots, norm_texts) = sent_normalise(text, slots_split)
            assert (len(norm_texts) == len(norm_slots)), (norm_texts, norm_slots)
            out_file.write(('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent)))


def apply_text_norm_and_modify_slots(all_tsv, output_dir):
    """Text-normalise the snips test/valid/train splits and write a slot vocab.

    Each split is written to 'single-matched-snips.<split>.w-intent'; slot
    labels seen across all three splits are written to 'slots.txt', most
    frequent first. (The three previously duplicated per-split loops are
    factored into _normalise_split; processing order — test, valid, train —
    is unchanged.)
    """
    (train_dirs, valid_dirs, test_dirs) = process_daniel_snips_file(all_tsv)
    vocab_slot = {}
    _normalise_split(test_dirs, os.path.join(output_dir, 'single-matched-snips.test.w-intent'),
                     'Text Normalising on testing set', vocab_slot)
    _normalise_split(valid_dirs, os.path.join(output_dir, 'single-matched-snips.valid.w-intent'),
                     'Text Normalising on validation set', vocab_slot)
    _normalise_split(train_dirs, os.path.join(output_dir, 'single-matched-snips.train.w-intent'),
                     'Text Normalising on training set', vocab_slot)
    # BUG FIX: the vocab file handle was previously never closed; use a context
    # manager so the slot vocabulary is reliably flushed to disk.
    with open(os.path.join(output_dir, 'slots.txt'), 'w') as vocab_file:
        vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()), key=(lambda x: vocab_slot[x]), reverse=True)))
def sox_func(inputs):
    """Worker: convert one speaker's mp3 files to 16 kHz mono 16-bit wav via sox.

    Args:
        inputs: (files, root, out_root, speaker) tuple as built by sox_mp3_to_wav.
    """
    (files, root, out_root, speaker) = inputs
    for name in tqdm.tqdm(files, desc=('Process for speaker: ' + speaker)):
        if name.endswith('.mp3'):
            # Filenames look like 'snips-<split>-<n>.mp3'; route output by split.
            split = name.split('-')[1]
            out_dir = os.path.join(out_root, split)
            os.makedirs(out_dir, exist_ok=True)
            orig_file = os.path.join(root, name)
            new_file = os.path.join(out_dir, (((speaker + '-') + name.split('/')[(- 1)].split('.')[0]) + '.wav'))
            # NOTE(review): command is built by string concatenation and run
            # through a shell via os.popen — paths containing spaces or shell
            # metacharacters would break; consider subprocess.run with a list.
            bashCommand = ((('sox ' + orig_file) + ' -t wav -c 1 -r 16000 -b 16 -e signed-integer ') + new_file)
            r = os.popen(bashCommand).read()
def sox_mp3_to_wav(in_root, out_root):
    """Walk `in_root`, group mp3 files per speaker directory, and convert them
    to wav with a pool of 16 worker processes (see sox_func)."""
    os.makedirs(out_root, exist_ok=True)
    pool = Pool(16)
    inputs = []
    for (root, dirs, files) in os.walk(in_root):
        print(('[Processing] enter directory %s' % root))
        if (not len(files)):
            continue
        # Speaker name is the '_<speaker>' suffix of the parent directory.
        speaker = root.split('/')[(- 2)].split('_')[1]
        print(('[Processing] process %d audio files from speaker %s' % (len(files), speaker)))
        inputs.append((files, root, out_root, speaker))
    pool.map(sox_func, inputs)
def get_preprocess_args():
    """Parse command-line options for splitting long audio into fixed-size chunks.

    Returns:
        argparse.Namespace with input/output paths, chunk size in seconds,
        audio extension, output-directory suffix and job count.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    parser.add_argument('-i', '--input_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to your LibriSpeech directory', required=False)
    parser.add_argument('-o', '--output_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to store output', required=False)
    parser.add_argument('-s', '--split_size', default=60, type=int, help='Split size in seconds', required=False)
    parser.add_argument('-a', '--audio_extension', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    # Suffix appended to the split directory name, e.g. 'small' -> 'small-splitted'.
    parser.add_argument('-n', '--name', default='-splitted', type=str, help='Name to append on the original directory', required=False)
    parser.add_argument('--n_jobs', default=(- 1), type=int, help='Number of jobs used for computation', required=False)
    args = parser.parse_args()
    return args
def split_and_save(input_file, current_split, args):
    """Split one audio file into args.split_size-second chunks and save them.

    Chunk i of 'x.flac' is saved as 'x-i.flac' under a mirror of the original
    directory tree where `current_split` is renamed to
    '<current_split><args.name>' (and input_path swapped for output_path).
    """
    (wav, sr) = torchaudio.load(input_file)
    chunk_size = (args.split_size * sr)
    (quotient, remainder) = divmod(wav.size(1), chunk_size)
    sections = [chunk_size for _ in range(quotient)]
    # NOTE(review): when the length is an exact multiple of chunk_size,
    # remainder is 0 and a zero-length trailing chunk is still written —
    # confirm this is intended.
    sections.append(remainder)
    splitted_wav = torch.split(wav, split_size_or_sections=sections, dim=1)
    check_sum = 0
    for (i, w) in enumerate(splitted_wav):
        check_sum += w.size(1)
        file_name = os.path.basename(input_file).split('.')[0]
        new_file_name = file_name.replace(file_name, ((file_name + '-') + str(i)))
        new_file_path = input_file.replace(current_split, (current_split + args.name))
        new_file_path = new_file_path.replace(file_name, new_file_name)
        if (args.input_path != args.output_path):
            new_file_path = new_file_path.replace(args.input_path, args.output_path)
        os.makedirs(os.path.dirname(new_file_path), exist_ok=True)
        torchaudio.save(new_file_path, w, sr)
    # All chunks together must reproduce the original sample count.
    assert (check_sum == wav.size(1))
def generate_splits(args, tr_set, audio_extension):
    """Split every audio file in the chosen dataset subsets into fixed-size chunks.

    Args:
        args: parsed CLI namespace from get_preprocess_args().
        tr_set: list of subset names under args.input_path.
        audio_extension: file extension to search for (e.g. '.flac').

    Raises:
        NotImplementedError: if a subset directory exists in neither
            lower- nor upper-case form.
    """
    for (i, s) in enumerate(tr_set):
        # Accept either lower- or upper-case directory names for the subset.
        if os.path.isdir(os.path.join(args.input_path, s.lower())):
            s = s.lower()
        elif os.path.isdir(os.path.join(args.input_path, s.upper())):
            s = s.upper()
        else:
            # BUG FIX: was `assert NotImplementedError`, which is always truthy
            # and therefore never fired; a missing directory now fails loudly.
            raise NotImplementedError('Cannot find dataset split directory: ' + os.path.join(args.input_path, s))
        print('')
        todo = list(Path(os.path.join(args.input_path, s)).rglob(('*' + audio_extension)))
        print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
        print('Splitting audio to shorter length...', flush=True)
        # split_and_save (defined in this file) writes the chunked copies.
        Parallel(n_jobs=args.n_jobs)((delayed(split_and_save)(str(file), s, args) for file in tqdm(todo)))
    print('All done, saved at', args.output_path, 'exit.')
def main():
    """Interactive driver: infer the dataset family from the input path, list
    its splits, and ask the user which to cut into fixed-length chunks."""
    args = get_preprocess_args()
    # The dataset family is inferred from the input path name.
    if ('librilight' in args.input_path.lower()):
        SETS = ['small', 'medium', 'large']
    elif ('librispeech' in args.input_path.lower()):
        SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    elif ('timit' in args.input_path.lower()):
        SETS = ['TRAIN', 'TEST']
    else:
        raise NotImplementedError
    for (idx, s) in enumerate(SETS):
        print('\t', idx, ':', s)
    tr_set = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
    generate_splits(args, tr_set, args.audio_extension)
def main():
    """Convert preprocessed TIMIT pickle features into Kaldi ark/scp files.

    Loads '<set>_x.pkl' / '<set>_id.pkl' feature and id lists, builds
    'SPEAKER_UTT' keys, filters them against each Kaldi split's feats.scp,
    and pipes the matrices through copy-feats into compressed ark files.
    Relies on module-level constants (KALDI_ROOT, INPUT_PATH, SOURCE_DIR,
    OUTPUT_PATH, INPUT_SETS, OUTPUT_SETS) and kaldi_io, defined elsewhere.
    """
    if (not os.path.isdir(KALDI_ROOT)):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if (not os.path.isdir(INPUT_PATH)):
        print('Invalid path for the preprocessed timit dataset: ', INPUT_PATH)
        print("Please run 'preprocess_timit.py' first!")
        exit()
    if (not os.path.isdir(SOURCE_DIR)):
        print('Invalid path for the source directory: ', SOURCE_DIR)
        print('Please read the Wiki page for instructions!')
        exit()
    if (not os.path.isdir(OUTPUT_PATH)):
        os.mkdir(OUTPUT_PATH)
    (x, ids) = ([], [])
    for s in INPUT_SETS:
        with open(os.path.join(INPUT_PATH, (s + '_x.pkl')), 'rb') as fp:
            x += pickle.load(fp)
        with open(os.path.join(INPUT_PATH, (s + '_id.pkl')), 'rb') as fp:
            ids += pickle.load(fp)
    assert (len(x) == len(ids))
    print('[TIMIT-to-ARK] - ', 'Total Dataset len:', len(x))
    all_inputs = {}
    for (idx, i) in enumerate(ids):
        i = str(i)
        # BUG FIX: was `str(i).strip('.wav')`, which strips any of the characters
        # '.', 'w', 'a', 'v' from BOTH ends of the string (mangling ids whose
        # path starts with, or whose stem ends with, those characters).
        # Remove the extension suffix explicitly instead.
        if i.endswith('.wav'):
            i = i[:(- 4)]
        i = i.split('/')
        # Key format expected by the Kaldi scp: 'SPEAKER_UTTERANCE' (upper-case).
        i = ((i[(- 2)].upper() + '_') + i[(- 1)].upper())
        all_inputs[i] = np.asarray(x[idx])
    for s in OUTPUT_SETS:
        if (not os.path.isdir(SOURCE_DIR)):
            raise NotADirectoryError('Source directory does not exist!', SOURCE_DIR)
        if (not os.path.isdir(((OUTPUT_PATH + '/') + str(s)))):
            os.mkdir(((OUTPUT_PATH + '/') + str(s)))
        # Keep only the utterances listed in this split's feats.scp.
        partial_outputs = {}
        with open(os.path.join(SOURCE_DIR, (s + '/feats.scp')), 'r') as f:
            lines = f.readlines()
        for line in lines:
            line = line.split(' ')[0]
            if (line in all_inputs):
                partial_outputs[line] = all_inputs[line]
        assert (len(lines) == len(partial_outputs))
        # Stream matrices through Kaldi's copy-feats to produce compressed ark + scp.
        ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:{}/raw_mel_{}.ark,{}/{}/feats.scp'.format(OUTPUT_PATH, str(s), OUTPUT_PATH, str(s))
        with kaldi_io.open_or_fd(ark_scp_output, 'wb') as f:
            for (key, mat) in tqdm(partial_outputs.items()):
                kaldi_io.write_mat(f, mat, key=key)
    print((("[TIMIT-to-ARK] - All done, saved at '" + str(OUTPUT_PATH)) + "' exit."))
class ApcAudioDataset(FeatDataset):
    """APC training dataset: loads precomputed .npy features, or raw audio
    run through the on-the-fly extracter, and returns padded batches."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super(ApcAudioDataset, self).__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        # Without libri_root, feat_path names a precomputed .npy under self.root;
        # otherwise load the waveform from libri_root and extract features on the fly.
        if (self.libri_root is None):
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        else:
            (wav, _) = torchaudio.load(os.path.join(self.libri_root, feat_path))
            feat = self.extracter(wav)
            return feat

    def __getitem__(self, index):
        # self.X[index] is one bucket of file paths (see FeatDataset);
        # optionally crop each utterance, then zero-pad to a rectangular batch.
        x_batch = [self._sample(self._load_feat(x_file)) for x_file in self.X[index]]
        x_len = [len(x_b) for x_b in x_batch]
        x_pad_batch = pad_sequence(x_batch, batch_first=True)
        return (x_pad_batch, x_len)
class FeatDataset(Dataset):
    """Base On-the-fly feature dataset by Andy T. Liu.

    Reads per-split csv tables ('file_path', 'length'), sorts utterances by
    descending length and groups them into buckets of `bucket_size` paths;
    each dataset item is one whole bucket (a pre-built batch).
    """

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super(FeatDataset, self).__init__()
        self.extracter = extracter
        self.task_config = task_config
        self.libri_root = libri_root
        # sequence_length > 0 enables random fixed-length segment sampling in _sample().
        self.sample_length = task_config['sequence_length']
        if (self.sample_length > 0):
            print('[Dataset] - Sampling random segments for training, sample length:', self.sample_length)
        self.root = file_path
        tables = [pd.read_csv(os.path.join(file_path, (s + '.csv'))) for s in sets]
        self.table = pd.concat(tables, ignore_index=True).sort_values(by=['length'], ascending=False)
        print('[Dataset] - Training data from these sets:', str(sets))
        # Positive max_timestep drops utterances longer than it;
        # negative max_timestep drops utterances shorter than its magnitude.
        if (max_timestep > 0):
            self.table = self.table[(self.table.length < max_timestep)]
        if (max_timestep < 0):
            self.table = self.table[(self.table.length > ((- 1) * max_timestep))]
        X = self.table['file_path'].tolist()
        X_lens = self.table['length'].tolist()
        self.num_samples = len(X)
        print('[Dataset] - Number of individual training instances:', self.num_samples)
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            batch_x.append(x)
            batch_len.append(x_len)
            if (len(batch_x) == bucket_size):
                # Halve buckets containing very long utterances (when not
                # segment-sampling) to bound memory; HALF_BATCHSIZE_TIME is a
                # module-level constant defined elsewhere.
                if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME) and (self.sample_length == 0)):
                    self.X.append(batch_x[:(bucket_size // 2)])
                    self.X.append(batch_x[(bucket_size // 2):])
                else:
                    self.X.append(batch_x)
                (batch_x, batch_len) = ([], [])
        # Keep the leftover partial bucket only if it has more than one utterance.
        if (len(batch_x) > 1):
            self.X.append(batch_x)

    def _sample(self, x):
        # Randomly crop a sample_length-frame segment; no-op when sampling is
        # disabled or the utterance is already short enough.
        if (self.sample_length <= 0):
            return x
        if (len(x) < self.sample_length):
            return x
        idx = random.randint(0, (len(x) - self.sample_length))
        return x[idx:(idx + self.sample_length)]

    def __len__(self):
        # One item per bucket of file paths.
        return len(self.X)

    def collate_fn(self, items):
        # A bucket is already a full batch; unwrap the DataLoader's extra list level.
        items = items[0]
        return items
class WaveDataset(Dataset):
    """Base waveform dataset for Disiller by Heng-Jui Chang."""

    def __init__(self, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, **kwargs):
        super().__init__()
        self.task_config = task_config
        self.libri_root = libri_root
        self.sample_length = task_config['sequence_length']
        if self.sample_length > 0:
            print('[Dataset] - Sampling random segments for training, sample length:', self.sample_length)

        self.root = file_path
        # Read each set's CSV and concatenate, longest utterances first.
        frames = [pd.read_csv(os.path.join(file_path, set_name + '.csv')) for set_name in sets]
        self.table = pd.concat(frames, ignore_index=True).sort_values(by=['length'], ascending=False)
        print('[Dataset] - Training data from these sets:', str(sets))

        # max_timestep > 0 filters out long utterances; < 0 filters out short ones.
        if max_timestep > 0:
            self.table = self.table[self.table.length < max_timestep]
        if max_timestep < 0:
            self.table = self.table[self.table.length > (-1) * max_timestep]

        file_paths = self.table['file_path'].tolist()
        file_lengths = self.table['length'].tolist()
        self.num_samples = len(file_paths)
        print('[Dataset] - Number of individual training instances:', self.num_samples)

        # Group files into fixed-size buckets; overly long buckets are split
        # in half (only when random-segment sampling is disabled).
        self.X = []
        bucket, bucket_lens = [], []
        for path, length in zip(file_paths, file_lengths):
            bucket.append(path)
            bucket_lens.append(length)
            if len(bucket) == bucket_size:
                if (bucket_size >= 2
                        and max(bucket_lens) > HALF_BATCHSIZE_TIME
                        and self.sample_length == 0):
                    half = bucket_size // 2
                    self.X.append(bucket[:half])
                    self.X.append(bucket[half:])
                else:
                    self.X.append(bucket)
                bucket, bucket_lens = [], []
        if len(bucket) > 1:
            self.X.append(bucket)  # a single leftover file is dropped

    def _sample(self, x):
        """Random contiguous crop of sample_length samples (no-op if disabled or x is short)."""
        if self.sample_length <= 0 or len(x) < self.sample_length:
            return x
        start = random.randint(0, len(x) - self.sample_length)
        return x[start:start + self.sample_length]

    def __len__(self):
        return len(self.X)

    def collate_fn(self, items):
        # DataLoader batch_size is 1: unwrap, then sanity-check the tuple shape.
        items = items[0]
        assert (len(items) == 4), '__getitem__ should return (wave_input, wave_orig, wave_len, pad_mask)'
        return items
def freeze_model(model):
    """Freeze all parameters in a model."""
    for parameter in model.parameters():
        parameter.requires_grad_(False)
class UpstreamPretrainExpert(nn.Module):
    """The Distiller pretrain expert.

    Wraps a DistillerForPretrain model together with its on-the-fly waveform
    dataloader, and implements the hooks the pretraining Runner expects
    (forward / load_model / add_state_to_save / log_records / ...).
    """

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super().__init__()
        self.datarc = datarc
        self.device = device
        self.multi_gpu = multi_gpu
        # `upstream_config` is either a YAML path (fresh run) or an
        # already-parsed dict (resuming a previous experiment).
        if (type(upstream_config) == str):
            self.upstream_config = yaml.load(open(upstream_config, 'r'), Loader=yaml.FullLoader)
            print('[UpstreamPretrainExpert] - Using upstream config from:', upstream_config)
        elif (type(upstream_config) == dict):
            self.upstream_config = upstream_config
            print('[UpstreamPretrainExpert] - Using upstream config from the previous experiment.')
        else:
            raise ValueError
        self._get_train_dataloader()
        print('[UpstreamPretrainExpert] - Initializing model...')
        model_config = DistillerConfig(self.upstream_config['distiller'])
        self.model = DistillerForPretrain(model_config, edict(self.upstream_config['teacher']))
        if self.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print(('[UpstreamPretrainExpert] - Multi-GPU training Enabled: ' + str(torch.cuda.device_count())))
        print(('[UpstreamPretrainExpert] - Number of parameters: ' + str(sum((p.numel() for p in self.model.parameters() if p.requires_grad)))))

    def _get_train_dataloader(self):
        # batch_size=1 because the dataset itself yields pre-bucketed batches
        # (unwrapped again by the dataset's collate_fn).
        dataset = OnlineWaveDataset(self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def load_model(self, all_states):
        """Restore distiller weights from a checkpoint state dict."""
        if self.multi_gpu:
            self.model.module.distiller.load_state_dict(all_states['Distiller'])
        else:
            self.model.distiller.load_state_dict(all_states['Distiller'])

    def add_state_to_save(self, all_states):
        """Insert the distiller weights (cast to fp32) and config into `all_states`."""
        all_states['Distiller'] = (self.model.float().distiller.state_dict() if (not self.multi_gpu) else self.model.float().module.distiller.state_dict())
        all_states['Config'] = self.upstream_config
        return all_states

    def get_train_dataloader(self):
        return self.dataloader

    def forward(self, data, records={}, global_step=0, log_step=1000, **kwargs):
        """Run one pretraining step.

        Args:
            data:
                [wave_input, wave_orig, wave_len, pad_mask]
            records:
                defaultdict(list); contents appended here are averaged and
                logged on Tensorboard every log_step by self.log_records.

        Return:
            (loss, records)
        """
        (wave_input, wave_orig, wave_len, pad_mask) = data
        wave_input = wave_input.to(self.device)
        wave_len = wave_len.to(self.device)
        pad_mask = pad_mask.type(wave_input.dtype).to(self.device)
        # Extra logging statistics are only computed on logging steps.
        (loss, other_res) = self.model(wave_input, wave_orig, wave_len, pad_mask, return_other=((global_step % log_step) == 0))
        if ((global_step % log_step) == 0):
            for (key, value) in other_res.items():
                if isinstance(value, torch.Tensor):
                    value = float(value.mean().cpu().item())
                records[key] = value
        return (loss, records)

    def on_before_zero_grad(self):
        # Hook called by the Runner right before optimizer.zero_grad(); no-op here.
        pass

    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """Write accumulated records to Tensorboard.

        Args:
            records:
                defaultdict(list), contents already appended
            logger:
                Tensorboard SummaryWriter; keys are logged as f'{prefix}{key}'
            prefix:
                used to indicate upstream and train/test on Tensorboard,
                eg. 'distiller/train-'
            global_step:
                global_step in runner, used as the Tensorboard x-axis
        """
        for (key, values) in records.items():
            if (isinstance(values, torch.Tensor) and (len(values.shape) > 1)):
                logger.add_image(f'{prefix}{key}', values, global_step=global_step)
            elif isinstance(values, float):
                logger.add_scalar(f'{prefix}{key}', values, global_step=global_step)
class DistillerForPretrain(nn.Module):
    """Distiller for pretraining.

    Holds a student DistillerModel and a frozen teacher (loaded via torch.hub),
    and computes the layer-wise distillation loss between them.
    """

    def __init__(self, config: DistillerConfig, teacher_config: edict):
        super().__init__()
        self.config = config
        self.distiller = DistillerModel(config)
        self.teacher_config = teacher_config
        teacher = torch.hub.load('s3prl/s3prl', teacher_config.model)
        # Layerdrop would randomly skip teacher layers, making the per-layer
        # targets inconsistent across steps; disable it for these teachers.
        if ((teacher_config.model.find('hubert') >= 0) or (teacher_config.model.find('wav2vec2') >= 0)):
            teacher.model.encoder.layerdrop = 0
            print("[DistillerForPretrain] - Disabled teacher's encoder layerdrop")
        # The student cannot distill from more layers than the teacher has.
        assert (self.distiller.n_tasks <= teacher_config.n_layers), (self.distiller.n_tasks, teacher_config.n_layers)
        self.teacher = teacher
        freeze_model(self.teacher)
        print('[DistillerForPretrain] - Using {} as teacher with {} layers'.format(teacher_config.model, teacher_config.n_layers))
        # Element-wise reconstruction loss; reduction happens in compute_loss.
        if (config.loss_type == 'l1'):
            self.loss_func = nn.L1Loss(reduction='none')
        elif (config.loss_type == 'l2'):
            self.loss_func = nn.MSELoss(reduction='none')
        else:
            raise NotImplementedError(config.loss_type)
        self.cosine_loss = config.cosine_loss
        if (self.cosine_loss > 0):
            print('[DistillerForPretrain] - Enabled cosine similarity loss.')
        # Optionally warm-start the student from the teacher's weights.
        if config.init_teacher_conv_layers:
            print('[DistillerForPretrain] - Initializing feature extractor from teacher')
            self.distiller.feature_extractor.load_state_dict(self.teacher.model.feature_extractor.state_dict())
            if (self.distiller.post_extract_proj is not None):
                self.distiller.post_extract_proj.load_state_dict(self.teacher.model.post_extract_proj.state_dict())
        if config.init_teacher_encoder_layers:
            print('[DistillerForPretrain] - Initializing encoder from teacher')
            self.distiller.encoder.pos_conv.load_state_dict(self.teacher.model.encoder.pos_conv.state_dict())
            for l in range(config.encoder_layers):
                self.distiller.encoder.layers[l].load_state_dict(self.teacher.model.encoder.layers[l].state_dict())

    def forward(self, wave_input: torch.Tensor, wave_orig: list, wave_len: torch.Tensor, pad_mask: torch.Tensor, return_other: bool=False):
        """Forward function.

        Input:
            wave_input: FloatTensor (B x T_wave)
            wave_orig: List of FloatTensor
            wave_len: LongTensor (B)
            pad_mask: FloatTensor (B x T)
            return_other: Bool (returns other information for logging)
        """
        (feat, feat_final, pred, pad_mask) = self.distiller(wave_input, pad_mask)
        with torch.no_grad():
            wave_orig = [wave.to(wave_input.device) for wave in wave_orig]
            # Teacher always runs in full precision, even under AMP training.
            with torch.cuda.amp.autocast(False):
                teacher_hiddens = self.teacher(wave_orig)
            # Select the teacher layers that serve as distillation targets,
            # stacked into B x N x T x D.
            if (self.config.task_emb_type == 'none'):
                teacher_hiddens = teacher_hiddens['hidden_states'][self.config.n_tasks]
                teacher_hiddens = teacher_hiddens.unsqueeze(1)
            else:
                if (self.config.task_emb_type in ['expand-last', 'hnet', 'self-hidden']):
                    teacher_hiddens = [teacher_hiddens['hidden_states'][i] for i in self.distiller.pred_layer_id]
                else:
                    teacher_hiddens = teacher_hiddens['hidden_states'][1:]
                teacher_hiddens = torch.stack(teacher_hiddens, dim=1)
        (total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss) = self.compute_loss(feat, pred, teacher_hiddens, return_other)
        if return_other:
            # Assemble per-layer statistics for Tensorboard logging.
            with torch.no_grad():
                other_res = {'rec_loss': rec_loss, 'feat_pen': feat_pen, 'sim_loss': sim_loss, 'norm_feat_final': feat_final.pow(2).mean()}
                teacher_norm = torch.abs(teacher_hiddens).mean((0, 2, 3))
                if (self.config.task_emb_type == 'none'):
                    other_res[f'rec_l{self.config.n_tasks}'] = rec_layer_loss[0]
                    other_res[f'tar_norm_l{self.config.n_tasks}'] = teacher_norm[0]
                    if (sim_layer_loss is not None):
                        other_res[f'sim_l{self.config.n_tasks}'] = sim_layer_loss[0]
                else:
                    for i in range(self.config.n_tasks):
                        layer_id = (i + 1)
                        if (self.config.task_emb_type in ['expand-last', 'hnet', 'self-hidden']):
                            layer_id = self.distiller.pred_layer_id[i]
                        other_res[f'rec_l{layer_id}'] = rec_layer_loss[i]
                        other_res[f'tar_norm_l{layer_id}'] = teacher_norm[i]
                        if (sim_layer_loss is not None):
                            other_res[f'sim_l{layer_id}'] = sim_layer_loss[i]
                    if (self.config.task_emb_type not in ['expand-last', 'hnet', 'self-hidden']):
                        other_res['norm_task_emb'] = self.distiller.task_embedding.weight.pow(2).mean()
        else:
            other_res = None
        return (total_loss, other_res)

    def compute_loss(self, feat, pred, target, return_other=False):
        """Computes loss.

        Inputs:
            feat: B x T x D   (student CNN features, for the magnitude penalty)
            pred: B x N x T x D   (student predictions, one slice per target layer)
            target: B x N x T x D   (teacher hidden states)
        """
        assert (pred.shape == target.shape), (pred.shape, target.shape)
        # Element-wise reconstruction loss; per-layer means kept for logging only.
        rec_loss = self.loss_func(pred, target)
        if return_other:
            with torch.no_grad():
                rec_layer_loss = rec_loss.mean((0, 2, 3))
        else:
            rec_layer_loss = None
        rec_loss = rec_loss.mean()
        # Optional cosine-similarity term: -log sigmoid(cos_sim(pred, target)).
        if (self.cosine_loss > 0):
            sim_loss = (- F.logsigmoid(F.cosine_similarity(pred, target, dim=(- 1))))
            if return_other:
                with torch.no_grad():
                    sim_layer_loss = sim_loss.mean((0, 2))
            else:
                sim_layer_loss = None
            sim_loss = sim_loss.mean()
        else:
            sim_loss = 0
            sim_layer_loss = None
        # Penalty on the student CNN feature magnitude.
        feat_pen = feat.float().pow(2).mean()
        total_loss = ((rec_loss + (feat_pen * self.config.feat_pen_loss)) + (sim_loss * self.cosine_loss))
        return (total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss)
class KaldiAcousticDataset(FeatDataset):
    """Dataset yielding masked-acoustic-model training data from kaldi features."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets,
                         max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        """Load a saved .npy feature, or extract kaldi features from raw audio."""
        if self.libri_root is None:
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        wav, _ = torchaudio.load(os.path.join(self.libri_root, feat_path))
        return self.extracter(wav.squeeze())

    def __getitem__(self, index):
        feats = [self._sample(self._load_feat(path)) for path in self.X[index]]
        padded = pad_sequence(feats, batch_first=True)
        return generate_masked_acoustic_model_data(spec=(padded,), config=self.task_config)
class OnlineAcousticDataset(FeatDataset):
    """On-the-fly acoustic dataset: loads waveforms, extracts features per batch."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, target_level=(-25), **kwargs):
        # CSV lengths are in frames; scale limits to waveform samples
        # (factor of 160 — presumably the hop size, TODO confirm).
        max_timestep *= 160
        super().__init__(extracter, task_config, bucket_size, file_path, sets,
                         max_timestep, libri_root, **kwargs)
        self.target_level = target_level
        self.sample_length = self.sample_length * 160

    def _normalize_wav_decibel(self, wav):
        """Normalize the signal to the target level."""
        if self.target_level == 'None':
            return wav
        rms = wav.pow(2).mean().pow(0.5)
        scale = (10 ** (self.target_level / 20)) / (rms + 1e-10)
        return wav * scale

    def _load_feat(self, feat_path):
        """Load a saved .npy feature, or a level-normalized raw waveform."""
        if self.libri_root is None:
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        wav, _ = torchaudio.load(os.path.join(self.libri_root, feat_path))
        return self._normalize_wav_decibel(wav.squeeze())

    def _process_x_pad_batch(self, x_pad_batch):
        if self.libri_root is not None:
            # Add a channel dimension before feature extraction.
            x_pad_batch = x_pad_batch.unsqueeze(1)
        feat_list = self.extracter(x_pad_batch)
        return generate_masked_acoustic_model_data(feat_list, config=self.task_config)

    def __getitem__(self, index):
        feats = [self._sample(self._load_feat(path)) for path in self.X[index]]
        return self._process_x_pad_batch(pad_sequence(feats, batch_first=True))
class UpstreamPretrainExpert(nn.Module):
    """The Mockingjay pretrain expert.

    Builds a TransformerForMaskedAcousticModel plus the matching dataloader
    (kaldi features, online preprocessing, or pre-extracted features), and
    implements the hooks the pretraining Runner expects.
    """

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super(UpstreamPretrainExpert, self).__init__()
        self.datarc = datarc
        self.device = device
        self.multi_gpu = multi_gpu
        # `upstream_config` is either a YAML path (fresh run) or an
        # already-parsed dict (resuming a previous experiment).
        if (type(upstream_config) == str):
            self.upstream_config = yaml.load(open(upstream_config, 'r'), Loader=yaml.FullLoader)
            print('[UpstreamPretrainExpert] - Using upstream config from:', upstream_config)
        elif (type(upstream_config) == dict):
            self.upstream_config = upstream_config
            print('[UpstreamPretrainExpert] - Using upstream config from the previous experiment.')
        else:
            raise ValueError
        # Choose the feature pipeline:
        #   libri_root + 'kaldi' in audio config -> on-the-fly kaldi extracter
        #   libri_root only                      -> online preprocessor
        #   neither                              -> pre-extracted saved features
        if (('libri_root' in self.datarc) and ('kaldi' in self.upstream_config['audio'])):
            print('[UpstreamPretrainExpert] - Using kaldi feature extracter, on-the-fly feature extraction')
            (extracter, input_dim, _) = get_extracter(self.upstream_config['audio'])
            output_dim = None
        elif ('libri_root' in self.datarc):
            print('[UpstreamPretrainExpert] - Using online preprocessor, on-the-fly feature extraction')
            (extracter, input_dim, output_dim) = get_preprocessor(self.upstream_config['audio'])
        else:
            print('[UpstreamPretrainExpert] - Using features pre-extracted and saved')
            (extracter, input_dim) = (None, self.upstream_config['transformer']['input_dim'])
            output_dim = None
        print('[UpstreamPretrainExpert] - Input dim:', input_dim)
        self._get_train_dataloader(extracter)
        print('[UpstreamPretrainExpert] - Initializing model...')
        model_config = TransformerConfig(self.upstream_config['transformer'])
        setattr(model_config, 'loss', self.upstream_config['task']['loss'])
        self.model = TransformerForMaskedAcousticModel(model_config, input_dim, output_dim=output_dim)
        if self.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print(('[UpstreamPretrainExpert] - Multi-GPU training Enabled: ' + str(torch.cuda.device_count())))
        print(('[UpstreamPretrainExpert] - Number of parameters: ' + str(sum((p.numel() for p in self.model.parameters() if p.requires_grad)))))

    def _get_train_dataloader(self, extracter):
        # batch_size=1 because the dataset itself yields pre-bucketed batches
        # (unwrapped again by the dataset's collate_fn).
        if (('libri_root' in self.datarc) and ('kaldi' not in self.upstream_config['audio'])):
            dataset = OnlineAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        else:
            dataset = KaldiAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], **self.datarc)
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def load_model(self, init_ckpt):
        """Restore transformer and spec-head weights from a checkpoint state dict."""
        assert (('Transformer' in init_ckpt) and ('SpecHead' in init_ckpt))
        if self.multi_gpu:
            self.model.module.Transformer.load_state_dict(init_ckpt['Transformer'])
            self.model.module.SpecHead.load_state_dict(init_ckpt['SpecHead'])
        else:
            self.model.Transformer.load_state_dict(init_ckpt['Transformer'])
            self.model.SpecHead.load_state_dict(init_ckpt['SpecHead'])

    def loss_to_device(self):
        # Move the loss module to the compute device (called by the Runner after load).
        (self.model.loss.to(self.device) if (not self.multi_gpu) else self.model.module.loss.to(self.device))

    def add_state_to_save(self, all_states):
        """Insert spec-head / transformer weights and config into `all_states`."""
        all_states['SpecHead'] = (self.model.SpecHead.state_dict() if (not self.multi_gpu) else self.model.module.SpecHead.state_dict())
        all_states['Transformer'] = (self.model.Transformer.state_dict() if (not self.multi_gpu) else self.model.module.Transformer.state_dict())
        all_states['Upstream_Config'] = self.upstream_config
        return all_states

    def get_train_dataloader(self):
        return self.dataloader

    def forward(self, data, records={}, global_step=0, log_step=1000, **kwargs):
        """Run one masked-acoustic-model pretraining step.

        Args:
            data:
                [spec_masked, pos_enc, mask_label, attn_mask, spec_target]
            records:
                defaultdict(list); contents appended here are logged on
                Tensorboard every log_step by self.log_records.

        Return:
            (loss, records)
        """
        (spec_masked, pos_enc, mask_label, attn_mask, spec_target) = (data[0], data[1], data[2], data[3], data[4])
        spec_masked = spec_masked.to(self.device)
        if (pos_enc.dim() == 3):
            # Already one positional encoding per batch element.
            pos_enc = pos_enc.to(self.device)
        elif (pos_enc.dim() == 2):
            # Shared positional encoding; broadcast over the batch dimension.
            pos_enc = pos_enc.to(self.device).expand(spec_masked.size(0), *pos_enc.size())
        mask_label = mask_label.to(self.device)
        attn_mask = attn_mask.to(self.device)
        spec_target = spec_target.to(self.device)
        (loss, pred_spec) = self.model(spec_masked, pos_enc, mask_label, attn_mask, spec_target)
        if ((global_step % log_step) == 0):
            # Render masked input / prediction / target spectrograms for Tensorboard.
            spec_list = [spec_masked, pred_spec, spec_target]
            name_list = ['mask_spec', 'pred_spec', 'true_spec']
            for i in range(len(spec_list)):
                spec = plot_spectrogram_to_numpy(spec_list[i][0].data.cpu().numpy())
                records[name_list[i]] = spec
        return (loss, records)

    def on_before_zero_grad(self):
        # Hook called by the Runner right before optimizer.zero_grad(); no-op here.
        pass

    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """Write accumulated records (spectrogram images) to Tensorboard.

        Args:
            records:
                defaultdict(list), contents already appended
            logger:
                Tensorboard SummaryWriter; keys are logged as f'{prefix}{key}'
            prefix:
                used to indicate upstream and train/test on Tensorboard,
                eg. 'mockingjay/train-'
            global_step:
                global_step in runner, used as the Tensorboard x-axis
        """
        for (key, values) in records.items():
            logger.add_image(f'{prefix}{key}', values, global_step=global_step)
class TransformerForMaskedAcousticModel(TransformerInitModel):
    """Transformer model with the masked acoustic modeling head.

    This module comprises the Transformer model followed by the masked acoustic
    modeling head (spectrogram prediction).

    Params:
        `config`: a TransformerConfig class instance with the configuration to build a new model
        `input_dim`: int, input dimension of model
        `output_dim`: int, output dimension of model (falls back to `input_dim` when None)
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `spec_input`: torch tensor of shape [batch_size, sequence_length, feature_dimension]
            with the selected frames processed as masked frames during training,
            generated by the `process_train_MAM_data()` function in `transformer/mam.py`.
        `pos_enc`: torch tensor of shape [batch_size, sequence_length, hidden_size],
            generated by the `fast_position_encoding()` function in `transformer/mam.py`.
        `mask_label`: masked acoustic modeling labels - torch tensor of shape
            [batch_size, sequence_length] with indices selected in [1, 0];
            the loss is only computed for positions set to 1.
        `attention_mask`: an optional torch tensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1], masking padded positions in
            batches of varying-length sequences.
        `spec_label`: torch tensor of shape [batch_size, sequence_length, feature_dimension]
            which is the ground-truth spectrogram used as the reconstruction target.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads]
            with values between 0 and 1, used to nullify transformer heads
            (1.0 => head fully masked, 0.0 => head not masked).

    Outputs:
        if `spec_label` and `mask_label` are not `None`:
            (masked acoustic modeling loss, predicted spectrogram).
        elif `output_attentions`:
            (all attention weights, predicted spectrogram).
        else:
            (predicted spectrogram of shape [batch_size, sequence_length, output_dim],
             last hidden state).

    Example usage:

    ```python
    spec_input = torch.LongTensor(spec_frames)
    pos_enc = torch.LongTensor(position_encoding(seq_len=len(spec_frames)))

    config = TransformerConfig(config)

    model = TransformerForMaskedAcousticModel(config)
    masked_spec_logits = model(spec_input, pos_enc)
    ```
    """

    def __init__(self, config, input_dim, output_dim, output_attentions=False, keep_multihead_output=False):
        super(TransformerForMaskedAcousticModel, self).__init__(config, output_attentions)
        self.Transformer = TransformerModel(config, input_dim, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        # SpecHead reconstructs spectrograms; defaults to input_dim when no
        # separate output dimension is configured.
        self.SpecHead = TransformerSpecPredictionHead(config, (output_dim if (output_dim is not None) else input_dim))
        self.apply(self.init_Transformer_weights)
        loss = {'L1': nn.L1Loss(), 'MSE': nn.MSELoss()}
        self.loss = (loss[config.loss] if hasattr(config, 'loss') else loss['L1'])

    def forward(self, spec_input, pos_enc, mask_label=None, attention_mask=None, spec_label=None, head_mask=None):
        outputs = self.Transformer(spec_input, pos_enc, attention_mask, output_all_encoded_layers=False, head_mask=head_mask)
        if self.output_attentions:
            (all_attentions, sequence_output) = outputs
        else:
            sequence_output = outputs
        (pred_spec, pred_state) = self.SpecHead(sequence_output)
        if ((spec_label is not None) and (mask_label is not None)):
            # Loss is computed only over the masked positions.
            assert (mask_label.sum() > 0), 'Without any masking, loss might go NaN. Check your pretrain data processing (s3prl/pretrain/mockingjay/task.py)'
            masked_spec_loss = self.loss(pred_spec.masked_select(mask_label), spec_label.masked_select(mask_label))
            return (masked_spec_loss, pred_spec)
        elif self.output_attentions:
            return (all_attentions, pred_spec)
        return (pred_spec, pred_state)
class ApcAudioDataset(FeatDataset):
    """APC pretraining dataset: returns a padded feature batch and its lengths."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets,
                         max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        """Load a pre-extracted .npy feature, or extract features from raw audio."""
        if self.libri_root is not None:
            wav, _ = torchaudio.load(os.path.join(self.libri_root, feat_path))
            return self.extracter(wav)
        return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))

    def __getitem__(self, index):
        batch = [self._sample(self._load_feat(f)) for f in self.X[index]]
        batch_lengths = [len(item) for item in batch]
        return pad_sequence(batch, batch_first=True), batch_lengths
class Runner():
    """
    Used to handle high-level concepts of a ML experiment
    eg. training loop, evaluation loop, upstream propagation, optimization, tensorboard logging, checkpoint saving
    """

    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.logger = SummaryWriter(args.expdir)
        # Resume state (weights / optimizer / scheduler / step) when a
        # checkpoint path is given; empty dict means a fresh run.
        self.init_ckpt = (torch.load(self.args.init_ckpt, map_location='cpu') if self.args.init_ckpt else {})
        self.upstream = self._get_upstream()

    def _get_upstream(self):
        """Dynamically import and build the pretrain expert for args.upstream."""
        # When resuming, reuse the exact upstream config stored in the checkpoint.
        init_upstream = self.init_ckpt.get('Upstream_Config')
        if init_upstream:
            self.args.upstream_config = init_upstream
        module_path = f'pretrain.{self.args.upstream}.pretrain_expert'
        Upstream = getattr(importlib.import_module(module_path), 'UpstreamPretrainExpert')
        upstream = Upstream(self.config['pretrain_expert']['datarc'], self.args.upstream_config, self.args.device, self.args.multi_gpu).to(self.args.device)
        # Sanity-check the expert implements the interface the Runner relies on.
        assert hasattr(upstream, 'device')
        assert hasattr(upstream, 'forward')
        assert hasattr(upstream, 'load_model')
        assert hasattr(upstream, 'add_state_to_save')
        assert hasattr(upstream, 'on_before_zero_grad')
        assert hasattr(upstream, 'get_train_dataloader')
        if (self.init_ckpt != {}):
            print('[Runner] - Loading upstream weights from the previous experiment')
            upstream.load_model(self.init_ckpt)
            if hasattr(upstream, 'loss_to_device'):
                print('[Runner] - Loss to device')
                upstream.loss_to_device()
        return upstream

    def _get_optimizer(self, model_params):
        """Build the optimizer, restoring its state when resuming."""
        optimizer = get_optimizer(model_params, self.config['runner']['total_steps'], self.config['optimizer'])
        if (self.init_ckpt != {}):
            init_optimizer = self.init_ckpt.get('Optimizer')
            assert init_optimizer
            print('[Runner] - Loading optimizer weights from the previous experiment')
            optimizer.load_state_dict(init_optimizer)
        return optimizer

    def _get_scheduler(self, optimizer):
        """Build the LR scheduler, restoring its state when resuming."""
        scheduler = get_scheduler(optimizer, self.config['runner']['total_steps'], self.config['scheduler'])
        if (self.init_ckpt != {}):
            init_scheduler = self.init_ckpt.get('Scheduler')
            assert init_scheduler
            print('[Runner] - Loading scheduler weights from the previous experiment')
            scheduler.load_state_dict(init_scheduler)
        return scheduler

    def train(self):
        """Main pretraining loop: gradient accumulation, optional AMP, logging, checkpointing."""
        self.upstream.train()
        gradient_accumulate_steps = self.config['runner']['gradient_accumulate_steps']
        train_batch_size = self.config['pretrain_expert']['datarc']['train_batch_size']
        print('[Runner] - Accumulated batch size:', (train_batch_size * gradient_accumulate_steps))
        dataloader = self.upstream.get_train_dataloader()

        # Either a fixed number of epochs or a fixed number of optimizer steps.
        n_epochs = self.config['runner']['n_epochs']
        if (n_epochs > 0):
            total_steps = int(((n_epochs * len(dataloader.dataset)) / gradient_accumulate_steps))
            print(f'[Runner] - Training for {n_epochs} epochs, which is equivalent to {total_steps} steps')
        else:
            total_steps = self.config['runner']['total_steps']
            n_epochs = int(((total_steps * gradient_accumulate_steps) / len(dataloader.dataset)))
            print(f'[Runner] - Training for {total_steps} steps, which is approximately {n_epochs} epochs')
        assert (total_steps > self.config['runner']['log_step'])
        assert (total_steps > self.config['runner']['save_step'])

        amp = self.config['runner'].get('fp16', False)
        if amp:
            print('[Runner] - Enabled fp16 training')
            scaler = torch.cuda.amp.GradScaler()

        model_params = [self.upstream.model]
        optimizer = self._get_optimizer(model_params)
        scheduler = None
        if self.config.get('scheduler'):
            scheduler = self._get_scheduler(optimizer)

        pbar = tqdm(total=total_steps, dynamic_ncols=True, desc='overall')
        # Continue the step count from the checkpoint when resuming.
        init_step = self.init_ckpt.get('Step')
        if init_step:
            pbar.n = init_step

        all_loss = 0
        backward_steps = 0
        records = defaultdict(list)
        prefix = f'{self.args.upstream}/train-'

        while (pbar.n < pbar.total):
            for data in tqdm(dataloader, dynamic_ncols=True, desc='train'):
                try:
                    if (pbar.n >= pbar.total):
                        break
                    global_step = (pbar.n + 1)
                    with torch.cuda.amp.autocast(enabled=amp):
                        (loss, records) = self.upstream(data, records=records, global_step=global_step, log_step=self.config['runner']['log_step'])
                    if (gradient_accumulate_steps > 1):
                        loss = (loss / gradient_accumulate_steps)
                    if self.args.multi_gpu:
                        # DataParallel returns a per-device loss vector; reduce it.
                        loss = loss.sum()
                    if amp:
                        scaler.scale(loss).backward()
                    else:
                        loss.backward()
                except RuntimeError as e:
                    # Best-effort recovery from OOM: free the cache, drop this
                    # batch's gradients, and move on to the next batch.
                    if ('CUDA out of memory' in str(e)):
                        print(f'[Runner] - CUDA out of memory at step {global_step}')
                        torch.cuda.empty_cache()
                        optimizer.zero_grad()
                        continue
                    else:
                        raise

                all_loss += loss.item()
                del loss
                backward_steps += 1
                # Only step the optimizer once per accumulation cycle.
                if ((backward_steps % gradient_accumulate_steps) > 0):
                    continue

                # Unscale before clipping so the clip threshold applies to true grads.
                if amp:
                    scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(self.upstream.model.parameters(), self.config['runner']['gradient_clipping'])
                if math.isnan(grad_norm):
                    print(f'[Runner] - Error : grad norm is NaN at global step {global_step}')
                if amp:
                    # GradScaler skips the step internally on inf/NaN gradients.
                    scaler.step(optimizer)
                    scaler.update()
                elif (not math.isnan(grad_norm)):
                    optimizer.step()
                self.upstream.on_before_zero_grad()
                optimizer.zero_grad()
                if scheduler:
                    scheduler.step()

                # Periodic Tensorboard logging (and on the final step).
                if (((global_step % self.config['runner']['log_step']) == 0) or (pbar.n == (pbar.total - 1))):
                    self.logger.add_scalar(f'{prefix}loss', all_loss, global_step=global_step)
                    if hasattr(optimizer, 'get_lr'):
                        self.logger.add_scalar(f'{prefix}lr', optimizer.get_lr()[0], global_step=global_step)
                    else:
                        self.logger.add_scalar(f'{prefix}lr', self.config['optimizer']['lr'], global_step=global_step)
                    self.logger.add_scalar(f'{prefix}gradient-norm', grad_norm, global_step=global_step)
                    self.upstream.log_records(records=records, logger=self.logger, prefix=prefix, global_step=global_step)
                    records = defaultdict(list)

                # Periodic checkpointing (and on the final step), keeping at
                # most `max_keep` rolling checkpoints.
                if (((global_step % self.config['runner']['save_step']) == 0) or (pbar.n == (pbar.total - 1))):
                    def check_ckpt_num(directory):
                        # Remove the oldest checkpoints (by step in the filename).
                        max_keep = self.config['runner']['max_keep']
                        ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
                        if (len(ckpt_pths) >= max_keep):
                            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
                            for ckpt_pth in ckpt_pths[:((len(ckpt_pths) - max_keep) + 1)]:
                                os.remove(ckpt_pth)
                    check_ckpt_num(self.args.expdir)
                    all_states = {'Optimizer': optimizer.state_dict(), 'Step': pbar.n, 'Args': self.args, 'Config': self.config}
                    all_states = self.upstream.add_state_to_save(all_states)
                    if scheduler:
                        all_states['Scheduler'] = scheduler.state_dict()
                    name = (f'states-epoch-{n_epochs}.ckpt' if ((pbar.n == (pbar.total - 1)) and (n_epochs > 0)) else f'states-{global_step}.ckpt')
                    save_path = os.path.join(self.args.expdir, name)
                    tqdm.write(f'[Runner] - Save the checkpoint to: {save_path}')
                    torch.save(all_states, save_path)

                # Reset the accumulated loss for the next optimizer step.
                all_loss = 0
                pbar.update(1)
        pbar.close()
class KaldiAcousticDataset(_KaldiAcousticDataset):
    """Kaldi-feature dataset variant producing SpecAugment training data."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets,
                         max_timestep, libri_root, **kwargs)

    def __getitem__(self, index):
        batch = [self._sample(self._load_feat(f)) for f in self.X[index]]
        padded = pad_sequence(batch, batch_first=True)
        return generate_spec_aug_data(spec=(padded,), config=self.task_config)
class OnlineAcousticDataset(_OnlineAcousticDataset):
    """On-the-fly acoustic dataset variant producing SpecAugment training data."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets,
                 max_timestep=0, libri_root=None, target_level=(-25), **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets,
                         max_timestep, libri_root, target_level, **kwargs)

    def _process_x_pad_batch(self, x_pad_batch):
        if self.libri_root is not None:
            # Add a channel dimension before feature extraction.
            x_pad_batch = x_pad_batch.unsqueeze(1)
        feats = self.extracter(x_pad_batch)
        return generate_spec_aug_data(feats, config=self.task_config)
class UpstreamPretrainExpert(MockingjayPretrainExpert):
    """The spec augment transformer pretrain expert."""

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super().__init__(datarc, upstream_config, device, multi_gpu, **kwargs)

    def _get_train_dataloader(self, extracter):
        # On-the-fly extraction when libri_root is given without a kaldi
        # config; otherwise fall back to the kaldi / pre-extracted pipeline.
        use_online = ('libri_root' in self.datarc
                      and 'kaldi' not in self.upstream_config['audio'])
        if use_online:
            dataset = OnlineAcousticDataset(
                extracter,
                self.upstream_config['task'],
                self.datarc['train_batch_size'],
                target_level=self.upstream_config['audio']['target_level'],
                **self.datarc,
            )
        else:
            dataset = KaldiAcousticDataset(
                extracter,
                self.upstream_config['task'],
                self.datarc['train_batch_size'],
                **self.datarc,
            )
        # batch_size=1: the dataset yields pre-bucketed batches itself.
        self.dataloader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=True,
            num_workers=self.datarc['num_workers'],
            drop_last=False,
            pin_memory=True,
            collate_fn=dataset.collate_fn,
        )
class ASR(Problem):
    """Multi-stage ASR problem runner (data prep -> tokenizer -> train -> test)."""

    def run(self, target_dir: str, cache_dir: str, remove_all_cache: bool=False, start: int=0, stop: int=None, num_workers: int=6, eval_batch: int=(- 1), device: str='cuda', world_size: int=1, rank: int=0, test_ckpt_dir: str=None, prepare_data: dict=None, prepare_tokenizer_data: dict=None, build_tokenizer: dict=None, build_dataset: dict=None, build_batch_sampler: dict=None, build_collate_fn: dict=None, build_upstream: dict=None, build_featurizer: dict=None, build_downstream: dict=None, build_model: dict=None, build_task: dict=None, build_optimizer: dict=None, build_scheduler: dict=None, save_model: dict=None, save_task: dict=None, train: dict=None, evaluate: dict=None):
        """
        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the metadata file for ASR (waveform path, label...)
        1        Prepare the metadata file for training tokenizer
        2        Train the tokenizer
        3        Train the ASR model
        4        Evaluate the model on multiple test sets

        Args:
            target_dir (str): The directory that stores the script result.
            cache_dir (str): The directory that caches the processed data.
                Default: ~/.cache/s3prl/data
            remove_all_cache (bool): Whether to remove all the cache under `cache_dir`.
            start (int): The starting stage of the problem script. Default: 0
            stop (int): The stopping stage, `None` to reach the final stage.
            num_workers (int): num_workers for all the torch DataLoaders.
            eval_batch (int): Limit the number of evaluation batches; -1 disables
                the limit. Useful for fast development runs.
            device (str): "cpu" or "cuda".
            world_size (int): Number of parallel processes (1 unless distributed).
            rank (int): Process id in [0, world_size) for distributed training.
            test_ckpt_dir (str): Checkpoint dir for testing; defaults to the
                `valid_best` checkpoint under the training directory.
            **others: Arguments like :code:`prepare_data` / :code:`build_model`
                are forwarded to the methods of the same name; see those methods.
        """
        # Snapshot the fully-resolved arguments for reproducibility.
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize all optional dict configs so downstream code can assume dicts.
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        prepare_data: dict = (prepare_data or {})
        prepare_tokenizer_data: dict = (prepare_tokenizer_data or {})
        build_tokenizer: dict = (build_tokenizer or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate = (evaluate or {})

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)

        # Stage pattern: the expensive call (get_path_only=False) runs only when
        # the stage is selected; the cheap path-only call runs unconditionally so
        # later stages always have the paths even when earlier stages are skipped.
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=False)
        (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=True)

        def check_fn():
            # Sanity-check stage 0 outputs before (possibly) stopping here.
            assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 1
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare tokenizer data')
            tokenizer_data_path = self.prepare_tokenizer_data(prepare_tokenizer_data, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=False)
        tokenizer_data_path = self.prepare_tokenizer_data(prepare_tokenizer_data, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=True)

        def check_fn():
            assert Path(tokenizer_data_path).exists()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 2
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: build tokenizer')
            tokenizer_path = self.build_tokenizer(build_tokenizer, target_dir, cache_dir, tokenizer_data_path, get_path_only=False)
        tokenizer_path = self.build_tokenizer(build_tokenizer, target_dir, cache_dir, tokenizer_data_path, get_path_only=True)

        def check_fn():
            assert Path(tokenizer_path).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 3
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'train', train_csv, tokenizer_path, build_dataset, build_batch_sampler)
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'valid', valid_csv, tokenizer_path, build_dataset, build_batch_sampler)
            with Path(tokenizer_path).open('rb') as f:
                tokenizer = pickle.load(f)
            # Model output size = vocabulary size (CTC over tokenizer symbols).
            build_model_all_args = dict(build_model=build_model, model_output_size=len(tokenizer), build_upstream=build_upstream, build_featurizer=build_featurizer, build_downstream=build_downstream)
            build_task_all_args_except_model = dict(build_task=build_task, tokenizer=tokenizer)
            self.train(train, train_dir, build_model_all_args, build_task_all_args_except_model, save_model, save_task, build_optimizer, build_scheduler, evaluate, train_ds, train_bs, self.build_collate_fn(build_collate_fn, 'train'), valid_ds, valid_bs, self.build_collate_fn(build_collate_fn, 'valid'), device=device, eval_batch=eval_batch, num_workers=num_workers, world_size=world_size, rank=rank)

        def check_fn():
            assert (train_dir / 'valid_best').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 4
        if (start <= stage_id):
            test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_csv) in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                # One result directory per (checkpoint, test set) pair; the
                # checkpoint's path relative to train_dir is flattened with '-'.
                test_dir: Path = (((target_dir / 'evaluate') / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-')) / test_name)
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_csv}')
                (test_ds, test_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'test', test_csv, tokenizer_path, build_dataset, build_batch_sampler)
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir)
                logs: dict = self.evaluate(evaluate, 'test', valid_best_task, test_ds, test_bs, self.build_collate_fn(build_collate_fn, 'test'), eval_batch, test_dir, device, num_workers)
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                logger.info(f'test results: {test_metrics}')
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

        return stage_id

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str, data_csv: str, tokenizer_path: str, build_dataset: dict, build_batch_sampler: dict):
        """Build the (dataset, batch_sampler) pair for a given mode (train/valid/test)."""
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, tokenizer_path)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model, tokenizer):
        """Wrap the model and tokenizer into a CTC speech-to-text task."""
        task = Speech2TextCTCTask(model, tokenizer, **build_task)
        return task
def prepare_librispeech(target_dir, cache_dir, dataset_root, train_sets: List[str], valid_sets: List[str], test_sets: List[str], n_jobs: int=6, get_path_only: bool=False):
    """Prepare LibriSpeech for ASR following :obj:`SuperbASR.prepare_data` format.

    Parses the corpus under ``dataset_root`` and writes one csv per split
    (columns: sorted data fields plus ``id``) into ``target_dir``.
    See :obj:`LibriSpeech` for the arguments usage.

    Returns:
        (train_path, valid_path, test_paths) — Path, Path, List[Path]
    """
    target_dir = Path(target_dir)
    train_path = target_dir / f"{'+'.join(train_sets)}.csv"
    valid_path = target_dir / f"{'+'.join(valid_sets)}.csv"
    test_paths = [target_dir / f'{test_set}.csv' for test_set in test_sets]
    if get_path_only:
        return (train_path, valid_path, test_paths)

    corpus = LibriSpeech(dataset_root, n_jobs, train_sets, valid_sets, test_sets)
    (train_data, valid_data, test_data) = corpus.data_split

    def dict_to_csv(data_dict, csv_path):
        # Deterministic row order: sort by data-point id, columns by field name.
        ids = sorted(data_dict.keys())
        field_names = sorted(data_dict[ids[0]].keys())
        columns = {
            field: [data_dict[data_id][field] for data_id in ids]
            for field in field_names
        }
        columns['id'] = ids
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    # NOTE(review): only the first test csv is written, even when several
    # test_sets are requested — confirm whether corpus.data_split merges them.
    dict_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
def prepare_common_tokenizer(target_dir, cache_dir, tokenizer_data_path, get_path_only=False, tokenizer_name: str=None, vocab_file: str=None, vocab_type: str='character', vocab_args: dict=None, slots_file: str=None):
    """Build the tokenizer following :obj:`SuperbASR.build_tokenizer` format.

    Args:
        tokenizer_name (str): Save the tokenizer filepath with this filename
        vocab_file (str): When the tokenizer was already prepared, load it
            from this vocab instead of training. Path or URL.
        vocab_type (str): character / phoneme / word / subword
        vocab_args (dict): extra arguments for
            :obj:`s3prl.dataio.encoder.vocabulary.generate_basic_vocab`
            (character / phoneme / word) or ``generate_subword_vocab`` (subword)
        slots_file (str): If present, pre-defined slots are used to encode
            the special tokens. Path or URL.

    Returns:
        str: the pickled tokenizer path
    """
    if (tokenizer_name is None):
        tokenizer_name = f'{Path(tokenizer_data_path).stem}-{vocab_type}.tokenizer'
    tokenizer_path = (Path(target_dir) / f'{tokenizer_name}.pkl')
    if get_path_only:
        return tokenizer_path

    def localize(resource):
        # URLs are downloaded to a local cache; local paths pass through.
        if resource is None:
            return None
        resource = str(resource)
        return urls_to_filepaths(resource) if resource.startswith('http') else resource

    vocab_file = localize(vocab_file)
    slots_file = localize(slots_file)

    if (vocab_file is not None):
        # Pre-built vocabulary: just load.
        tokenizer = load_tokenizer(vocab_type, vocab_file=vocab_file, slots_file=slots_file)
    else:
        # Train a vocabulary from the prepared tokenizer text data.
        vocab_args = (vocab_args or {})
        assert isinstance(vocab_args, dict)
        if (vocab_type == 'subword') and ('output_file' not in vocab_args):
            vocab_args['output_file'] = (Path(target_dir) / 'tokenizer.spm')
        vocab_result = generate_vocab(vocab_type, text_file=str(tokenizer_data_path), **vocab_args)
        # generate_vocab returns either a token list or a vocab-file path.
        if isinstance(vocab_result, list):
            vocab_list, vocab_file = vocab_result, None
        elif isinstance(vocab_result, str):
            vocab_list, vocab_file = None, vocab_result
        else:
            vocab_list = vocab_file = None
        tokenizer = load_tokenizer(vocab_type, vocab_file=vocab_file, vocab_list=vocab_list, slots_file=slots_file)

    with open(tokenizer_path, 'wb') as f:
        pickle.dump(tokenizer, f)
    return tokenizer_path
class SuperbASR(ASR):
    """The SUPERB ASR recipe: LibriSpeech + character tokenizer + RNN downstream."""

    def default_config(self) -> dict:
        # Default hyperparameters for every stage; MISSING entries must be
        # supplied by the caller.
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING, train_sets=['train-clean-100'], valid_sets=['dev-clean'], test_sets=['test-clean']),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='character'),
            build_dataset=dict(),
            build_batch_sampler=dict(train=dict(batch_size=32, max_length=2000, shuffle=True), valid=dict(batch_size=1), test=dict(batch_size=1)),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(
                model_conf=dict(module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True),
                specaug_conf=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2),
            ),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['cer', 'wer']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')),
            save_task=dict(),
            train=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='wer', valid_higher_better=False, auto_resume=True, resume_ckpt_dir=None),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default calls :obj:`prepare_librispeech` with ``**prepare_data``.

        Returns:
            tuple: (train_path, valid_path, test_paths) — csv files with
            columns ``id``, ``wav_path``, ``transcription``.
        """
        return prepare_librispeech(**self._get_current_arguments(flatten_dict='prepare_data'))

    def prepare_tokenizer_data(self, prepare_tokenizer_data: dict, target_dir: str, cache_dir: str, train_csv: str, valid_csv: str, test_csvs: List[str], get_path_only: bool=False):
        """Prepare the text file used for training the tokenizer.

        Only the transcriptions in ``train_csv`` are used, one per line.

        Returns:
            str: the text file path
        """
        tokenizer_data_name = f'{Path(train_csv).stem}.tokenizer_data'
        tokenizer_data_path = (Path(target_dir) / f'{tokenizer_data_name}.txt')
        if get_path_only:
            return tokenizer_data_path
        all_text = pd.read_csv(train_csv)['transcription']
        with tokenizer_data_path.open('w') as f:
            # One transcription per line (newline embedded in the literal).
            f.writelines([f'''{line}
''' for line in all_text])
        return tokenizer_data_path

    def build_tokenizer(self, build_tokenizer: dict, target_dir: str, cache_dir: str, tokenizer_data_path: str, get_path_only: bool=False):
        """Build the tokenizer from the data prepared by :obj:`prepare_tokenizer_data`.

        By default calls :obj:`prepare_common_tokenizer` with ``**build_tokenizer``.

        Returns:
            str: filepath of the pickled
            :obj:`s3prl.dataio.encoder.tokenizer.Tokenizer`
        """
        return prepare_common_tokenizer(**self._get_current_arguments(flatten_dict='build_tokenizer'))

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, tokenizer_path: str):
        """Build the dataset for train/valid/test.

        Each item is a dict with keys: ``x`` (waveform tensor), ``x_len``,
        ``class_ids`` (encoded transcription), ``labels`` (raw text),
        ``unique_name``.
        """
        csv = pd.read_csv(data_csv)
        audio_loader = LoadAudio(csv['wav_path'].tolist())
        with open(tokenizer_path, 'rb') as f:
            tokenizer = pickle.load(f)
        text_encoder = EncodeText(csv['transcription'].tolist(), tokenizer)
        ids = csv['id'].tolist()

        class Speech2TextDataset():
            # Closure-based dataset: captures the loaders built above.

            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                text = text_encoder[index]
                return {'x': audio['wav'], 'x_len': audio['wav_len'], 'class_ids': text['class_ids'], 'labels': text['labels'], 'unique_name': ids[index]}

        dataset = Speech2TextDataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset: Dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` keys: ``train`` (args for
        :obj:`SortedBucketingSampler`), ``valid`` / ``test`` (args for
        :obj:`FixedBatchSizeBatchSampler`).
        """

        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            # Waveform lengths are cached under target_dir/train_stats.
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedBucketingSampler(wav_lens, **(conf.train or {}))
        elif (mode == 'valid'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        # NOTE(review): an unknown mode falls through and raises NameError on
        # `sampler` — confirm only train/valid/test are ever passed.
        return sampler

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Return the task-specific downstream model.

        Builds :obj:`RNNEncoder` (from ``model_conf``) wrapped with
        :obj:`ModelWithSpecaug` (from ``specaug_conf``).
        """

        @dataclass
        class Config():
            model_conf: dict = None
            specaug_conf: dict = None

        conf = Config(**build_downstream)
        model = RNNEncoder(downstream_input_size, downstream_output_size, **(conf.model_conf or {}))
        downstream = ModelWithSpecaug(model, **(conf.specaug_conf or {}))
        return downstream
class SuperbPR(SuperbASR):
    """The SUPERB Phoneme Recognition recipe: SuperbASR with phonemized labels."""

    def default_config(self) -> dict:
        # Same stages as SuperbASR; differs in tokenizer (phoneme), downstream
        # (linear probe), metric (per), and sampler settings.
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING, train_sets=['train-clean-100'], valid_sets=['dev-clean'], test_sets=['test-clean']),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='phoneme'),
            build_dataset=dict(),
            build_batch_sampler=dict(train=dict(batch_size=16, max_length=300000), valid=dict(batch_size=1), test=dict(batch_size=1)),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['per']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.01)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')),
            save_task=dict(),
            train=dict(total_steps=100000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=2, valid_metric='per', valid_higher_better=False, auto_resume=True, resume_ckpt_dir=None),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare LibriSpeech csvs, then phonemize the transcriptions in place."""
        (train_csv, valid_csv, test_csvs) = super().prepare_data(prepare_data, target_dir, cache_dir, get_path_only)
        if get_path_only:
            return (train_csv, valid_csv, test_csvs)
        g2p = G2P()

        def phonemize(csv_path):
            # Overwrite the 'transcription' column with its G2P encoding.
            df = pd.read_csv(csv_path)
            text = df['transcription'].tolist()
            phonemized_text = [g2p.encode(t.strip()) for t in text]
            df['transcription'] = phonemized_text
            df.to_csv(csv_path, index=False)

        for csv_path in [train_csv, valid_csv, *test_csvs]:
            phonemize(csv_path)
        return (train_csv, valid_csv, test_csvs)

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` keys: ``train`` (args for
        :obj:`SortedSliceSampler`), ``valid`` / ``test`` (args for
        :obj:`FixedBatchSizeBatchSampler`).
        """

        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedSliceSampler(wav_lens, **(conf.train or {}))
        elif (mode == 'valid'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        return sampler

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Return the downstream model: a :obj:`FrameLevelLinear` probe."""
        return FrameLevelLinear(downstream_input_size, downstream_output_size, **build_downstream)
def audio_snips_for_slot_filling(target_dir: str, cache_dir: str, dataset_root: str, train_speakers: List[str], valid_speakers: List[str], test_speakers: List[str], get_path_only: bool=False):
    """Prepare the Audio SNIPS corpus for slot filling.

    Writes train/valid/test csvs (columns: sorted data fields plus ``id``)
    under ``target_dir``, normalizing transcriptions and keeping the IOB tag
    sequence aligned word-for-word with the cleaned transcription.
    """
    target_dir = Path(target_dir)
    train_path = target_dir / f'train.csv'
    valid_path = target_dir / f'valid.csv'
    test_paths = [target_dir / f'test.csv']
    if get_path_only:
        return (train_path, valid_path, test_paths)

    corpus = SNIPS(dataset_root, train_speakers, valid_speakers, test_speakers)
    (train_data, valid_data, test_data) = corpus.data_split

    # Words consisting of these punctuation characters are dropped (membership
    # test is substring-based, matching the original behavior).
    punctuation = '?!.,;-–…'

    def dict_to_csv(data_dict, csv_path):
        data_ids = sorted(data_dict.keys())
        fields = sorted(data_dict[data_ids[0]].keys())
        columns = defaultdict(list)
        for data_id in data_ids:
            data_point = data_dict[data_id]

            # Normalize the transcription text.
            trans = data_point['transcription']
            trans = trans.replace('楽園追放', 'EXPELLED')
            trans = trans.replace('官方杂志', '')
            trans = trans.replace('–', '-')
            trans = trans.replace('&', ' AND ')
            trans = trans.translate(translator)
            trans = re.sub(' +', ' ', trans).strip(' ')

            # Drop punctuation-only words together with their IOB tags so the
            # two sequences stay aligned.
            words = trans.split(' ')
            iobs = data_point['iob'].split(' ')
            assert (len(words) == len(iobs))
            kept = [(word, iob) for (word, iob) in zip(words, iobs) if (word not in punctuation)]
            filtered_words = [word for (word, _) in kept]
            filtered_iobs = [iob for (_, iob) in kept]
            assert (len(filtered_words) == len(filtered_iobs))
            data_point['transcription'] = ' '.join(filtered_words)
            data_point['iob'] = ' '.join(filtered_iobs)

            for field in fields:
                columns[field].append(data_point[field])
        columns['id'] = data_ids
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
class SuperbSF(SuperbASR):
    """The SUPERB Slot Filling recipe: Audio SNIPS with slot-aware tokenizer."""

    def default_config(self) -> dict:
        # Same stages as SuperbASR; SNIPS speakers replace LibriSpeech splits
        # and slot-filling metrics are logged.
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING, train_speakers=['Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Matthew', 'Salli'], valid_speakers=['Aditi', 'Amy', 'Geraint', 'Nicole'], test_speakers=['Brian', 'Emma', 'Raveena', 'Russell']),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='character'),
            build_dataset=dict(),
            build_batch_sampler=dict(train=dict(batch_size=32, max_length=300000), valid=dict(batch_size=1), test=dict(batch_size=1)),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(
                model_conf=dict(module='LSTM', proj_size=1024, hidden_size=[1024, 1024], dropout=[0.2, 0.2], layer_norm=[False, False], proj=[False, False], sample_rate=[1, 1], sample_style='concat', bidirectional=True),
                specaug_conf=dict(freq_mask_width_range=(0, 50), num_freq_mask=4, time_mask_width_range=(0, 40), num_time_mask=2),
            ),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['wer', 'cer', 'slot_type_f1', 'slot_value_cer', 'slot_value_wer', 'slot_edit_f1_full', 'slot_edit_f1_part']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(total_steps=200000, log_step=100, eval_step=2000, save_step=500, gradient_clipping=1.0, gradient_accumulate=1, valid_metric='slot_type_f1', valid_higher_better=True, auto_resume=True, resume_ckpt_dir=None),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default calls :obj:`audio_snips_for_slot_filling` with
        ``**prepare_data``.

        Returns:
            tuple: (train_path, valid_path, test_paths) — csv files with
            columns ``id``, ``wav_path``, ``transcription`` (space-separated
            words) and ``iob`` (one tag per word, "O" for none).
        """
        return audio_snips_for_slot_filling(**self._get_current_arguments(flatten_dict='prepare_data'))

    def prepare_tokenizer_data(self, prepare_tokenizer_data: dict, target_dir: str, cache_dir: str, train_csv: str, valid_csv: str, test_csvs: str, get_path_only: bool=False):
        """Write the tokenizer-training directory with slot.txt and text.txt.

        slot.txt lists every IOB tag seen across all splits (sorted, unique);
        text.txt holds the training transcriptions, one per line.

        Returns:
            Path: the directory containing both files
        """
        # NOTE(review): target_dir is annotated str but used with the `/`
        # operator — callers appear to pass a Path; confirm.
        data_dir = (target_dir / 'tokenizer_data')
        if get_path_only:
            return data_dir
        train_df = pd.read_csv(train_csv)
        valid_df = pd.read_csv(valid_csv)
        test_dfs = [pd.read_csv(test_csv) for test_csv in test_csvs]
        iob_lines = pd.concat([train_df, valid_df, *test_dfs], axis=0)['iob'].tolist()
        iobs = []
        for line in iob_lines:
            iobs.extend(line.split(' '))
        iobs = list(sorted(set(iobs)))
        Path(data_dir).mkdir(parents=True, exist_ok=True)
        with open((data_dir / 'slot.txt'), 'w') as f:
            # One IOB tag per line (newline embedded in the literal).
            f.writelines([f'''{iob}
''' for iob in iobs])
        train_df = pd.read_csv(train_csv)
        texts = train_df['transcription'].tolist()
        with open((data_dir / 'text.txt'), 'w') as f:
            f.writelines([f'''{t}
''' for t in texts])
        return data_dir

    def build_tokenizer(self, build_tokenizer: dict, target_dir: str, cache_dir: str, tokenizer_data_path: str, get_path_only: bool=False):
        """Build a tokenizer from text.txt with special slot tokens from slot.txt."""
        return prepare_common_tokenizer(target_dir, cache_dir, (Path(tokenizer_data_path) / 'text.txt'), get_path_only, None, None, slots_file=(Path(tokenizer_data_path) / 'slot.txt'), **build_tokenizer)

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, tokenizer_path: str):
        """Build the slot-filling dataset; items match SuperbASR's dict format,
        except ``class_ids``/``labels`` are IOB-interleaved encodings."""
        csv = pd.read_csv(data_csv)
        audio_loader = LoadAudio(csv['wav_path'].tolist())
        with open(tokenizer_path, 'rb') as f:
            tokenizer = pickle.load(f)
        text_encoder = EncodeText(csv['transcription'].tolist(), tokenizer, iob=csv['iob'].tolist())
        ids = csv['id'].tolist()

        class SlotFillingDataset():
            # Closure-based dataset: captures the loaders built above.

            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                text = text_encoder[index]
                return {'x': audio['wav'], 'x_len': audio['wav_len'], 'class_ids': text['class_ids'], 'labels': text['labels'], 'unique_name': ids[index]}

        dataset = SlotFillingDataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` keys: ``train`` (args for
        :obj:`SortedSliceSampler`), ``valid`` / ``test`` (args for
        :obj:`FixedBatchSizeBatchSampler`). Unknown modes raise ValueError.
        """

        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedSliceSampler(wav_lens, **(conf.train or {}))
            return sampler
        elif (mode == 'valid'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        else:
            raise ValueError(f'Unsupported mode: {mode}')
class ASV(Problem):
    """Base recipe for Automatic Speaker Verification (ASV).

    Drives a staged pipeline: prepare data csvs, build the speaker-label
    encoder, train the model, evaluate multiple checkpoints on every test
    set, and finally report the best EER found per test set.
    """

    def run(
        self,
        target_dir: str,
        cache_dir: str,
        remove_all_cache: bool = False,
        start: int = 0,
        stop: int = None,
        num_workers: int = 6,
        eval_batch: int = -1,
        device: str = "cuda",
        world_size: int = 1,
        rank: int = 0,
        test_ckpt_dir: str = None,
        test_ckpt_steps: List[int] = None,
        prepare_data: dict = None,
        build_encoder: dict = None,
        build_dataset: dict = None,
        build_batch_sampler: dict = None,
        build_collate_fn: dict = None,
        build_upstream: dict = None,
        build_featurizer: dict = None,
        build_downstream: dict = None,
        build_model: dict = None,
        build_task: dict = None,
        build_optimizer: dict = None,
        build_scheduler: dict = None,
        save_model: dict = None,
        save_task: dict = None,
        train: dict = None,
        evaluate: dict = None,
    ):
        """Run the ASV problem script, stage by stage.

        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the metadata files (waveform path, label...)
        1        Build the encoder for encoding the speaker labels
        2        Train the model
        3        Evaluate the model on multiple test sets and checkpoints (see ``test_ckpt_steps``)
        4        Report the best result found on each test set
        ======== ====================

        Args:
            target_dir (str): directory that stores the script results
            cache_dir (str): directory that caches the processed data
                (default: ~/.cache/s3prl/data)
            remove_all_cache (bool): remove everything under ``cache_dir`` first
            start (int): the starting stage of the problem script
            stop (int): the stopping stage; ``None`` runs to the final stage
            num_workers (int): num_workers for all the torch DataLoaders
            eval_batch (int): limit the number of evaluated batches; -1 disables
            device (str): "cpu" or "cuda"
            world_size (int): number of processes running this script in parallel
            rank (int): this process's id, in ``range(world_size)``
            test_ckpt_dir (str): explicit checkpoint dir to test; otherwise use
                checkpoints derived from ``test_ckpt_steps``
            test_ckpt_steps (List[int]): which saved steps to evaluate; ``None``
                evaluates every saved ``step_*`` checkpoint
            **kwds: the remaining dict arguments (``prepare_data`` ...) are
                forwarded to the method of the same name and are not used in
                the core :obj:`run` logic
        """
        # Snapshot the exact arguments of this run for reproducibility
        yaml_path = Path(target_dir) / "configs" / f"{self._get_time_tag()}.yaml"
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open("w") as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        cache_dir: str = cache_dir or Path.home() / ".cache" / "s3prl" / "data"
        prepare_data: dict = prepare_data or {}
        build_encoder: dict = build_encoder or {}
        build_dataset: dict = build_dataset or {}
        build_batch_sampler: dict = build_batch_sampler or {}
        build_collate_fn: dict = build_collate_fn or {}
        build_upstream: dict = build_upstream or {}
        build_featurizer: dict = build_featurizer or {}
        build_downstream: dict = build_downstream or {}
        build_model: dict = build_model or {}
        build_task: dict = build_task or {}
        build_optimizer: dict = build_optimizer or {}
        build_scheduler: dict = build_scheduler or {}
        save_model: dict = save_model or {}
        save_task: dict = save_task or {}
        train: dict = train or {}
        evaluate = evaluate or {}

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)

        stage_id = 0
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: prepare data")
            train_csv, test_csvs = self.prepare_data(
                prepare_data, target_dir, cache_dir, get_path_only=False
            )

        # NOTE(review): the get_path_only=True call re-resolves the csv paths
        # so later stages still have them when this stage is skipped (start > 0)
        train_csv, test_csvs = self.prepare_data(
            prepare_data, target_dir, cache_dir, get_path_only=True
        )

        def check_fn():
            assert Path(train_csv).is_file()
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 1
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: build encoder")
            encoder_path = self.build_encoder(
                build_encoder,
                target_dir,
                cache_dir,
                train_csv,
                test_csvs,
                get_path_only=False,
            )

        # Same pattern as stage 0: always resolve the encoder path
        encoder_path = self.build_encoder(
            build_encoder,
            target_dir,
            cache_dir,
            train_csv,
            test_csvs,
            get_path_only=True,
        )

        def check_fn():
            assert Path(encoder_path).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 2
        # train_dir is needed by later stages too, so it is set unconditionally
        train_dir = target_dir / "train"
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: Train Model")
            train_ds, train_bs = self._build_dataset_and_sampler(
                target_dir,
                cache_dir,
                "train",
                train_csv,
                encoder_path,
                build_dataset,
                build_batch_sampler,
            )
            with Path(encoder_path).open("rb") as f:
                encoder = pickle.load(f)

            build_model_all_args = dict(
                build_model=build_model,
                model_output_size=len(encoder),
                build_upstream=build_upstream,
                build_featurizer=build_featurizer,
                build_downstream=build_downstream,
            )
            build_task_all_args_except_model = dict(
                build_task=build_task, encoder=encoder
            )
            self.train(
                train,
                train_dir,
                build_model_all_args,
                build_task_all_args_except_model,
                save_model,
                save_task,
                build_optimizer,
                build_scheduler,
                evaluate,
                train_ds,
                train_bs,
                self.build_collate_fn(build_collate_fn, "train"),
                None,  # valid dataset / sampler / collate_fn: ASV has no valid split
                None,
                None,
                device=device,
                eval_batch=eval_batch,
                num_workers=num_workers,
                world_size=world_size,
                rank=rank,
            )

        # Collect the checkpoint directories to evaluate
        test_ckpt_dirs = []
        if test_ckpt_dir is not None:
            test_ckpt_dirs.append(test_ckpt_dir)
        if test_ckpt_steps is None:
            # No explicit steps: evaluate every saved "step_*" checkpoint
            train_ckpts = [
                train_dir / name
                for name in os.listdir(train_dir)
                if name.startswith("step_")
            ]
            test_ckpt_dirs.extend(train_ckpts)
        else:
            test_ckpt_dirs.extend(
                [train_dir / f"step_{step}" for step in test_ckpt_steps]
            )

        def check_fn():
            for ckpt_dir in test_ckpt_dirs:
                assert Path(ckpt_dir).is_dir(), ckpt_dir

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 3
        if start <= stage_id:
            # Evaluate every (test set, checkpoint) pair
            for test_idx, test_csv in enumerate(test_csvs):
                for ckpt_idx, ckpt_dir in enumerate(test_ckpt_dirs):
                    test_name = Path(test_csv).stem
                    test_dir: Path = (
                        target_dir
                        / "evaluate"
                        / test_name
                        / ckpt_dir.relative_to(train_dir).as_posix().replace("/", "-")
                    )
                    test_dir.mkdir(exist_ok=True, parents=True)
                    logger.info(
                        f"Stage {stage_id}.{test_idx}.{ckpt_idx}: Test on {test_csv} with model {ckpt_dir}"
                    )
                    test_ds, test_bs = self._build_dataset_and_sampler(
                        target_dir,
                        cache_dir,
                        "test",
                        test_csv,
                        encoder_path,
                        build_dataset,
                        build_batch_sampler,
                    )

                    # Build the trial list (label, enroll id, test id) from the csv
                    csv = pd.read_csv(test_csv)
                    test_trials = []
                    for rowid, row in csv.iterrows():
                        test_trials.append(
                            (int(row["label"]), str(row["id1"]), str(row["id2"]))
                        )
                    overrides = dict(test_trials=test_trials)

                    _, task = self.load_model_and_task(ckpt_dir, overrides)
                    logs = self.evaluate(
                        evaluate,
                        "test",
                        task,
                        test_ds,
                        test_bs,
                        self.build_collate_fn(build_collate_fn, "test"),
                        eval_batch,
                        test_dir,
                        device,
                        num_workers,
                    )
                    test_metrics = {
                        name: float(value) for name, value in logs.items()
                    }
                    logger.info(f"test metrics: {test_metrics}")
                    assert "EER" in test_metrics
                    with (test_dir / f"result.yaml").open("w") as f:
                        yaml.safe_dump(test_metrics, f)

        self._stage_check(stage_id, stop, lambda: True)

        stage_id = 4
        if start <= stage_id:
            # Report the best checkpoint (lowest EER) per test set
            for test_idx, test_csv in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                logger.info(f"Report results on {test_name}")
                eer_ckpts = []
                for ckpt_dir in os.listdir(target_dir / "evaluate" / test_name):
                    result_yaml: Path = (
                        target_dir / "evaluate" / test_name / ckpt_dir / "result.yaml"
                    )
                    if result_yaml.is_file():
                        with open(result_yaml) as f:
                            eer_ckpts.append(
                                (
                                    float(
                                        yaml.load(f, Loader=yaml.FullLoader)["EER"]
                                    ),
                                    str(result_yaml.parent),
                                )
                            )
                logger.info(f"All EERs on {test_name}:")
                for eer, ckpt in eer_ckpts:
                    logger.info(f"ckpt_dir: {ckpt}, eer: {eer}")
                eer_ckpts.sort(key=lambda x: x[0])
                best_eer, best_ckpt_dir = eer_ckpts[0]
                logger.info(
                    f"Best EER on {test_name} is from {best_ckpt_dir}: {best_eer}"
                )
                with (
                    target_dir / "evaluate" / test_name / "best_result.yaml"
                ).open("w") as f:
                    yaml.safe_dump({"EER": best_eer}, f)

    def _build_dataset_and_sampler(
        self,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
        build_dataset: dict,
        build_batch_sampler: dict,
    ):
        """Build the mode-specific dataset and its batch sampler as a pair."""
        logger.info(f"Build {mode} dataset")
        dataset = self.build_dataset(
            build_dataset, target_dir, cache_dir, mode, data_csv, encoder_path
        )
        logger.info(f"Build {mode} batch sampler")
        batch_sampler = self.build_batch_sampler(
            build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset
        )
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model, encoder, test_trials=None):
        """Build the task wrapping the model's train/test step logic.

        By default builds :obj:`SpeakerVerification`.

        Args:
            build_task (dict): same in :obj:`default_config`, forwarded to
                :obj:`SpeakerVerification`
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            test_trials (List[Tuple[int, str, str]]): each tuple is
                ``(label, enroll_utt_id, test_utt_id)``; label is 0 or 1

        Returns:
            Task
        """
        task = SpeakerVerification(model, encoder, test_trials, **build_task)
        return task
def prepare_voxceleb1_for_sv(
    target_dir: str,
    cache_dir: str,
    get_path_only: bool,
    dataset_root: str,
    force_download: bool = False,
):
    """Prepare VoxCeleb1 for speaker verification, following ``ASV.prepare_data``.

    Args:
        target_dir (str): directory where ``train.csv`` and ``test_trial.csv``
            are written
        cache_dir (str): directory for cached corpus metadata
        get_path_only (bool): directly return the csv paths without writing them
        dataset_root (str): the root path of the VoxCeleb1 corpus
        force_download (bool): always re-download the metadata for VoxCeleb1

    Returns:
        tuple: ``(train_csv_path, [test_trial_csv_path])``
    """
    # Fix: target_dir arrives as a plain str (per the annotation); the "/"
    # operator below requires a Path, so convert once up-front.
    target_dir = Path(target_dir)
    train_path = target_dir / "train.csv"
    test_trial_path = target_dir / "test_trial.csv"
    if get_path_only:
        return (train_path, [test_trial_path])

    corpus = VoxCeleb1SV(dataset_root, cache_dir, force_download)
    (train_data, valid_data, test_data, test_trials) = corpus.all_data

    # The train and valid splits are merged into a single training csv
    all_data = {**train_data, **valid_data}
    ids = sorted(all_data.keys())
    wav_paths = [all_data[idx]["wav_path"] for idx in ids]
    labels = [all_data[idx]["label"] for idx in ids]
    pd.DataFrame({"id": ids, "wav_path": wav_paths, "spk": labels}).to_csv(
        train_path, index=False
    )

    # Each trial is (label, enroll_id, test_id); resolve both waveform paths
    (labels, id1s, id2s) = zip(*test_trials)
    wav_path1 = [test_data[idx]["wav_path"] for idx in id1s]
    wav_path2 = [test_data[idx]["wav_path"] for idx in id2s]
    pd.DataFrame(
        {
            "id1": id1s,
            "id2": id2s,
            "wav_path1": wav_path1,
            "wav_path2": wav_path2,
            "label": labels,
        }
    ).to_csv(test_trial_path, index=False)

    return (train_path, [test_trial_path])
class SuperbASV(ASV):
    """SUPERB ASV recipe: VoxCeleb1 speaker verification with an x-vector head."""

    def default_config(self) -> dict:
        """Return the default arguments for :obj:`run`."""
        return dict(
            target_dir=MISSING,
            cache_dir=None,
            test_ckpt_steps=None,
            prepare_data=dict(dataset_root=MISSING),
            build_dataset=dict(train=dict(min_secs=2.0, max_secs=8.0)),
            build_batch_sampler=dict(
                train=dict(batch_size=10, shuffle=True),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                loss_type="amsoftmax", loss_conf=dict(margin=0.4, scale=30)
            ),
            build_optimizer=dict(name="AdamW", conf=dict(lr=0.0001)),
            build_scheduler=dict(name="ExponentialLR", gamma=0.9),
            train=dict(
                total_steps=200000,
                log_step=500,
                eval_step=1e20,
                save_step=10000,
                gradient_clipping=1000.0,
                gradient_accumulate=5,
                valid_metric=None,
                valid_higher_better=None,
                auto_resume=True,
                resume_ckpt_dir=None,
                keep_num_ckpts=None,
            ),
        )

    def prepare_data(
        self,
        prepare_data: dict,
        target_dir: str,
        cache_dir: str,
        get_path_only: bool,
    ):
        """Prepare the task-specific data metadata (path, labels...).

        By default calls :obj:`prepare_voxceleb1_for_sv` with ``**prepare_data``.

        Args:
            prepare_data (dict): same in :obj:`default_config`, supports the
                arguments of :obj:`prepare_voxceleb1_for_sv`
            target_dir (str): directory to save the csv files into
            cache_dir (str): directory for long-running preprocessing caches,
                shared across training sessions
            get_path_only (bool): directly return the filepaths whether or not
                they exist yet

        Returns:
            tuple: ``(train_path, test_trial_paths)`` where the train csv has
            columns ``id``, ``wav_path``, ``spk`` and each trial csv has
            columns ``id1``, ``id2``, ``wav_path1``, ``wav_path2``, ``label``
        """
        return prepare_voxceleb1_for_sv(
            **self._get_current_arguments(flatten_dict="prepare_data")
        )

    def build_encoder(
        self,
        build_encoder: dict,
        target_dir: str,
        cache_dir: str,
        train_csv: str,
        test_csvs: list,
        get_path_only: bool,
    ):
        """Build and pickle the speaker-label encoder; return the pickle path.

        Builds a :obj:`CategoryEncoder` from the sorted unique values of the
        ``spk`` column of the train csv.

        Args:
            build_encoder (dict): same in :obj:`default_config`,
                no argument supported for now
            target_dir (str): directory to save the encoder into
            cache_dir (str): shared preprocessing cache directory (unused here)
            train_csv (str): the train csv from :obj:`prepare_data`
            test_csvs (List[str]): the test csvs from :obj:`prepare_data`
            get_path_only (bool): directly return the filepath whether or not
                it exists yet

        Returns:
            str: the path of the pickled encoder
        """
        encoder_path = Path(target_dir) / "spk2int.pkl"
        if get_path_only:
            return encoder_path

        csv = pd.read_csv(train_csv)
        # sorted() makes the speaker -> int mapping deterministic across runs
        all_spk = sorted(set(csv["spk"]))
        spk2int = CategoryEncoder(all_spk)

        with open(encoder_path, "wb") as f:
            pickle.dump(spk2int, f)

        return encoder_path

    def build_dataset(
        self,
        build_dataset: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
    ):
        """Build the dataset for train/test (ASV has no valid split).

        ``build_dataset["train"]`` supports:

        ==================== ====================
        key                  description
        ==================== ====================
        min_secs (float)     drop a waveform if it is not longer than this
                             (measured after sox effects)
        max_secs (float)     randomly crop longer waveforms down to this
                             length; None disables cropping
        ==================== ====================

        Train items: ``x``, ``x_len``, ``class_id``, ``unique_name``.
        Test items: ``x``, ``x_len``, ``unique_name``.

        Args:
            build_dataset (dict): same in :obj:`default_config`
            target_dir (str): current experiment directory
            cache_dir (str): shared preprocessing cache directory
            mode (str): "train" or "test"
            data_csv (str): the metadata csv for the specific ``mode``
            encoder_path (str): the pickled encoder path for the labels

        Returns:
            torch Dataset
        """
        assert mode in [
            "train",
            "test",
        ], "Only support train & test mode (no validation)"

        if mode == "train":

            @dataclass
            class Config:
                min_secs: float = None
                max_secs: float = None

            conf = build_dataset.get("train", {})
            conf = Config(**conf)

            csv = pd.read_csv(data_csv)
            wav_paths = csv["wav_path"].tolist()
            audio_loader = LoadAudio(
                wav_paths, sox_effects=EFFECTS, max_secs=conf.max_secs
            )

            labels = csv["spk"].tolist()
            with open(encoder_path, "rb") as f:
                encoder = pickle.load(f)
            label_encoder = EncodeCategory(labels, encoder)
            ids = csv["id"].tolist()

            class SVTrainDataset:
                def __len__(self):
                    return len(audio_loader)

                def __getitem__(self, index: int):
                    audio = audio_loader[index]
                    label = label_encoder[index]
                    return {
                        "x": audio["wav"],
                        "x_len": audio["wav_len"],
                        "class_id": label["class_id"],
                        "unique_name": ids[index],
                    }

            dataset = SVTrainDataset()

            if conf.min_secs is not None:
                # Filter out utterances that end up too short after sox effects.
                # NOTE(review): target_dir is annotated str but used with "/"
                # here — callers appear to pass a Path; confirm.
                x_lens, unique_names = get_info(
                    dataset,
                    ["x_len", "unique_name"],
                    target_dir / "train_utt_len",
                )
                indices = []
                removed_indices = []
                for idx, (x_len, unique_name) in enumerate(
                    zip(x_lens, unique_names)
                ):
                    secs = x_len / SAMPLE_RATE
                    if secs <= conf.min_secs:
                        logger.info(
                            f"Remove utt {unique_name} since too short after sox effects: {secs} secs"
                        )
                        removed_indices.append(idx)
                    else:
                        indices.append(idx)
                if len(removed_indices) > 0:
                    logger.info(f"Remove in total {len(removed_indices)} utts")
                dataset = Subset(dataset, indices)

        elif mode == "test":
            csv = pd.read_csv(data_csv)
            # Both sides of every trial pair become dataset items
            ids = pd.concat([csv["id1"], csv["id2"]], ignore_index=True).tolist()
            wav_paths = pd.concat(
                [csv["wav_path1"], csv["wav_path2"]], ignore_index=True
            ).tolist()
            # Deduplicate (id, path) pairs since an utterance can appear in
            # many trials; sorting keeps the item order deterministic
            data_list = sorted(
                set([(idx, path) for idx, path in zip(ids, wav_paths)])
            )
            ids, wav_paths = zip(*data_list)
            audio_loader = LoadAudio(wav_paths)

            class SVTestDataset:
                def __len__(self):
                    return len(audio_loader)

                def __getitem__(self, index: int):
                    audio = audio_loader[index]
                    return {
                        "x": audio["wav"],
                        "x_len": audio["wav_len"],
                        "unique_name": ids[index],
                    }

            dataset = SVTestDataset()

        return dataset

    def build_batch_sampler(
        self,
        build_batch_sampler: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        dataset,
    ):
        """Return the batch sampler for the torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`; the
                ``train`` and ``test`` sub-dicts are forwarded to
                :obj:`FixedBatchSizeBatchSampler`. Note that ASV does not
                support valid.
            target_dir (str): current experiment directory (unused here)
            cache_dir (str): shared preprocessing cache directory (unused here)
            mode (str): "train" or "test"
            data_csv (str): the ``mode`` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader

        Raises:
            ValueError: for any mode other than train/test
        """
        train = build_batch_sampler.get("train", {})
        test = build_batch_sampler.get("test", {})
        if mode == "train":
            return FixedBatchSizeBatchSampler(dataset, **train)
        elif mode == "test":
            return FixedBatchSizeBatchSampler(dataset, **test)
        else:
            raise ValueError("ASV only supports train/test modes")

    def build_downstream(
        self,
        build_downstream: dict,
        downstream_input_size: int,
        downstream_output_size: int,
        downstream_input_stride: int,
    ):
        """Return the task-specific downstream model.

        By default builds the :obj:`SuperbXvector` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`, supports
                the arguments of :obj:`SuperbXvector`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): unused here — presumably
                SuperbXvector emits a fixed-size speaker embedding; confirm
            downstream_input_stride (int): the input feature's stride
                (from 16 KHz); unused here

        Returns:
            :obj:`s3prl.nn.interface.AbsUtteranceModel`
        """
        model = SuperbXvector(downstream_input_size, **build_downstream)
        return model
class _DistributedDataParallel(torch.nn.parallel.DistributedDataParallel): def __getattr__(self, name): try: return super().__getattr__(name) except AttributeError: return getattr(self.module, name)
def _force_cacheable(data: dict): output = dict() for (key, value) in data.items(): if isinstance(value, torch.Tensor): value = value.detach().cpu() output[key] = value return output
def _to_device(data, device: str): output = dict() for (key, value) in data.items(): if isinstance(value, torch.Tensor): value = value.to(device) output[key] = value return output
def _doc_default_config(cls: Problem): "\n This is used to layout the :code:`default_config` dictionary into yaml format\n for :code:`default_config`'s docstring.\n " def _append_prefix_spaces(docstring: str): return '\n'.join([f' {line}' for line in docstring.split('\n')]) obj = cls() try: config = obj.default_config() except: return else: methods = [] for (k, v) in config.items(): if hasattr(cls, k): methods.append(getattr(cls, k)) method_links = ' '.join([f':obj:`{method.__name__}`' for method in methods]) yaml_str = yaml.dump(config, sort_keys=False, width=float('inf')) yaml_str = _append_prefix_spaces(yaml_str) cls.default_config.__doc__ = DEFAULT_CONFIG_FORMAT.format(method_links, yaml_str)
class Problem(): _store: Dict[(str, Problem)] = dict() def __init_subclass__(cls) -> None: super().__init_subclass__() cls._store[cls.__name__] = cls _doc_default_config(cls) @classmethod def get_class_from_name(cls, name: str): '\n Args:\n name (str): the :code:`__name__` of the problem class\n\n Returns:\n Problem\n ' assert (name in cls._store), f"The class '{name}' is either not defined or not imported" return cls._store[name] def build_collate_fn(self, build_collate_fn: dict, mode: str): '\n By default returns :obj:`s3prl.dataset.base.default_collate_fn`\n\n Args:\n build_collate_fn (dict): same in :obj:`default_config`, no argument supported for now\n mode (str): train, valid, or test\n\n Returns:\n callable\n\n the collate_fn for torch DataLoader in train/valid/test :code:`mode`\n ' return default_collate_fn def build_upstream(self, build_upstream: dict): '\n By default build the upstream with :obj:`s3prl.nn.upstream.S3PRLUpstream`\n\n Args:\n build_upstream (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.upstream.S3PRLUpstream`\n\n Returns:\n :obj:`s3prl.nn.interface.AbsUpstream`\n\n Return an upstream model, whose forward takes the waveform input and returns\n multiple hidden states as features.\n ' upstream = S3PRLUpstream(**build_upstream) return upstream def build_featurizer(self, build_featurizer: dict, upstream): '\n By default build the featurizer with :obj:`s3prl.nn.Featurizer`\n\n Args:\n build_featurizer (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.Featurizer`\n upstream (:obj:`AbsUpstream`): the upstream model built by :obj:`build_upstream`\n\n Returns:\n :obj:`s3prl.nn.interface.AbsFeaturizer`\n\n Return the featurizer model. 
The featurizer is used to reduce the multiple\n hidden states returned from the upstream model (built by :obj:`build_upstream`)\n into a single hidden state, so can be easliy fed into the downstream model\n ' featurizer = Featurizer(upstream, **build_featurizer) return featurizer def build_model(self, build_model: dict, model_output_size: int, build_upstream: dict, build_featurizer: dict, build_downstream: dict): "\n By default build model with :obj:`s3prl.nn.upstream.UpstreamDownstreamModel`\n\n Args:\n build_model (dict): same in :obj:`default_config`,\n arguments for :obj:`s3prl.nn.upstream.UpstreamDownstreamModel`\n model_output_size (int): the required model's output hidden size\n build_upstream (dict): same in :obj:`default_config`, refer to :obj:`build_upstream`\n build_featurizer (dict): same in :obj:`default_config`, refer to :obj:`build_featurizer`\n build_downstream (dict): same in :obj:`default_config`, refer to :obj:`build_downstream`\n\n Returns:\n torch.nn.Module\n\n Return the entire model for the task, which takes the direct items from DataLoader as the input.\n Usually, the components can be built by :obj:`build_upstream`, :obj:`build_featurizer`,\n :obj:`build_downstream`, and are concated together to get the final model.\n The upstream extracts multiple hidden states, the featuizer reduce them into a single hidden state,\n and the downstream takes the hidden states as the feature for the downstream-specific model.\n " upstream = self.build_upstream(build_upstream) featurizer: Featurizer = self.build_featurizer(build_featurizer, upstream) downstream = self.build_downstream(build_downstream, featurizer.output_size, model_output_size, featurizer.downsample_rate) model = UpstreamDownstreamModel(upstream, featurizer, downstream, **build_model) return model def build_optimizer(self, build_optimizer: dict, parameters): '\n Args:\n build_optimizer (dict): same in :obj:`default_config`, refer to below\n\n ==================== ====================\n key 
description\n ==================== ====================\n name (str) - the optimizer class name in :obj:`torch.optim`\n conf (dict) - the arguments for initializing the optimizer class. e.g. :code:`{"lr": 1.0e-4}`\n ==================== ====================\n\n parameters (iterable): the standard params accepted by :obj:`torch.optim.Optimizer`.\n\n Returns:\n :obj:`torch.optim.Optimizer`\n\n An optimizer following standard torch usage\n ' def _default_build_optimizer(name: str, conf: dict): opt_cls = getattr(torch.optim, name) opt = opt_cls(parameters, **conf) return opt return _default_build_optimizer(**build_optimizer) def build_scheduler(self, build_scheduler: dict, optimizer): '\n Args:\n build_scheduler (dict): same in :obj:`default_config`\n\n ==================== ====================\n key description\n ==================== ====================\n name (str) - the scheduler class name in :obj:`torch.optim.lr_scheduler`\n conf (dict) - the arguments for initializing the scheduler class. e.g. 
:code:`{"gamma": 0.01}` for :obj:`torch.optim.lr_scheduler.StepLR`\n ==================== ====================\n\n optimizer: the standard torch optimizer accepted by Scheduler in :obj:`torch.optim.lr_scheduler`.\n\n Returns:\n torch scheduler\n\n A scheduler following standard torch usage\n ' def _default_build_scheduler(name: str, conf: dict): scheduler_cls = getattr(torch.optim.lr_scheduler, name) scheduler = scheduler_cls(optimizer, **conf) return scheduler return _default_build_scheduler(**build_scheduler) def train(self, train: dict, train_dir: str, build_model_all_args: dict, build_task_all_args_except_model: dict, save_model: dict, save_task: dict, build_optimizer: dict, build_scheduler: dict, evaluate: dict, train_dataset, train_batch_sampler, train_collate_fn, valid_dataset, valid_batch_sampler, valid_collate_fn, num_workers: int, world_size: int, rank: int, eval_batch: int, device: str, global_config: dict=None): "\n Args:\n train (dict): same in :obj:`default_config`\n\n ========================== ====================\n key description\n ========================== ====================\n total_steps (int) - the total optimization steps\n log_step (int) - logging frequency. log every :code:`log_step` step\n eval_step (int) - evaluation frequency. Evaluate every :code:`eval_step` step. Note that you can control how many batch to evaluate to speed up the development by the :code:`eval_batch` argument in :obj:`run`\n save_step (int) - save the checkpoint every :code:`save_step` step.\n gradient_clipping (float) - clip the gradient. important for RNNs.\n gradient_accumulate (int) - accumulate multiple steps' gradient before updating network parameters to simulate large-batch optimization.\n valid_metric (str) - the metric to select the best valid checkpoint. Different Tasks have different supported valid_metrics. 
See :obj:`build_task` for the supported metrics.\n valid_higher_better (bool) - some metrics are higher better, while some are lower better this will affect how to save the best validation checkpoint.\n auto_resume (bool) - if there are already the last checkpoint in :code:`target_dir` (see :obj:`run`), whether to resume from it or delete it and start a new training session.\n resume_ckpt_dir (str) - you can directly specify the checkpoint path to resume which is not necessary in :code:`target_dir` (see :obj:`run`).\n seed (int) - fix the seed before the training start\n keep_num_ckpts (int) - to prevent saving too many checkpoints, only save the :code:`keep_num_ckpts` latest checkpoints and delete the old ones.\n use_scheduler (bool) - whether to use the scheduler\n ========================== ====================\n\n **others:\n only meaningful when you want to override this train method, which is not the\n common case. Hence we skip the documentation for now.\n " @dataclass class TrainConfig(): total_steps: int log_step: int eval_step: int save_step: int gradient_clipping: float gradient_accumulate: int valid_metric: str valid_higher_better: bool auto_resume: bool = True resume_ckpt_dir: str = None seed: int = 0 keep_num_ckpts: int = 2 use_scheduler: bool = False conf = TrainConfig(**train) fix_random_seeds(conf.seed) train_dir: Path = Path(train_dir) if ((not conf.auto_resume) and train_dir.is_dir()): logger.warning(f'{train_dir} exists. 
Delete the directory since auto_resume=False') shutil.rmtree(train_dir) train_dir.mkdir(exist_ok=True, parents=True) ckpt_dirs = [key for key in os.listdir(train_dir) if key.startswith('step_')] ckpt_dirs.sort(key=(lambda name: int(name.split('_')[(- 1)])), reverse=True) resume = False if conf.auto_resume: if ((conf.resume_ckpt_dir is not None) and Path(conf.resume_ckpt_dir).is_dir()): resume = True if (len(ckpt_dirs) > 0): resume = True if resume: resume_ckpt_dir = Path((conf.resume_ckpt_dir or (train_dir / ckpt_dirs[0]))) logger.info(f'Loading checkpoints from {resume_ckpt_dir}') try: (_, task) = self.load_model_and_task(resume_ckpt_dir) except: logger.error(f"Fail to load the checkpoint {resume_ckpt_dir}. You can set '--train.auto_resume False' to ignore the crashed checkpoint to avoid this behavior.") raise optimizer_state = torch.load((resume_ckpt_dir / 'optimizer.pt'), map_location='cpu') if conf.use_scheduler: scheduler_state = torch.load((resume_ckpt_dir / 'scheduler.pt'), map_location='cpu') else: scheduler_state = None with open((resume_ckpt_dir / 'training_stats.yaml'), 'r') as f: training_stats = yaml.load(f, Loader=yaml.FullLoader) global_step = int(training_stats['global_step']) epoch = int(training_stats['epoch']) valid_best_metrics = dict(training_stats['valid_best_metrics']) else: model = self.build_model(**build_model_all_args) task = self.build_task(model=model, **build_task_all_args_except_model) optimizer_state = None scheduler_state = None global_step = 0 epoch = 0 valid_best_metrics = dict() device = torch.device(device) wrapped_task = task.to(device) if (world_size > 1): torch.cuda.set_device(device.index) wrapped_task = _DistributedDataParallel(task, device_ids=[device.index], find_unused_parameters=True, output_device=device.index) optimizer = self.build_optimizer(build_optimizer, task.parameters()) if optimizer_state: optimizer.load_state_dict(optimizer_state) scheduler = None if conf.use_scheduler: scheduler = 
self.build_scheduler(build_scheduler, optimizer) if scheduler_state: scheduler.load_state_dict(scheduler_state) train_batch_sampler = DistributedBatchSamplerWrapper(train_batch_sampler, num_replicas=world_size, rank=rank) train_dataloader = DataLoader(train_dataset, batch_sampler=train_batch_sampler, num_workers=num_workers, collate_fn=train_collate_fn) tqdm_file = (sys.stderr if (rank == 0) else open(os.devnull, 'w')) pbar = tqdm(total=conf.total_steps, dynamic_ncols=True, desc='train', file=tqdm_file) pbar.n = global_step if (rank == 0): tf_dir = (train_dir / 'tb') tf_logger = SummaryWriter(str(tf_dir)) def _save_ckpts_to_dir(ckpts_dir: str, task, optimizer, scheduler, build_model_all_args: dict, build_task_all_args_except_model: dict, save_model: dict, save_task: dict, training_stats: dict, global_config: dict): ckpts_dir: Path = Path(ckpts_dir) ckpts_dir.mkdir(exist_ok=True, parents=True) model_ckpt_dir = (ckpts_dir / 'model') self.save_model(save_model, model_ckpt_dir, build_model_all_args, task.model) task_ckpt_dir = (ckpts_dir / 'task') self.save_task(save_task, task_ckpt_dir, build_task_all_args_except_model, task) torch.save(optimizer.state_dict(), (ckpts_dir / 'optimizer.pt')) if (scheduler is not None): torch.save(scheduler.state_dict(), (ckpts_dir / 'scheduler.pt')) with (ckpts_dir / 'training_stats.yaml').open('w') as f: yaml.safe_dump(training_stats, f) with (ckpts_dir / 'config.yaml').open('w') as f: yaml.safe_dump(global_config, f) backward_steps = 0 while (pbar.n < pbar.total): (train_batch_sampler.set_epoch(epoch),) batch_results = [] logger.info(f'Start epoch {epoch}') for batch in train_dataloader: try: if (pbar.n >= pbar.total): break global_step = (pbar.n + 1) wrapped_task.train() batch = _to_device(batch, device) (loss, cacheable) = wrapped_task('train', **batch) (loss / conf.gradient_accumulate).backward() batch_results.append(_force_cacheable(cacheable)) except RuntimeError as e: if (world_size > 1): raise acceptable = False for acc_err in 
ACCEPTABLE_ERRORS: if (str(e) in acc_err): acceptable = True break if (not acceptable): raise logger.warning(f'Step {global_step}: {str(e)}') with torch.cuda.device(device): torch.cuda.empty_cache() optimizer.zero_grad() continue backward_steps += 1 if ((backward_steps % conf.gradient_accumulate) > 0): continue grad_norm = torch.nn.utils.clip_grad_norm_(wrapped_task.parameters(), conf.gradient_clipping) if math.isnan(grad_norm): logger.warning(f'[Runner] - grad norm is NaN at step {global_step}') else: optimizer.step() optimizer.zero_grad() if conf.use_scheduler: scheduler.step() if (rank > 0): batch_results = [] pbar.update(1) continue def _log_results(split_name: str, logs: dict, tensorboard: SummaryWriter, global_step: int): logger.info(f'{split_name} at step {global_step}') for (name, value) in logs.items(): value = float(value) logger.info(f'{name}: {value}') tensorboard.add_scalar(f'{split_name}-{name}', value, global_step=global_step) if ((global_step % conf.log_step) == 0): logs = wrapped_task.reduction('train', batch_results) _log_results('train', logs, tf_logger, global_step) batch_results = [] save_names = [] if ((global_step % conf.eval_step) == 0): assert ((valid_dataset is not None) and (valid_batch_sampler is not None)), f'valid dataset is not supported, please set train.eval_step to infinite' logs: dict = self.evaluate(evaluate, 'valid', task, valid_dataset, valid_batch_sampler, valid_collate_fn, eval_batch, train_dir, device, num_workers) _log_results('valid', logs, tf_logger, global_step) valid_metrics = {k: float(v) for (k, v) in logs.items()} new_metric = valid_metrics[conf.valid_metric] best_metric = valid_best_metrics.get(conf.valid_metric) if (best_metric is None): is_new_best = True elif conf.valid_higher_better: is_new_best = (new_metric > best_metric) else: is_new_best = (new_metric < best_metric) if is_new_best: valid_best_metrics = deepcopy(valid_metrics) save_names.append('valid_best') if ((global_step % conf.save_step) == 0): ckpt_dirs 
= [key for key in os.listdir(train_dir) if key.startswith('step_')] ckpt_dirs.sort(key=(lambda stem: int(stem.split('_')[(- 1)]))) if ((conf.keep_num_ckpts is not None) and (len(ckpt_dirs) >= conf.keep_num_ckpts)): for ckpt_dir in ckpt_dirs[:((len(ckpt_dirs) - conf.keep_num_ckpts) + 1)]: shutil.rmtree((train_dir / ckpt_dir)) save_names.append(f'step_{global_step}') for name in save_names: training_stats = dict(global_step=global_step, epoch=epoch, valid_best_metrics=valid_best_metrics) _save_ckpts_to_dir((train_dir / name), (task.module if isinstance(task, _DistributedDataParallel) else task), optimizer, scheduler, build_model_all_args, build_task_all_args_except_model, save_model, save_task, training_stats, global_config) pbar.update(1) epoch += 1 pbar.close() if (rank == 0): tf_logger.close() def evaluate(self, evaluate: dict, mode: str, task, dataset, batch_sampler, collate_fn, eval_batch: int, dump_dir: str, device: str, num_workers: int): '\n The evaluate routine used by :obj:`train` (during validation phase) and :obj:`run`\n (during testing phase).\n\n Args:\n evaluate (dict): same in :obj:`default_config`, no argument supported for now\n **others:\n only meaningful when you want to override this train method, which is not the\n common case. 
Hence we skip the documentation for now.\n\n ' assert (mode in ['valid', 'test']) dataloader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn) task = task.to(device) with torch.no_grad(): batch_results = [] for (batch_idx, batch) in enumerate(tqdm(dataloader, desc=mode, total=len(dataloader))): if (batch_idx == eval_batch): break batch = _to_device(batch, device) task.eval() (loss, cacheable) = task(mode, _dump_dir=dump_dir, **batch) batch_results.append(_force_cacheable(cacheable)) logs = task.reduction(mode, batch_results, _dump_dir=dump_dir) return logs def save_model(self, save_model: dict, model_ckpt_dir: str, build_model_all_args: dict, model: torch.nn.Module): '\n Save the model state_dict and the model initialization arguments into the given directory.\n If you override this method, it is highly possible you also need to override :obj:`load_model`\n\n Args:\n save_model (dict): same in :obj:`default_config`, so the user can save additional settings,\n like the configuration of the dataset by duplicating the dataset hypers\n inside the :code:`save_model` field. 
You can rely on the :code:`omegaconf`\n package to simplify the duplication.\n model_ckpt_dir (str): save the model into the this directory.\n build_model_all_args (dict): all the arguments of :obj:`build_model`.\n By saving this dictionary, you can easily reconstruct the same model\n by calling :obj:`build_model` with the saved dictionary.\n model (torch.nn.Module): the model to be saved.\n\n Returns:\n None\n ' model_ckpt_dir: Path = Path(model_ckpt_dir) if model_ckpt_dir.is_dir(): shutil.rmtree(model_ckpt_dir, ignore_errors=True) model_ckpt_dir.mkdir(exist_ok=True, parents=True) with (model_ckpt_dir / 'problem_name').open('w') as f: f.write(f'{self.__class__.__name__}') torch.save(model.state_dict(), (model_ckpt_dir / 'state_dict.pt')) with (model_ckpt_dir / f'arguments.yaml').open('w') as f: yaml.safe_dump(build_model_all_args, f) if (len(save_model) > 0): with (model_ckpt_dir / 'extra_conf.yaml').open('w') as f: yaml.safe_dump(save_model, f) def load_model(self, model_ckpt_dir: str): '\n Return the saved model.\n\n Args:\n model_ckpt_dir (str): Restore the model with :obj:`build_model` and the checkpoint\n saved in this directory.\n\n Return:\n :obj:`torch.nn.Module`\n ' model_ckpt_dir: Path = Path(model_ckpt_dir) with (model_ckpt_dir / 'arguments.yaml').open('r') as f: arguments = yaml.load(f, Loader=yaml.SafeLoader) model = self.build_model(**arguments) state_dict = torch.load((model_ckpt_dir / 'state_dict.pt'), map_location='cpu') model.load_state_dict(state_dict) return model def save_task(self, save_task: dict, task_ckpt_dir: str, build_task_all_args_except_model: dict, task: Task): "\n Save the task's state, :code:`task.get_state()`, and the initialization arguments into the given\n directory. 
If you override this method, it is highly possible you also need to override\n :obj:`load_task`.\n\n Args:\n save_task (dict): same in :obj:`default_config`, so the user can save additional settings,\n like the configuration of the dataset by duplicating the dataset hypers\n inside the :code:`save_task` field. You can rely on the :code:`omegaconf`\n package to simplify the duplication.\n task_ckpt_dir (str): save the task into this directory.\n build_task_all_args_except_model (dict): all the arguments of :obj:`build_task` except\n the :code:`model` argument since the model should be sapartely saved by\n :obj:`save_model`. By saving this dictionary, you can easily reconstruct the same task\n by calling :obj:`build_task` with the saved dictionary.\n task (Task): the task to be saved.\n\n Returns:\n None\n " task_ckpt_dir: Path = Path(task_ckpt_dir) if task_ckpt_dir.is_dir(): shutil.rmtree(task_ckpt_dir, ignore_errors=True) task_ckpt_dir.mkdir(exist_ok=True, parents=True) with (task_ckpt_dir / 'problem_name').open('w') as f: f.write(f'{self.__class__.__name__}') torch.save(task.get_state(), (task_ckpt_dir / 'state.pt')) arguments = build_task_all_args_except_model arguments_dir = (task_ckpt_dir / 'arguments') arguments_dir.mkdir(exist_ok=True, parents=True) for (k, v) in arguments.items(): try: yaml.safe_dump(v) except: with (arguments_dir / f'{k}.pkl').open('wb') as f: pickle.dump(v, f) else: with (arguments_dir / f'{k}.yaml').open('w') as f: yaml.safe_dump(v, f) if (len(save_task) > 0): with (task_ckpt_dir, 'extra_conf.yaml').open('w') as f: yaml.safe_dump(save_task, f) def load_task(self, task_ckpt_dir: str, model: torch.nn.Module, task_overrides: dict=None): "\n Return the saved task.\n\n Args:\n task_ckpt_dir (str): Restore the task with :obj:`build_task` and the checkpoint\n saved in this directory.\n model (torch.nn.Module): the model for the task, since the model is separately saved\n and is required for :obj:`build_task`.\n task_overrides (dict): overrides 
the saved initialization arguments, so can change\n the loaded task's behavior. Like, change the decoding hyperparameters.\n\n Returns:\n :obj:`s3prl.task.Task`\n " task_ckpt_dir: Path = Path(task_ckpt_dir) task_overrides = (task_overrides or {}) arguments = task_overrides.copy() arguments_dir = (task_ckpt_dir / 'arguments') for filename in os.listdir(arguments_dir): filepath = (arguments_dir / filename) key = filepath.stem if (key in task_overrides): continue if (filepath.suffix == '.yaml'): with filepath.open('r') as f: value = yaml.load(f, Loader=yaml.SafeLoader) elif (filepath.suffix == '.pkl'): with filepath.open('rb') as f: value = pickle.load(f) assert (key not in arguments), f"Unexpected duplicated file stem '{key}' found in {arguments_dir}. Please delete one of them." arguments[key] = value task = self.build_task(model=model, **arguments) state = torch.load((Path(task_ckpt_dir) / 'state.pt'), map_location='cpu') task.set_state(state) return task def load_model_and_task(self, ckpts_dir: str, task_overrides: dict=None): "\n This is a helper method to combine :obj:`load_model` and :obj:`load_task`\n together to directly load the model and the task. This method assumes\n the model is saved under :code:`ckpts_dir / 'model'` and the task is\n saved under :code:`ckpts_dir / 'task'`\n\n Returns:\n tuple\n\n 1. model (:obj:`torch.nn.Module`)\n 2. 
task (:obj:`s3prl.task.Task`)\n " ckpts_dir: Path = Path(ckpts_dir) task_overrides = (task_overrides or {}) model = self.load_model((ckpts_dir / 'model')) task = self.load_task((ckpts_dir / 'task'), model, task_overrides) return (model, task) @staticmethod def _get_current_arguments(exclude_self_and_cls: bool=True, flatten_dict: Union[(str, List[str])]=None) -> dict: if isinstance(flatten_dict, str): flatten_dict = [flatten_dict] frame = inspect.currentframe().f_back (args, _, _, values) = inspect.getargvalues(frame) config = {key: values[key] for key in args} if exclude_self_and_cls: config.pop('self', None) config.pop('cls', None) if (flatten_dict is not None): flatten_config = {} for (k, v) in config.items(): if (k in flatten_dict): assert isinstance(v, dict) for (_k, _v) in v.items(): flatten_config[_k] = _v else: flatten_config[k] = v config = flatten_config def assert_no_missing(config: dict): omegaconf.OmegaConf.to_container(omegaconf.OmegaConf.create(config), throw_on_missing=True) assert_no_missing(config) return config @staticmethod def _get_time_tag(): return datetime.fromtimestamp(time()).strftime('%Y_%m_%d_%H_%M_%S') @staticmethod def _stage_check(stage_id: int, stop: int, check_fn: callable): try: check_fn() except: logger.error(f'Stage {stage_id} was not done before or is corrupted. 
Please re-run from this stage.') raise if (isinstance(stop, int) and (stage_id == stop)): exit(0) def main(self, args: List[str]=None): parser = argparse.ArgumentParser() parser.add_argument('--verbose', default='INFO') parser.add_argument('--config', help='The yaml config path to override the default config') parser.add_argument('--print_config', '-p', action='store_true') parser.add_argument('--dump_config', '-d', help='The path to dump the default config as yaml') (args, override) = parser.parse_known_args(args) if args.print_config: print(f''' Default config of {self} ''') print(yaml.safe_dump(self.default_config())) exit(0) if (args.dump_config is not None): with open(args.dump_config, 'w') as f: yaml.safe_dump(self.default_config(), f) exit(0) root_logger = logging.getLogger() root_logger.handlers = [] logging.basicConfig(level=getattr(logging, args.verbose), format=LOGGING_FORMAT) if (args.config is not None): with open(args.config) as f: yaml_conf = (yaml.load(f, Loader=yaml.FullLoader) or dict()) else: yaml_conf = dict() override_conf = parse_overrides(override) schema = omegaconf.OmegaConf.create(self.default_config()) config = omegaconf.OmegaConf.merge(schema, yaml_conf, override_conf) config = omegaconf.OmegaConf.to_container(config, resolve=True, throw_on_missing=True) logger.info(config) self.run(**config) return config
def resample_hear_corpus(task_dir: str, target_sr: int = 16000, num_workers: int = 6):
    """Resample every audio file under ``${task_dir}/48000/`` into ``${task_dir}/${target_sr}/``.

    The per-split directory layout is mirrored under the target sample-rate
    directory. If the target directory already exists the corpus is assumed
    to be resampled already and nothing is done.

    Args:
        task_dir: root of a HEAR task, containing a ``48000`` directory.
        target_sr: sample rate to convert to.
        num_workers: number of parallel jobs used for the conversion.
    """
    root = Path(task_dir)
    resampled_dir: Path = root / f"{target_sr}"
    if resampled_dir.is_dir():
        logger.info(f"{resampled_dir} already exist. Do not need to resample")
        return

    source_dir = root / "48000"
    assert source_dir.exists(), f"{source_dir} not found"

    for split in sorted(os.listdir(source_dir)):
        src_split_dir = source_dir / split
        audio_paths = find_files(src_split_dir)

        dst_split_dir = resampled_dir / split
        dst_split_dir.mkdir(exist_ok=True, parents=True)

        def convert_one(src_path: str):
            # Load, resample only when the rate differs, then write into the
            # mirrored split directory under the same file name.
            wav, orig_sr = torchaudio.load(src_path)
            if orig_sr != target_sr:
                wav = torchaudio.transforms.Resample(orig_sr, target_sr)(wav)
            torchaudio.save(str(dst_split_dir / Path(src_path).name), wav, sample_rate=target_sr)

        logger.info(f"Resampling {src_split_dir} to {dst_split_dir}:")
        Parallel(n_jobs=num_workers)(delayed(convert_one)(p) for p in tqdm(audio_paths))
class CommonExample(SuperbSID):
    """A tiny self-contained example problem: 5 random utterances, 4 labels."""

    def default_config(self) -> dict:
        """Start from the SID defaults and shrink the schedule to a few steps."""
        conf = super().default_config()
        conf["prepare_data"] = {}
        conf["train"] = {
            "total_steps": 10,
            "log_step": 1,
            "eval_step": 5,
            "save_step": 5,
            "gradient_clipping": 1.0,
            "gradient_accumulate": 1,
            "valid_metric": "accuracy",
            "valid_higher_better": True,
            "auto_resume": True,
        }
        return conf

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Synthesize 5 two-second random waveforms and split them 3/1/1."""
        target_dir: Path = Path(target_dir)

        paths = []
        for idx in range(5):
            path = str(target_dir / f"{idx}.wav")
            torchaudio.save(path, torch.randn(1, 16000 * 2), sample_rate=16000)
            paths.append(path)

        frame = pd.DataFrame(
            {
                "id": [Path(p).stem for p in paths],
                "wav_path": paths,
                "label": ["a", "a", "b", "c", "d"],
            }
        )

        train_csv = target_dir / "train.csv"
        valid_csv = target_dir / "valid.csv"
        test_csv = target_dir / "test.csv"
        frame.iloc[:3].to_csv(train_csv)
        frame.iloc[3:4].to_csv(valid_csv)
        frame.iloc[4:].to_csv(test_csv)
        return train_csv, valid_csv, [test_csv]
class HearBeijingOpera(HearESC50):
    """HEAR Beijing Opera percussion instrument classification (k-fold scene task)."""

    def default_config(self) -> dict:
        """Default configuration; folds come from ``BEIJING_OPERA_NUM_FOLDS``."""
        return {
            "start": 0,
            "stop": None,
            "target_dir": MISSING,
            "cache_dir": None,
            "remove_all_cache": False,
            "prepare_data": {
                "dataset_root": MISSING,
                "test_fold": MISSING,
                "num_folds": BEIJING_OPERA_NUM_FOLDS,
            },
            "build_batch_sampler": {
                "train": {"batch_size": 32, "shuffle": True},
                "valid": {"batch_size": 1},
                "test": {"batch_size": 1},
            },
            "build_upstream": {"name": MISSING},
            "build_featurizer": {"layer_selections": None, "normalize": False},
            "build_downstream": {"hidden_layers": 2, "pooling_type": "MeanPooling"},
            "build_model": {"upstream_trainable": False},
            "build_task": {
                "prediction_type": "multiclass",
                "scores": ["top1_acc", "d_prime", "aucroc", "mAP"],
            },
            "build_optimizer": {"name": "Adam", "conf": {"lr": 0.001}},
            "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
            "save_model": {},
            "save_task": {},
            "train": {
                "total_steps": 150000,
                "log_step": 100,
                "eval_step": 1000,
                "save_step": 100,
                "gradient_clipping": 1.0,
                "gradient_accumulate": 1,
                "valid_metric": "top1_acc",
                "valid_higher_better": True,
                "auto_resume": True,
                "resume_ckpt_dir": None,
            },
            "evaluate": {},
        }
class HearCremaD(HearESC50):
    """HEAR CREMA-D emotion classification (k-fold scene task)."""

    def default_config(self) -> dict:
        """Default configuration; note the lower learning rate (1e-4) for this corpus."""
        return {
            "start": 0,
            "stop": None,
            "target_dir": MISSING,
            "cache_dir": None,
            "remove_all_cache": False,
            "prepare_data": {
                "dataset_root": MISSING,
                "test_fold": MISSING,
                "num_folds": CREMAD_NUM_FOLDS,
            },
            "build_batch_sampler": {
                "train": {"batch_size": 32, "shuffle": True},
                "valid": {"batch_size": 1},
                "test": {"batch_size": 1},
            },
            "build_upstream": {"name": MISSING},
            "build_featurizer": {"layer_selections": None, "normalize": False},
            "build_downstream": {"hidden_layers": 2, "pooling_type": "MeanPooling"},
            "build_model": {"upstream_trainable": False},
            "build_task": {
                "prediction_type": "multiclass",
                "scores": ["top1_acc", "mAP", "d_prime", "aucroc"],
            },
            "build_optimizer": {"name": "Adam", "conf": {"lr": 0.0001}},
            "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
            "save_model": {},
            "save_task": {},
            "train": {
                "total_steps": 150000,
                "log_step": 100,
                "eval_step": 1000,
                "save_step": 100,
                "gradient_clipping": 1.0,
                "gradient_accumulate": 1,
                "valid_metric": "top1_acc",
                "valid_higher_better": True,
                "auto_resume": True,
                "resume_ckpt_dir": None,
            },
            "evaluate": {},
        }
def dcase_2016_task2(target_dir: str, cache_dir: str, dataset_root: str, get_path_only: bool = False):
    """Prepare DCASE 2016 Task 2 (sound event detection) metadata CSVs.

    Reads the per-split JSON annotations shipped with the HEAR corpus and
    flattens them into one CSV row per annotated event segment.

    Returns:
        (train_csv, valid_csv, [test_csv])
    """
    target_dir: Path = Path(target_dir)
    train_csv = target_dir / "train.csv"
    valid_csv = target_dir / "valid.csv"
    test_csv = target_dir / "test.csv"
    if get_path_only:
        return train_csv, valid_csv, [test_csv]

    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = dataset_root / "16000"

    def flatten_split(json_path, csv_path, split: str):
        # One output row per (recording, event segment) pair.
        with open(json_path) as fp:
            annotations = json.load(fp)

        rows = defaultdict(list)
        for record_id in annotations:
            wav_path: Path = (wav_root / split / record_id).resolve()
            assert wav_path.is_file()
            info = torchaudio.info(wav_path)
            base = {
                "record_id": record_id,
                "wav_path": str(wav_path),
                "duration": info.num_frames / info.sample_rate,
            }
            for event in annotations[record_id]:
                row = dict(base)
                # start/end are annotated in milliseconds.
                row["utt_id"] = f"{base['record_id']}-{int(event['start'])}-{int(event['end'])}"
                row["labels"] = event["label"]
                row["start_sec"] = event["start"] / 1000
                row["end_sec"] = event["end"] / 1000
                for column, value in row.items():
                    rows[column].append(value)

        pd.DataFrame(data=rows).to_csv(csv_path, index=False)

    flatten_split(dataset_root / "train.json", train_csv, "train")
    flatten_split(dataset_root / "valid.json", valid_csv, "valid")
    flatten_split(dataset_root / "test.json", test_csv, "test")
    return train_csv, valid_csv, [test_csv]
class HearDcase2016Task2(HearFSD):
    """HEAR DCASE 2016 Task 2: office sound event detection (multilabel, frame labels)."""

    def default_config(self) -> dict:
        """Default configuration for event detection with 4-second chunks."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_dataset=dict(
                train=dict(chunk_secs=4.0, step_secs=4.0),
                valid=dict(chunk_secs=4.0, step_secs=4.0),
                test=dict(chunk_secs=4.0, step_secs=4.0),
            ),
            build_batch_sampler=dict(train=dict(batch_size=5, shuffle=True)),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multilabel',
                scores=['event_onset_200ms_fms', 'segment_1s_er'],
                postprocessing_grid={'median_filter_ms': [250], 'min_duration': [125, 250]},
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=15000,
                log_step=100,
                eval_step=500,
                save_step=500,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='event_onset_200ms_fms',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Prepare the event-annotation CSVs via :func:`dcase_2016_task2`."""
        return dcase_2016_task2(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """Build a frame-labeled dataset for one split.

        Args:
            build_dataset: per-mode dataset settings (chunk_secs / step_secs).
            mode: 'train', 'valid' or 'test'; selects the sub-config.
            data_csv: event CSV produced by :obj:`prepare_data`.
            encoder_path: pickled label encoder produced by build_encoder.
            frame_shift: feature frame shift used to rasterize event labels.
        """

        @dataclass
        class Config:
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = getattr(Config(**build_dataset), mode) or {}

        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)

        df = pd.read_csv(data_csv)
        # Encode the raw label strings into the ids expected by FrameLabelDataset.
        df['label'] = [encoder.encode(label) for label in df['labels'].tolist()]
        return FrameLabelDataset(df, len(encoder), frame_shift, **conf)

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Batch by fixed size for training; group chunks of the same recording for eval.

        BUG FIX: ``target_dir`` is annotated ``str`` but was used directly with
        the ``/`` path operator (``target_dir / 'valid_stats'``), which raises
        TypeError for plain-string callers; wrap it in ``Path`` first.
        """

        @dataclass
        class Config:
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if mode == 'train':
            return FixedBatchSizeBatchSampler(dataset, **(conf.train or {}))
        if mode == 'valid':
            record_ids = get_info(dataset, ['record_id'], Path(target_dir) / 'valid_stats')
            return GroupSameItemSampler(record_ids)
        if mode == 'test':
            record_ids = get_info(dataset, ['record_id'], Path(target_dir) / 'test_stats')
            return GroupSameItemSampler(record_ids)
        raise ValueError(f'Unsupported mode: {mode}')

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame = None, test_df: pd.DataFrame = None):
        """Build an EventPredictionTask with per-split ground-truth events (times in ms)."""

        def df_to_events(df: pd.DataFrame):
            # Group event segments by recording; seconds are converted back to ms.
            data = {}
            for _, row in df.iterrows():
                data.setdefault(row['record_id'], []).append(
                    {
                        'start': row['start_sec'] * 1000,
                        'end': row['end_sec'] * 1000,
                        'label': row['labels'],
                    }
                )
            return data

        valid_events = None if valid_df is None else df_to_events(valid_df)
        test_events = None if test_df is None else df_to_events(test_df)
        return EventPredictionTask(
            model,
            encoder,
            valid_target_events=valid_events,
            test_target_events=test_events,
            **build_task,
        )
def hear_scene_kfolds(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int, num_folds: int, get_path_only: bool = False):
    """Prepare train/valid/test CSVs for a k-fold HEAR scene classification task.

    Fold ``test_fold`` becomes the test split, the next fold (cyclically) the
    validation split, and the remaining folds are concatenated for training.

    Args:
        target_dir: directory to write the CSVs into.
        cache_dir: unused, kept for the common prepare_data interface.
        dataset_root: root of the HEAR corpus (contains foldNN.json and audio).
        test_fold: index of the fold held out for testing.
        num_folds: total number of folds in the corpus.
        get_path_only: when True, only return the CSV paths without any work.

    Returns:
        (train_csv, valid_csv, [test_csv])
    """
    assert test_fold < num_folds, f'test_fold id must be smaller than num_folds. get test_fold={test_fold} and num_folds={num_folds}'

    target_dir = Path(target_dir)
    train_csv = target_dir / 'train.csv'
    valid_csv = target_dir / 'valid.csv'
    test_csv = target_dir / 'test.csv'
    if get_path_only:
        return train_csv, valid_csv, [test_csv]

    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = dataset_root / '16000'

    def load_json(filepath):
        with open(filepath, 'r') as fp:
            return json.load(fp)

    fold_datas = []
    for fold_id in range(num_folds):
        # Folds are stored as fold00.json ... foldNN.json; use idiomatic
        # zero-padding instead of the previous f'{...:2d}'.replace(' ', '0')
        # trick (identical output for non-negative fold ids).
        fold_name = f'fold{fold_id:02d}'
        meta = load_json(dataset_root / f'{fold_name}.json')
        data = defaultdict(list)
        for k in list(meta.keys()):
            data['id'].append(k)
            data['wav_path'].append(wav_root / fold_name / k)
            # NOTE(review): single-label scene corpora produce no separator
            # here, but multi-label folds would be comma-joined while
            # HearFSD.build_encoder splits on ';' — confirm against the
            # corpora that use this helper before relying on multi-label.
            data['labels'].append(','.join([str(label).strip() for label in meta[k]]))
        fold_datas.append(pd.DataFrame(data=data))

    test_id = test_fold
    valid_id = (test_fold + 1) % num_folds
    train_ids = [idx for idx in range(num_folds) if idx not in [test_id, valid_id]]

    pd.concat([fold_datas[idx] for idx in train_ids]).to_csv(train_csv, index=False)
    fold_datas[valid_id].to_csv(valid_csv, index=False)
    fold_datas[test_id].to_csv(test_csv, index=False)
    return train_csv, valid_csv, [test_csv]
class HearESC50(HearFSD):
    """HEAR ESC-50 environmental sound classification (k-fold scene task)."""

    def default_config(self) -> dict:
        """Default configuration; note the short schedule with gradient accumulation."""
        return {
            "start": 0,
            "stop": None,
            "target_dir": MISSING,
            "cache_dir": None,
            "remove_all_cache": False,
            "prepare_data": {
                "dataset_root": MISSING,
                "test_fold": MISSING,
                "num_folds": ESC50_NUM_FOLDS,
            },
            "build_batch_sampler": {
                "train": {"batch_size": 32, "shuffle": True},
                "valid": {"batch_size": 1},
                "test": {"batch_size": 1},
            },
            "build_upstream": {"name": MISSING},
            "build_featurizer": {"layer_selections": None, "normalize": False},
            "build_downstream": {"hidden_layers": 2, "pooling_type": "MeanPooling"},
            "build_model": {"upstream_trainable": False},
            "build_task": {
                "prediction_type": "multiclass",
                "scores": ["top1_acc", "d_prime", "aucroc", "mAP"],
            },
            "build_optimizer": {"name": "Adam", "conf": {"lr": 0.001}},
            "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
            "save_model": {},
            "save_task": {},
            "train": {
                "total_steps": 4000,
                "log_step": 100,
                "eval_step": 500,
                "save_step": 100,
                "gradient_clipping": 1.0,
                "gradient_accumulate": 4,
                "valid_metric": "top1_acc",
                "valid_higher_better": True,
                "auto_resume": True,
                "resume_ckpt_dir": None,
            },
            "evaluate": {},
        }

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Delegate CSV preparation to the shared k-fold scene helper."""
        return hear_scene_kfolds(**self._get_current_arguments(flatten_dict="prepare_data"))
def hear_scene_trainvaltest(target_dir: str, cache_dir: str, dataset_root: str, get_path_only: bool = False):
    """Prepare train/valid/test CSVs for a HEAR scene task with fixed splits.

    The corpus ships ``train.json`` / ``valid.json`` / ``test.json`` metadata
    files; each is flattened into a CSV with ``id``, ``wav_path`` and
    ``labels`` columns.

    Args:
        target_dir: directory to write the CSVs into.
        cache_dir: unused, kept for the common prepare_data interface.
        dataset_root: root of the HEAR corpus (contains the JSONs and audio).
        get_path_only: when True, only return the CSV paths without doing
            any resampling or CSV writing.

    Returns:
        (train_csv, valid_csv, [test_csv])
    """
    target_dir = Path(target_dir)
    train_csv = target_dir / 'train.csv'
    valid_csv = target_dir / 'valid.csv'
    # BUG FIX: was `target_dir / 'test_csv'`, producing a file literally named
    # "test_csv" instead of "test.csv" as every sibling preparer writes.
    test_csv = target_dir / 'test.csv'
    # Return before any side effect, matching hear_scene_kfolds and
    # dcase_2016_task2 (previously the corpus was resampled even when only
    # the paths were requested).
    if get_path_only:
        return train_csv, valid_csv, [test_csv]

    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = dataset_root / '16000'

    def load_json(filepath):
        with open(filepath, 'r') as fp:
            return json.load(fp)

    def split_to_df(split: str) -> pd.DataFrame:
        meta = load_json(dataset_root / f'{split}.json')
        data = defaultdict(list)
        for k in list(meta.keys()):
            data['id'].append(k)
            data['wav_path'].append(wav_root / split / k)
            data['labels'].append(' ; '.join([str(label).strip() for label in meta[k]]))
        return pd.DataFrame(data=data)

    split_to_df('train').to_csv(train_csv, index=False)
    split_to_df('valid').to_csv(valid_csv, index=False)
    split_to_df('test').to_csv(test_csv, index=False)
    return train_csv, valid_csv, [test_csv]
class HearFSD(SuperbSID):
    """HEAR FSD50K multilabel audio tagging; base class for the HEAR scene tasks."""

    def default_config(self) -> dict:
        """Default configuration for multilabel scene tagging."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_batch_sampler=dict(
                train=dict(batch_size=10, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multilabel',
                scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=40000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='mAP',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Prepare the train/valid/test CSVs via :func:`hear_scene_trainvaltest`."""
        return hear_scene_trainvaltest(**self._get_current_arguments(flatten_dict='prepare_data'))

    def build_encoder(self, build_encoder: dict, target_dir: str, cache_dir: str, train_csv_path: str, valid_csv_path: str, test_csv_paths: list, get_path_only: bool = False):
        """Build (and pickle) a CategoryEncoder over all labels seen in any split.

        The ``labels`` CSV column holds ';'-separated label names.
        """
        encoder_path = Path(target_dir) / 'encoder.pkl'
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        all_labels = []
        for _, row in all_csv.iterrows():
            all_labels.extend(l.strip() for l in str(row['labels']).split(';'))

        encoder = CategoryEncoder(all_labels)
        with open(encoder_path, 'wb') as f:
            pickle.dump(encoder, f)
        return encoder_path

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int):
        """Build a scene dataset yielding wav tensors and binary label vectors."""
        df = pd.read_csv(data_csv)
        ids = df['id'].tolist()
        wav_paths = df['wav_path'].tolist()
        labels = [
            [single_label.strip() for single_label in str(label_str).split(';')]
            for label_str in df['labels'].tolist()
        ]

        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)

        audio_loader = LoadAudio(wav_paths)
        label_encoder = EncodeMultiLabel(labels, encoder)

        class Dataset:
            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {
                    'x': audio['wav'],
                    'x_len': audio['wav_len'],
                    'y': label['binary_labels'],
                    'labels': label['labels'],
                    'unique_name': ids[index],
                }

        return Dataset()

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Build a fixed-batch-size sampler for the requested mode.

        BUG FIX: the previous version always used the ``train`` sub-config,
        silently ignoring the ``valid``/``test`` entries that
        :obj:`default_config` defines (e.g. batch_size=1 for evaluation);
        the sibling HearDcase2016Task2 sampler already selects by ``mode``.
        """

        @dataclass
        class Config:
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        mode_conf = getattr(conf, mode, None) or {}
        return FixedBatchSizeBatchSampler(dataset, **mode_conf)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Build the HEAR fully-connected prediction head."""
        return HearFullyConnectedPrediction(downstream_input_size, downstream_output_size, **build_downstream)

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame = None, test_df: pd.DataFrame = None):
        """Build the scene prediction task (valid_df/test_df are unused here)."""
        return ScenePredictionTask(model, encoder, **build_task)
class HearGSC5hr(HearFSD):
    """HEAR Google Speech Commands 5-hour keyword classification (fixed splits)."""

    def default_config(self) -> dict:
        """Default configuration; accuracy is the only reported score."""
        return {
            "start": 0,
            "stop": None,
            "target_dir": MISSING,
            "cache_dir": None,
            "remove_all_cache": False,
            "prepare_data": {"dataset_root": MISSING},
            "build_batch_sampler": {
                "train": {"batch_size": 32, "shuffle": True},
                "valid": {"batch_size": 1},
                "test": {"batch_size": 1},
            },
            "build_upstream": {"name": MISSING},
            "build_featurizer": {"layer_selections": None, "normalize": False},
            "build_downstream": {"hidden_layers": 2, "pooling_type": "MeanPooling"},
            "build_model": {"upstream_trainable": False},
            "build_task": {
                "prediction_type": "multiclass",
                "scores": ["top1_acc"],
            },
            "build_optimizer": {"name": "Adam", "conf": {"lr": 0.001}},
            "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
            "save_model": {},
            "save_task": {},
            "train": {
                "total_steps": 150000,
                "log_step": 100,
                "eval_step": 1000,
                "save_step": 100,
                "gradient_clipping": 1.0,
                "gradient_accumulate": 1,
                "valid_metric": "top1_acc",
                "valid_higher_better": True,
                "auto_resume": True,
                "resume_ckpt_dir": None,
            },
            "evaluate": {},
        }
class HearGtzan(HearESC50):
    """HEAR GTZAN music genre classification (k-fold scene task)."""

    def default_config(self) -> dict:
        """Default configuration; folds come from ``GTZAN_NUM_FOLDS``."""
        return {
            "start": 0,
            "stop": None,
            "target_dir": MISSING,
            "cache_dir": None,
            "remove_all_cache": False,
            "prepare_data": {
                "dataset_root": MISSING,
                "test_fold": MISSING,
                "num_folds": GTZAN_NUM_FOLDS,
            },
            "build_batch_sampler": {
                "train": {"batch_size": 32, "shuffle": True},
                "valid": {"batch_size": 1},
                "test": {"batch_size": 1},
            },
            "build_upstream": {"name": MISSING},
            "build_featurizer": {"layer_selections": None, "normalize": False},
            "build_downstream": {"hidden_layers": 2, "pooling_type": "MeanPooling"},
            "build_model": {"upstream_trainable": False},
            "build_task": {
                "prediction_type": "multiclass",
                "scores": ["top1_acc", "mAP", "d_prime", "aucroc"],
            },
            "build_optimizer": {"name": "Adam", "conf": {"lr": 0.001}},
            "build_scheduler": {"name": "ExponentialLR", "gamma": 0.9},
            "save_model": {},
            "save_task": {},
            "train": {
                "total_steps": 150000,
                "log_step": 100,
                "eval_step": 1000,
                "save_step": 100,
                "gradient_clipping": 1.0,
                "gradient_accumulate": 1,
                "valid_metric": "top1_acc",
                "valid_higher_better": True,
                "auto_resume": True,
                "resume_ckpt_dir": None,
            },
            "evaluate": {},
        }
class HearGtzanMusicSpeech(HearESC50):
    """HEAR GTZAN music/speech binary-style classification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=GTZAN_MUSIC_SPEECH_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'mAP', 'd_prime', 'aucroc'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearGunshot(HearESC50):
    """HEAR gunshot triangulation classification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=GUNSHOT_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearLibriCount(HearESC50):
    """HEAR LibriCount speaker-count classification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=LIBRICOUNT_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
def prepare_maestro(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int=0, get_path_only: bool=False):
    """Prepare train/valid/test csv metadata files for the HEAR MAESTRO task.

    The corpus ships as ``MAESTRO_NUM_FOLDS`` pre-defined folds. ``test_fold``
    is held out for testing, the next fold (cyclically) is used for validation,
    and the remaining folds form the training set. Each csv row is one
    annotated note segment of a recording.

    Args:
        target_dir (str): directory to write train.csv / valid.csv / test.csv
        cache_dir (str): unused here; kept for the unified prepare_data interface
        dataset_root (str): root of the downloaded HEAR MAESTRO corpus
        test_fold (int): 0-based index of the fold to hold out for testing
        get_path_only (bool): return the csv paths without (re)generating them

    Returns:
        tuple: (train_csv, valid_csv, [test_csv])
    """
    target_dir: Path = Path(target_dir)
    train_csv = target_dir / 'train.csv'
    valid_csv = target_dir / 'valid.csv'
    test_csv = target_dir / 'test.csv'
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])

    assert test_fold < MAESTRO_NUM_FOLDS, (
        f"MAESTRO only has {MAESTRO_NUM_FOLDS} folds but get 'test_fold' arguments {test_fold}"
    )

    # Ensure a 16kHz copy of the corpus exists; waveforms are read from <root>/16000
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root = dataset_root / '16000'

    # FIX: use the MAESTRO_NUM_FOLDS constant instead of a hard-coded 5, so the
    # fold loop stays consistent with the assert above.
    NUM_FOLD = MAESTRO_NUM_FOLDS
    test_id = test_fold
    valid_id = (test_fold + 1) % NUM_FOLD  # validation fold cyclically follows the test fold
    train_ids = [idx for idx in range(NUM_FOLD) if idx not in [test_id, valid_id]]

    fold_dfs = []
    for fold_id in range(NUM_FOLD):
        # Fold files are zero-padded, e.g. fold00.json; ':02d' replaces the
        # original "f'{id:2d}'.replace(' ', '0')" round-trip with one format spec.
        with open(dataset_root / f'fold{fold_id:02d}.json') as f:
            metadata = json.load(f)

        data = defaultdict(list)
        for utt in metadata:
            wav_path = (wav_root / f'fold{fold_id:02d}' / utt).resolve()
            info = torchaudio.info(wav_path)
            baseinfo = {
                'record_id': utt,
                'wav_path': str(wav_path),
                'duration': info.num_frames / info.sample_rate,
            }
            # One csv row per annotated segment; 'start'/'end' are in milliseconds
            for segment in metadata[utt]:
                fullinfo = deepcopy(baseinfo)
                fullinfo['utt_id'] = f"{baseinfo['record_id']}-{int(segment['start'])}-{int(segment['end'])}"
                fullinfo['labels'] = segment['label']
                fullinfo['start_sec'] = segment['start'] / 1000
                fullinfo['end_sec'] = segment['end'] / 1000
                for key, value in fullinfo.items():
                    data[key].append(value)
        fold_dfs.append(pd.DataFrame(data=data))

    test_data = fold_dfs[test_id]
    valid_data = fold_dfs[valid_id]
    # Concatenate the remaining folds into the training dataframe
    train_data: pd.DataFrame = pd.concat([fold_dfs[idx] for idx in train_ids])

    train_data.to_csv(train_csv, index=False)
    valid_data.to_csv(valid_csv, index=False)
    test_data.to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
class HearMaestro(HearDcase2016Task2):
    """HEAR MAESTRO note-onset transcription: event-based multilabel prediction."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING, test_fold=MISSING),
            build_batch_sampler=dict(
                train=dict(batch_size=5, shuffle=True),
                # valid/test are grouped per recording instead of fixed batch size
                valid=dict(item='record_id'),
                test=dict(item='record_id'),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multilabel',
                scores=['event_onset_50ms_fms', 'event_onset_offset_50ms_20perc_fms'],
                postprocessing_grid={'median_filter_ms': [150], 'min_duration': [50]},
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=15000,
                log_step=100,
                eval_step=500,
                save_step=500,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='event_onset_50ms_fms',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Delegate to :obj:`prepare_maestro` with the flattened ``prepare_data`` config."""
        return prepare_maestro(**self._get_current_arguments(flatten_dict='prepare_data'))
class HearNsynth5hr(HearFSD):
    """HEAR NSynth pitch (5hr subset): multiclass pitch classification."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['pitch_acc', 'chroma_acc'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='pitch_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearStroke(HearESC50):
    """HEAR vocal-imitation stroke classification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=STROKE_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearTonic(HearESC50):
    """HEAR tonic classification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=TONIC_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearVocal(HearESC50):
    """HEAR vocal imitation classification (k-fold cross validation, mAP-selected)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=VOCAL_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                # NOTE: unlike the sibling tasks, model selection uses mAP here
                valid_metric='mAP',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class HearVoxLingual(HearESC50):
    """HEAR VoxLingua107 top-10 language identification (k-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                test_fold=MISSING,
                num_folds=VOX_LINQUAL_NUM_FOLDS,
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_layers=2, pooling_type='MeanPooling'),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                prediction_type='multiclass',
                scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'],
            ),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=150000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='top1_acc',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )
class Common(Problem):
    """Generic 4-stage recipe shared by the "common" downstream problems.

    Subclasses supply the builders (:obj:`prepare_data`, :obj:`build_encoder`,
    :obj:`build_dataset`, :obj:`build_model`, ...); :obj:`run` wires them into
    a prepare -> encode -> train -> evaluate pipeline.
    """

    def run(
        self,
        target_dir: str,
        cache_dir: str = None,
        remove_all_cache: bool = False,
        start: int = 0,
        stop: int = None,
        num_workers: int = 6,
        eval_batch: int = (-1),
        device: str = 'cuda',
        world_size: int = 1,
        rank: int = 0,
        test_ckpt_dir: str = None,
        prepare_data: dict = None,
        build_encoder: dict = None,
        build_dataset: dict = None,
        build_batch_sampler: dict = None,
        build_collate_fn: dict = None,
        build_upstream: dict = None,
        build_featurizer: dict = None,
        build_downstream: dict = None,
        build_model: dict = None,
        build_task: dict = None,
        build_optimizer: dict = None,
        build_scheduler: dict = None,
        save_model: dict = None,
        save_task: dict = None,
        train: dict = None,
        evaluate: dict = None,
    ):
        """Run the problem pipeline from stage ``start`` to stage ``stop``.

        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the metadata csv files (waveform path, label...)
        1        Build the encoder to encode the labels
        2        Train the model
        3        Evaluate the model on multiple test sets
        ======== ====================

        Args:
            target_dir (str): directory that stores the script result
            cache_dir (str): directory that caches the processed data;
                default: ~/.cache/s3prl/data
            remove_all_cache (bool): remove everything under ``cache_dir`` first
            start (int): the starting stage of the problem script
            stop (int): the stopping stage; ``None`` runs to the final stage
            num_workers (int): num_workers for all the torch DataLoaders
            eval_batch (int): limit the number of evaluation batches
                (fast development); -1 evaluates the entire epoch
            device (str): "cpu" or "cuda"
            world_size (int): number of parallel (distributed) processes
            rank (int): this process's id, in [0, world_size)
            test_ckpt_dir (str): checkpoint dir for testing; defaults to the
                validation-best checkpoint under ``target_dir``
            prepare_data ... evaluate (dict): stage-specific arguments passed
                to the corresponding builder methods; see each method's docs
        """
        # Snapshot the fully-resolved configuration of this run for reproducibility
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize every per-stage config to a dict so builders can splat them
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        prepare_data: dict = (prepare_data or {})
        build_encoder: dict = (build_encoder or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate = (evaluate or {})

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)

        # ---- Stage 0: parse the corpus into train/valid/test csv metadata ----
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=False)
        # Re-query the csv paths with get_path_only=True so they are defined
        # even when stage 0 is skipped via `start`
        (train_csv, valid_csv, test_csvs) = self.prepare_data(prepare_data, target_dir, cache_dir, get_path_only=True)

        def check_fn():
            # sanity check before stopping at / moving past this stage
            assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 1: build the label encoder ----
        stage_id = 1
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: build encoder')
            encoder_path = self.build_encoder(build_encoder, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=False)
        # same skip-friendly path re-query pattern as stage 0
        encoder_path = self.build_encoder(build_encoder, target_dir, cache_dir, train_csv, valid_csv, test_csvs, get_path_only=True)

        def check_fn():
            assert Path(encoder_path).is_file()

        self._stage_check(stage_id, stop, check_fn)

        with open(encoder_path, 'rb') as f:
            encoder = pickle.load(f)
        model_output_size = len(encoder)
        # Build a throwaway model here only to query its downsample rate,
        # which the datasets need as the frame shift
        model = self.build_model(build_model, model_output_size, build_upstream, build_featurizer, build_downstream)
        frame_shift = model.downsample_rate

        # ---- Stage 2: train ----
        stage_id = 2
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'train', train_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'valid', valid_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
            with Path(encoder_path).open('rb') as f:
                encoder = pickle.load(f)
            # Builder arguments are passed as dicts so self.train can
            # re-instantiate model/task (e.g. when resuming)
            build_model_all_args = dict(build_model=build_model, model_output_size=len(encoder), build_upstream=build_upstream, build_featurizer=build_featurizer, build_downstream=build_downstream)
            build_task_all_args_except_model = dict(build_task=build_task, encoder=encoder, valid_df=pd.read_csv(valid_csv))
            self.train(train, train_dir, build_model_all_args, build_task_all_args_except_model, save_model, save_task, build_optimizer, build_scheduler, evaluate, train_ds, train_bs, self.build_collate_fn(build_collate_fn, 'train'), valid_ds, valid_bs, self.build_collate_fn(build_collate_fn, 'valid'), device=device, eval_batch=eval_batch, num_workers=num_workers, world_size=world_size, rank=rank)

        def check_fn():
            assert (train_dir / 'valid_best').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        # ---- Stage 3: evaluate on every test csv ----
        stage_id = 3
        if (start <= stage_id):
            # default to the validation-best checkpoint produced by stage 2
            test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
            assert test_ckpt_dir.is_dir()
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_csv) in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                # one result directory per (checkpoint, test set) pair
                test_dir: Path = (((target_dir / 'evaluate') / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-')) / test_name)
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_csv}')
                (test_ds, test_bs) = self._build_dataset_and_sampler(target_dir, cache_dir, 'test', test_csv, encoder_path, frame_shift, build_dataset, build_batch_sampler)
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir, task_overrides={'test_df': pd.read_csv(test_csv)})
                logs = self.evaluate(evaluate, 'test', valid_best_task, test_ds, test_bs, self.build_collate_fn(build_collate_fn, 'test'), eval_batch, test_dir, device, num_workers)
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                logger.info(f'test results: {test_metrics}')
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str, data_csv: str, encoder_path: str, frame_shift: int, build_dataset: dict, build_batch_sampler: dict):
        """Build the dataset and its batch sampler for one split (``mode``)."""
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, encoder_path, frame_shift)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model: torch.nn.Module, encoder, valid_df: pd.DataFrame = None, test_df: pd.DataFrame = None):
        """Build the task wrapping the model's train/valid/test step logic.

        The task defines the per-step forward logic for the ``model`` and how
        batch results are reduced into metrics. By default builds
        :obj:`UtteranceClassificationTask`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            valid_df (pd.DataFrame): unused by the default task
            test_df (pd.DataFrame): unused by the default task

        Returns:
            Task
        """
        task = UtteranceClassificationTask(model, encoder)
        return task
def iemocap_for_superb(target_dir: str, cache_dir: str, iemocap: str, test_fold: int, valid_ratio: float = 0.2, get_path_only: bool = False):
    """Prepare IEMOCAP for emotion classification with the SUPERB protocol,
    following :obj:`SuperbER.prepare_data` format.

    .. note::

        In SUPERB protocol, you need to do 5-fold cross validation.

        Only 4 emotion classes are used: ``happy``, ``angry``, ``neutral``
        and ``sad`` (balanced data points); the ``excited`` class is merged
        into ``happy``.

    Args:
        target_dir (str): directory to write train.csv / valid.csv / test.csv
        cache_dir (str): directory for the downloaded split metadata files
        iemocap (str): the root path of the IEMOCAP corpus
        test_fold (int): which fold to use as the test fold, select from 0 to 4
        valid_ratio (float): fraction of the remaining folds used as validation
        get_path_only (bool): return the csv paths without generating them

    Returns:
        tuple: (train_path, valid_path, test_paths)
    """
    target_dir = Path(target_dir)
    train_path = (target_dir / f'train.csv')
    valid_path = (target_dir / f'valid.csv')
    test_paths = [(target_dir / f'test.csv')]
    if get_path_only:
        return (train_path, valid_path, test_paths)

    corpus = IEMOCAP(iemocap)
    all_datapoints = corpus.all_data

    def format_fields(data: dict):
        # Keep only the fields the downstream csv needs: wav_path and label
        result = dict()
        for data_id in data.keys():
            datapoint = data[data_id]
            result[data_id] = dict(wav_path=datapoint['wav_path'], label=datapoint['emotion'])
        return result

    def filter_data(data_ids: List[str]):
        # Keep only the 4 SUPERB classes, merging 'exc' (excited) into 'hap'
        result = dict()
        for data_id in data_ids:
            data_point = all_datapoints[data_id]
            if (data_point['emotion'] in ['neu', 'hap', 'ang', 'sad', 'exc']):
                if (data_point['emotion'] == 'exc'):
                    # NOTE(review): this relabels the shared corpus entry in
                    # place; harmless here since each id lands in one split
                    data_point['emotion'] = 'hap'
                result[data_id] = data_point
        return result

    # Sessions are 1-indexed in the official split: session (test_fold + 1)
    # is held out for testing; its split metadata is pinned to a fixed commit
    test_session_id = (test_fold + 1)
    train_meta_data_json = (Path(cache_dir) / f'test_session{test_session_id}_train_metadata.json')
    test_meta_data_json = (Path(cache_dir) / f'test_session{test_session_id}_test_metadata.json')
    download(train_meta_data_json, f'https://huggingface.co/datasets/s3prl/iemocap_split/raw/4097f2b496c41eed016d4e5eb0ada4cccd46d1f3/Session{test_session_id}/train_meta_data.json', refresh=False)
    download(test_meta_data_json, f'https://huggingface.co/datasets/s3prl/iemocap_split/raw/4097f2b496c41eed016d4e5eb0ada4cccd46d1f3/Session{test_session_id}/test_meta_data.json', refresh=False)

    with open(train_meta_data_json) as f:
        metadata = json.load(f)['meta_data']
        # utterance ids are the waveform file stems
        dev_ids = [Path(item['path']).stem for item in metadata]
    with open(test_meta_data_json) as f:
        metadata = json.load(f)['meta_data']
        test_ids = [Path(item['path']).stem for item in metadata]

    # Deterministically split the dev ids into train/valid
    train_len = int(((1 - valid_ratio) * len(dev_ids)))
    train_valid_lens = [train_len, (len(dev_ids) - train_len)]
    torch.manual_seed(0)  # fixed seed so the random split is reproducible
    (train_ids, valid_ids) = random_split(dev_ids, train_valid_lens)

    train_data = format_fields(filter_data(train_ids))
    valid_data = format_fields(filter_data(valid_ids))
    test_data = format_fields(filter_data(test_ids))

    def dict_to_csv(data_dict, csv_path):
        # Serialize {id: {field: value}} into a csv with one row per id,
        # rows sorted by id and columns sorted by field name (+ trailing 'id')
        keys = sorted(list(data_dict.keys()))
        fields = sorted(data_dict[keys[0]].keys())
        data = dict()
        for field in fields:
            data[field] = []
            for key in keys:
                data[field].append(data_dict[key][field])
        data['id'] = keys
        df = pd.DataFrame(data)
        df.to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
class SuperbER(SuperbSID):
    """SUPERB Emotion Recognition on IEMOCAP (4 classes, 5-fold cross validation)."""

    def default_config(self) -> dict:
        """Return the default configuration dict for this problem."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(iemocap=MISSING, test_fold=MISSING),
            build_encoder=dict(),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=4, shuffle=True),
                valid=dict(batch_size=4),
                test=dict(batch_size=4),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=30000,
                log_step=500,
                eval_step=1000,
                save_step=1000,
                gradient_clipping=1.0,
                gradient_accumulate=8,
                valid_metric='accuracy',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
            evaluate=dict(),
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Prepare the task-specific data metadata (path, labels...).

        By default delegates to :obj:`iemocap_for_superb` with the flattened
        ``prepare_data`` config.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                supports the arguments of :obj:`iemocap_for_superb`
            target_dir (str): parse the corpus and save the csv files here
            cache_dir (str): directory for slow intermediate artifacts, shared
                across training sessions
            get_path_only (bool): directly return the filepaths whether or not
                they exist yet

        Returns:
            tuple: (train_path, valid_path, test_paths), where each csv file
            contains ``id``, ``wav_path``, ``label`` columns and optionally
            ``start_sec`` / ``end_sec`` to load a sub-segment of the waveform.
        """
        return iemocap_for_superb(**self._get_current_arguments(flatten_dict='prepare_data'))
def fsc_for_multi_classification(target_dir: str, cache_dir: str, dataset_root: str, n_jobs: int=6, get_path_only: bool=False):
    """Prepare Fluent Speech Commands for multi-class classification,
    following :obj:`SuperbIC.prepare_data` format.

    The three intent slots (action, object, location) are joined into a single
    ' ; '-separated ``labels`` string per utterance.

    Args:
        target_dir (str): directory to write train.csv / valid.csv / test.csv
        cache_dir (str): unused here; kept for the unified prepare_data interface
        dataset_root (str): the root path of Fluent Speech Commands
        n_jobs (int): parallel workers for the corpus parsing
        get_path_only (bool): return the csv paths without generating them

    Returns:
        tuple: (train_path, valid_path, test_paths)
    """
    target_dir = Path(target_dir)
    train_path = target_dir / f'train.csv'
    valid_path = target_dir / f'valid.csv'
    test_paths = [target_dir / f'test.csv']
    if get_path_only:
        return (train_path, valid_path, test_paths)

    def format_fields(data_points: dict):
        # Collapse each datapoint to wav_path + the joint slot label string
        formatted = {}
        for utt_id, info in data_points.items():
            formatted[utt_id] = dict(
                wav_path=info['path'],
                labels=f"{info['action']} ; {info['object']} ; {info['location']}",
            )
        return formatted

    corpus = FluentSpeechCommands(dataset_root, n_jobs)
    train_split, valid_split, test_split = corpus.data_split
    train_data = format_fields(train_split)
    valid_data = format_fields(valid_split)
    test_data = format_fields(test_split)

    def dict_to_csv(data_dict, csv_path):
        # One row per id (sorted), one column per field (sorted) plus 'id' last
        ids = sorted(data_dict.keys())
        field_names = sorted(data_dict[ids[0]].keys())
        columns = {field: [data_dict[utt_id][field] for utt_id in ids] for field in field_names}
        columns['id'] = ids
        pd.DataFrame(columns).to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
class SuperbIC(Common):
    """SUPERB Intent Classification problem recipe.

    Wires together data preparation (Fluent Speech Commands), label encoding,
    dataset/batch-sampler construction, and the downstream multi-class
    classification model/task.
    """

    def default_config(self) -> dict:
        """Return the default arguments for every stage of the recipe."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(dataset_root=MISSING),
            build_encoder=dict(),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=32, shuffle=True),
                valid=dict(batch_size=32),
                test=dict(batch_size=32),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(),
            build_optimizer=dict(name="Adam", conf=dict(lr=0.0001)),
            build_scheduler=dict(name="ExponentialLR", gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=5000,
                save_step=250,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric="accuracy",
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
        )

    def prepare_data(
        self,
        prepare_data: dict,
        target_dir: str,
        cache_dir: str,
        get_path_only: bool = False,
    ):
        """Prepare the task-specific data metadata (path, labels...).

        By default call :obj:`fsc_for_multi_classification` with
        :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                arguments for :obj:`fsc_for_multi_classification`
            target_dir (str): Parse your corpus and save the csv file into this directory
            cache_dir (str): If the parsing or preprocessing takes too long time, you can
                save the temporary files into this directory. This directory is expected
                to be shared across different training sessions (different hypers and
                :code:`target_dir`)
            get_path_only (bool): Directly return the filepaths no matter they exist or not.

        Returns:
            tuple

            1. train_path (str)
            2. valid_path (str)
            3. test_paths (List[str])

            Each path (str) should be a csv file containing the following columns:

            ==================== ====================
            column               description
            ==================== ====================
            id                   (str) - the unique id for this data point
            wav_path             (str) - the absolute path of the waveform file
            labels               (str) - the string labels of the waveform, separated by a ';'
            ==================== ====================

            The number of the label columns can be arbitrary.
        """
        return fsc_for_multi_classification(
            **self._get_current_arguments(flatten_dict="prepare_data")
        )

    def build_encoder(
        self,
        build_encoder: dict,
        target_dir: str,
        cache_dir: str,
        train_csv_path: str,
        valid_csv_path: str,
        test_csv_paths: list,
        get_path_only: bool = False,
    ):
        """Build the encoder (for the labels) from the data metadata.

        Generates and pickles a :obj:`s3prl.dataio.encoder.CategoryEncoders`
        built from the ';'-separated ``labels`` column of all csv files.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Save your encoder into this directory
            cache_dir (str): If the preprocessing takes too long time, you can save
                the temporary files into this directory. This directory is expected to be
                shared across different training sessions (different hypers and
                :code:`target_dir`)
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): Directly return the filepath no matter it exists or not.

        Returns:
            the built :obj:`CategoryEncoders` (note: NOT the path), which is also
            pickled to ``target_dir/encoder.pkl``. When :code:`get_path_only` is
            True, returns that pickle path instead.
        """
        encoder_path = Path(target_dir) / "encoder.pkl"
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        # Each row's "labels" holds e.g. "action ; object ; location";
        # zip(*multilabels) groups the i-th label of every row into one category.
        multilabels = [
            [label.strip() for label in multilabel.split(";")]
            for multilabel in all_csv["labels"].tolist()
        ]
        encoder = CategoryEncoders(
            [single_category_labels for single_category_labels in zip(*multilabels)]
        )
        with open(encoder_path, "wb") as f:
            pickle.dump(encoder, f)

        return encoder

    def build_dataset(
        self,
        build_dataset: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
        frame_shift: int,
    ):
        """Build the dataset for train/valid/test.

        Args:
            build_dataset (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Current experiment directory
            cache_dir (str): If the preprocessing takes too long time, you can save
                the temporary files into this directory. This directory is expected to be
                shared across different training sessions (different hypers and
                :code:`target_dir`)
            mode (str): train/valid/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            encoder_path (str): The pickled encoder path for encoding the labels
            frame_shift (int): unused by this recipe; kept for interface
                consistency with other problems

        Returns:
            torch Dataset

            For all train/valid/test mode, the dataset should return each item as a
            dictionary containing the following keys:

            ==================== ====================
            key                  description
            ==================== ====================
            x                    (torch.FloatTensor) - the waveform in (seq_len, 1)
            x_len                (int) - the waveform length :code:`seq_len`
            class_ids            (torch.LongTensor) - the encoded class ids. shape: (num_class, )
            labels               (List[str]) - the class name. length: num_class
            unique_name          (str) - the unique id for this datapoint
            ==================== ====================
        """
        csv = pd.read_csv(data_csv)
        ids = csv["id"].tolist()
        audio_loader = LoadAudio(csv["wav_path"].tolist())

        with open(encoder_path, "rb") as f:
            encoder = pickle.load(f)

        label_encoder = EncodeCategories(
            [
                [label.strip() for label in multilabel.split(";")]
                for multilabel in csv["labels"].tolist()
            ],
            encoder,
        )

        class Dataset:
            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {
                    "x": audio["wav"],
                    "x_len": audio["wav_len"],
                    "class_ids": label["class_ids"],
                    "labels": label["labels"],
                    "unique_name": ids[index],
                }

        dataset = Dataset()
        return dataset

    def build_batch_sampler(
        self,
        build_batch_sampler: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        dataset: Dataset,
    ):
        """Return the batch sampler for torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`

                ==================== ====================
                key                  description
                ==================== ====================
                train                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                valid                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                test                 (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                ==================== ====================

            target_dir (str): Current experiment directory
            cache_dir (str): If the preprocessing takes too long time, save
                the temporary files into this directory. This directory is expected to be
                shared across different training sessions (different hypers and
                :code:`target_dir`)
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader (``None`` for an unknown mode)
        """

        def _build_batch_sampler(train: dict = None, valid: dict = None, test: dict = None):
            # Dispatch on mode; implicitly returns None for any other value.
            if mode == "train":
                return FixedBatchSizeBatchSampler(dataset, **train)
            elif mode == "valid":
                return FixedBatchSizeBatchSampler(dataset, **valid)
            elif mode == "test":
                return FixedBatchSizeBatchSampler(dataset, **test)

        return _build_batch_sampler(**build_batch_sampler)

    def build_downstream(
        self,
        build_downstream: dict,
        downstream_input_size: int,
        downstream_output_size: int,
        downstream_input_stride: int,
    ):
        """Return the task-specific downstream model.

        By default build the :obj:`MeanPoolingLinear` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz);
                unused by :obj:`MeanPoolingLinear`

        Returns:
            :obj:`AbsUtteranceModel`
        """
        model = MeanPoolingLinear(
            downstream_input_size, downstream_output_size, **build_downstream
        )
        return model

    def build_task(
        self,
        build_task: dict,
        model: torch.nn.Module,
        encoder,
        valid_df: pd.DataFrame = None,
        test_df: pd.DataFrame = None,
    ):
        """Build the task defining train/valid/test forward steps for the
        :code:`model` and how batch results are reduced into metrics.

        By default build :obj:`UtteranceMultiClassClassificationTask`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            valid_df (pd.DataFrame): metadata of the valid set (unused here)
            test_df (pd.DataFrame): metadata of the test set (unused here)

        Returns:
            Task
        """
        return UtteranceMultiClassClassificationTask(model, encoder)