# Dataset-export artifact (column header from the source dump); not executable code.
def extract_stats(output):
    """Parse key/value statistics from X-CUBE-AI generatecode stdout.

    @param output: raw process stdout as bytes (UTF-8 encoded)
    @return dict with any of the keys: maccs_frame, ram_usage_max,
        ram_usage_min, flash_usage (missing when not reported)
    """
    regex = ' ([^:]*):(.*)'
    out = {}
    matches = re.finditer(regex, output.decode('utf-8'), re.MULTILINE)
    for match in matches:
        (key, value) = match.groups()
        key = key.strip()
        value = value.strip()
        if key == 'MACC / frame':
            out['maccs_frame'] = int(value)
        elif key == 'RAM size':
            # Value looks like "NNN unit (Minimum: MMM unit)".
            # Renamed from `min`, which shadowed the builtin.
            (ram_max, ram_min) = value.split('(Minimum:')
            out['ram_usage_max'] = parse_with_unit(ram_max)
            out['ram_usage_min'] = parse_with_unit(ram_min.rstrip(')'))
        elif key == 'ROM size':
            out['flash_usage'] = parse_with_unit(value)
    return out
|
def test_ram_use():
    """Self-test: extract_ram_use must map declared array names to sizes."""
    examples = [('\n AI_ARRAY_OBJ_DECLARE(\n input_1_output_array, AI_DATA_FORMAT_FLOAT, \n NULL, NULL, 1860,\n AI_STATIC)\n AI_ARRAY_OBJ_DECLARE(\n conv2d_1_output_array, AI_DATA_FORMAT_FLOAT, \n NULL, NULL, 29760,\n AI_STATIC)\n ', {'input_1_output_array': 1860, 'conv2d_1_output_array': 29760})]
    for (text, expected) in examples:
        result = extract_ram_use(text)
        assert (result == expected), result
|
def extract_ram_use(text):
    """Extract per-array RAM use from generated network.c contents.

    Finds every AI_ARRAY_OBJ_DECLARE(name, format, ..., size, modifiers)
    statement and maps array name -> declared size.
    Parameter renamed from `str`, which shadowed the builtin type.

    @param text: contents of the generated C file
    @return dict of array name -> int size
    """
    regex = r'AI_ARRAY_OBJ_DECLARE\(([^)]*)\)'
    out = {}
    for match in re.finditer(regex, text, re.MULTILINE):
        (items,) = match.groups()
        fields = [field.strip() for field in items.split(',')]
        (name, _format, _, _, size, _modifiers) = fields
        out[name] = int(size)
    return out
|
def generatecode(model_path, out_path, name, model_type, compression):
    """Run the X-CUBE-AI generatecode tool on a model and collect statistics.

    @param model_path: path to the input model file
    @param out_path: directory receiving the generated C code and config
    @param name: name of the generated network
    @param model_type: model framework, e.g. 'keras'
    @param compression: weight compression factor (4, 8) or None
    @return stats dict parsed from tool output, plus per-array RAM under 'arrays'
    Raises subprocess.CalledProcessError when the tool fails.
    """
    home = str(pathlib.Path.home())
    version = os.environ.get('XCUBEAI_VERSION', '3.4.0')
    platform_name = platform.system().lower()
    if platform_name == 'darwin':
        # the X-CUBE-AI package names its macOS tool directory 'mac'
        platform_name = 'mac'
    p = 'STM32Cube/Repository/Packs/STMicroelectronics/X-CUBE-AI/{version}/Utilities/{os}/generatecode'.format(version=version, os=platform_name)
    default_path = os.path.join(home, p)
    cmd_path = os.environ.get('XCUBEAI_GENERATECODE', default_path)
    # FIX: exist_ok avoids the race between the existence check and creation
    os.makedirs(out_path, exist_ok=True)
    config = generate_config(model_path, out_path, name=name, model_type=model_type, compression=compression)
    config_path = os.path.join(out_path, 'config.ai')
    with open(config_path, 'w') as f:
        f.write(config)
    args = [cmd_path, '--auto', '-c', config_path]
    stdout = subprocess.check_output(args, stderr=subprocess.STDOUT)
    stats = extract_stats(stdout)
    assert len(stats.keys()), 'No model output. Stdout: {}'.format(stdout)
    with open(os.path.join(out_path, 'network.c'), 'r') as f:
        network_c = f.read()
    # augment the tool stats with per-array RAM usage from the generated C
    stats['arrays'] = extract_ram_use(network_c)
    return stats
|
def parse():
    """Define and parse command-line arguments for model conversion."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    add = parser.add_argument
    supported_types = '|'.join(model_options.keys())
    add('model', metavar='PATH', type=str, help='The model to convert')
    add('out', metavar='DIR', type=str, help='Where to write generated output')
    add('--type', default='keras', help='Type of model. {}'.format(supported_types))
    add('--name', default='network', help='Name of the generated network')
    add('--compression', default=None, type=int, help='Compression setting to use. Valid: 4|8')
    return parser.parse_args()
|
def main():
    """CLI entry point: self-test the parser, convert the model, report stats."""
    opts = parse()
    test_ram_use()
    model_stats = generatecode(opts.model, opts.out, name=opts.name, model_type=opts.type, compression=opts.compression)
    print('Wrote model to', opts.out)
    print('Model status: ', json.dumps(model_stats))
|
def load_model_info(jobs_dir, job_dir):
    """Load one job's training history and per-epoch model checkpoint paths.

    The job directory name encodes metadata as
    '<experiment>-<date>-<time>-<rnd>-fold<N>'.

    @param jobs_dir: parent directory of all jobs
    @param job_dir: single job directory name (parsed for metadata)
    @return DataFrame of the job's train.csv with added columns:
        epoch (1-based), fold, experiment, run, model_path
    Raises ValueError when the directory name does not split into 5 parts,
    and FileNotFoundError when train.csv is missing.
    """
    (experiment, date, time, rnd, fold) = job_dir.split('-')
    hist_path = os.path.join(jobs_dir, job_dir, 'train.csv')
    df = pandas.read_csv(hist_path)
    # epochs in the CSV are 0-based; checkpoints below are 1-based (e01-...)
    df['epoch'] = (df.epoch + 1)
    df['fold'] = int(fold.lstrip('fold'))
    df['experiment'] = experiment
    df['run'] = '-'.join([date, time, rnd])
    models = []
    for fname in os.listdir(os.path.join(jobs_dir, job_dir)):
        if fname.endswith('model.hdf5'):
            models.append(fname)
    def get_epoch(s):
        # checkpoint names look like 'eNN-...model.hdf5'
        e = s.split('-')[0].lstrip('e')
        e = int(e)
        return e
    models = sorted(models, key=get_epoch)
    # sanity: one checkpoint per epoch, starting at e01, no gaps at the end
    assert models[0].startswith('e01-')
    last_model = models[(len(models) - 1)]
    expected_last = 'e{:02d}-'.format(len(models))
    assert last_model.startswith(expected_last), (last_model, expected_last)
    df['model_path'] = [os.path.join(jobs_dir, job_dir, m) for m in models]
    return df
|
def load_train_history(jobs_dir, limit=None):
    """Load and concatenate the training histories of all jobs in jobs_dir.

    @param jobs_dir: directory containing one subdirectory per training job
    @param limit: optional substring filter on job directory names
    @return concatenated DataFrame of per-job histories
    Jobs that fail to load are reported on stdout and skipped.
    """
    jobs = os.listdir(jobs_dir)
    matching = [d for d in jobs if (limit in d)] if limit else jobs
    dataframes = []
    for job_dir in matching:
        try:
            dataframes.append(load_model_info(jobs_dir, job_dir))
        except (FileNotFoundError, ValueError) as e:
            print('Failed to load job {}: {}'.format(job_dir, str(e)))
    return pandas.concat(dataframes)
|
def test_load_history():
    """Smoke-test loading the training history for one known job."""
    jobs_dir = '../../jobs'
    job_id = 'sbcnn44k128aug-20190227-0220-48ba'
    # BUG fix: load_history() does not exist; use load_train_history with the
    # job id as the directory-name filter
    df = load_train_history(jobs_dir, limit=job_id)
    assert len(df)
|
def pick_best(history, n_best=1):
    """Return the n_best rows per (experiment, fold), ranked by voted_val_acc."""
    def top_rows(frame):
        ranked = frame.sort_values('voted_val_acc', ascending=False)
        return ranked.head(n_best)
    return history.groupby(['experiment', 'fold']).apply(top_rows)
|
def evaluate_model(predictor, model_path, val_data, test_data):
    """Score a trained model on validation and test data, split by salience.

    @param predictor: callable (model, data) -> class-probability array
    @param model_path: path to a saved Keras model
    @param val_data, test_data: DataFrames with classID and salience columns
    @return dict of confusion matrices keyed '<set>_<salience>', plus
        combined 'val' and 'test' entries (element-wise sums)
    """
    def score(model, data):
        # compare voted predictions against ground truth for one subset
        y_true = data.classID
        p = predictor(model, data)
        y_pred = numpy.argmax(p, axis=1)
        acc = sklearn.metrics.accuracy_score(y_true, y_pred)
        labels = list(range(len(urbansound8k.classnames)))
        confusion = sklearn.metrics.confusion_matrix(y_true, y_pred, labels=labels)
        return (acc, confusion)
    model = keras.models.load_model(model_path)
    # salience codes used in the metadata: 1=foreground, 2=background
    salience_info = {'foreground': 1, 'background': 2}
    test_info = {'val': val_data, 'test': test_data}
    out = {}
    for (setname, data) in test_info.items():
        for (variant, salience) in salience_info.items():
            key = '{}_{}'.format(setname, variant)
            (acc, confusion) = score(model, data[(data.salience == salience)])
            print('acc for ', key, acc)
            out[key] = confusion
    # summing the confusion matrices combines foreground+background results
    out['val'] = (out['val_foreground'] + out['val_background'])
    out['test'] = (out['test_foreground'] + out['test_background'])
    return out
|
def evaluate(models, folds_data, predictor, out_dir, dry_run=False):
    """Evaluate the selected model of every experiment on its val/test folds.

    @param models: DataFrame of selected models, indexed by (experiment, fold)
    @param folds_data: list of (train, val, test) DataFrames; entry i holds fold i+1
    @param predictor: callable (model, data) -> class probabilities
    @param out_dir: directory that receives one <experiment>.confusion.npz each
    @param dry_run: when True, evaluate only 20 samples per set (quick check)
    @return per-experiment Series of written result file paths
    """
    def eval_experiment(df):
        results = {}
        by_fold = df.sort_index(level='fold', ascending=True)
        for (idx, row) in by_fold.iterrows():
            fold = row['fold']
            assert (fold > 0), 'fold number should be 1 indexed'
            print('Testing model {} fold={}'.format(row['experiment'], fold))
            model_path = row['model_path']
            val = folds_data[(fold - 1)][1]
            test = folds_data[(fold - 1)][2]
            # the test set must consist of exactly this fold
            test_folds = test.fold.unique()
            assert (len(test_folds) == 1)
            assert (test_folds[0] == fold)
            # the validation set must be a single, different fold
            val_folds = val.fold.unique()
            assert (len(val_folds) == 1)
            assert (val_folds[0] != fold)
            # guard against train/test leakage via shared audio files
            train_data = folds_data[(fold - 1)][0]
            train_files = set(train_data.slice_file_name.unique())
            assert (len(train_files) > 6500), len(train_files)
            test_files = set(test.slice_file_name.unique())
            assert (len(test_files) > 700)
            common_files = train_files.intersection(test_files)
            assert (len(common_files) == 0), common_files
            if dry_run:
                val = test[0:20]
                test = test[0:20]
            result = evaluate_model(predictor, model_path, val, test)
            # accumulate confusion matrices per key across folds
            for (k, v) in result.items():
                if (results.get(k) is None):
                    results[k] = []
                results[k].append(v)
        exname = df['experiment'].unique()[0]
        results_path = os.path.join(out_dir, '{}.confusion.npz'.format(exname))
        numpy.savez(results_path, **results)
        print('Wrote', results_path)
        return results_path
    out = models.groupby(level='experiment').apply(eval_experiment)
    return out
|
def parse(args):
    """Parse command-line arguments for model evaluation.

    @param args: argument list, e.g. sys.argv[1:]
    @return parsed argparse.Namespace
    """
    import argparse
    parser = argparse.ArgumentParser(description='Test trained models')
    a = parser.add_argument
    common.add_arguments(parser)
    a('--run', dest='run', default='', help='%(default)s')
    # BUG fix: these store_true flags used default='' (empty string); use real
    # booleans — both are falsy, so existing truthiness checks behave the same
    a('--check', action='store_true', default=False, help='Run a check pass, not actually evaluating')
    a('--skip-stats', action='store_true', default=False, help='Do not compute on-device stats')
    a('--out', dest='results_dir', default='./data/results', help='%(default)s')
    parsed = parser.parse_args(args)
    return parsed
|
def main():
    """Evaluation pipeline: load histories, pick best models, compute stats, test."""
    args = parse(sys.argv[1:])
    out_dir = os.path.join(args.results_dir, args.run)
    common.ensure_directories(out_dir)
    urbansound8k.maybe_download_dataset(args.datasets_dir)
    data = urbansound8k.load_dataset()
    folds = urbansound8k.folds(data)
    exsettings = common.load_settings_path(args.settings_path)
    frames = exsettings['frames']
    voting = exsettings['voting']
    overlap = exsettings['voting_overlap']
    settings = features.settings(exsettings)
    def load_sample(sample):
        # load one windowed, optionally normalized feature sample from disk
        return features.load_sample(sample, settings, start_time=sample.start, window_frames=frames, feature_dir=args.features_dir, normalize=exsettings['normalize'])
    def predict(model, data):
        # predict with overlapped windows combined by the configured voting
        return features.predict_voted(exsettings, model, data, loader=load_sample, method=voting, overlap=overlap)
    history = load_train_history(args.models_dir, args.run)
    n_folds = len(history.fold.unique())
    n_experiments = len(history.experiment.unique())
    # NOTE(review): the braces are never substituted (print, not str.format),
    # and the two counts are passed in swapped order — fix when touching this
    print('Found {} experiments across {} folds', n_folds, n_experiments)
    best = pick_best(history)
    print('Best models\n', best[['epoch', 'fold', 'voted_val_acc']])
    print('Computing model info')
    def get_stats(row):
        # per-experiment on-device stats; also writes a per-layer CSV
        ex = row.iloc[0]
        model = ex['model_path']
        (model_stats, layer_info) = stats.model_info(model)
        layer_info_path = os.path.join(out_dir, '{}.layers.csv'.format(ex['experiment']))
        layer_info.to_csv(layer_info_path)
        return pandas.Series(model_stats)
    if (not args.skip_stats):
        model_stats = best.groupby(level='experiment').apply(get_stats)
        print('Model stats\n', model_stats)
        model_stats.to_csv(os.path.join(out_dir, 'stm32stats.csv'))
    print('Testing models...')
    results = evaluate(best, folds, predictor=predict, out_dir=out_dir, dry_run=args.check)
|
def maybe_download_dataset(workdir):
    """Download and extract UrbanSound8K into workdir, unless already present.

    @param workdir: directory holding the archive and extracted dataset
    @return path to the (possibly freshly extracted) UrbanSound8K directory
    """
    if (not os.path.exists(workdir)):
        os.makedirs(workdir)
    dir_path = os.path.join(workdir, 'UrbanSound8K')
    archive_path = (dir_path + '.tar.gz')
    last_progress = None
    def download_progress(count, blocksize, totalsize):
        # urlretrieve reporthook: print the percentage only when it changes
        nonlocal last_progress
        p = int((((count * blocksize) * 100) / totalsize))
        if (p != last_progress):
            print('\r{}%'.format(p), end='\r')
            last_progress = p
    if (not os.path.exists(dir_path)):
        print('Could not find', dir_path)
        # reuse a previously downloaded archive when available
        if (not os.path.exists(archive_path)):
            u = download_urls[0]
            print('Downloading...', u)
            urllib.request.urlretrieve(u, archive_path, reporthook=download_progress)
        print('Extracting...', archive_path)
        # NOTE(review): extractall trusts member paths in the downloaded
        # archive — consider validating entries before extraction
        with tarfile.open(archive_path, 'r:gz') as archive:
            archive.extractall(workdir)
    return dir_path
|
def load_dataset():
    """Load the UrbanSound8K metadata CSV (bundled with this package)."""
    metadata = os.path.join(here, 'datasets/UrbanSound8K.csv')
    return pandas.read_csv(metadata)
|
def sample_path(sample, dataset_path=None):
    """Return the on-disk path of one UrbanSound8K audio clip.

    @param sample: row with .fold and .slice_file_name attributes
    @param dataset_path: dataset root; falls back to module default_path
    """
    base = dataset_path if dataset_path else default_path
    fold_dir = 'fold' + str(sample.fold)
    return os.path.join(base, 'audio', fold_dir, sample.slice_file_name)
|
def folds(data):
    """Split the UrbanSound8K DataFrame into 10 (train, val, test) folds.

    Uses folds_idx() for the index layout. Fold numbers in the metadata are
    1-based, hence the +1 shifts below.

    @param data: metadata DataFrame with a 'fold' column
    @return list of 10 (train_df, val_df, test_df) tuples
    """
    fold_idxs = folds_idx(n_folds=10)
    assert (len(fold_idxs) == 10)
    folds = []
    for fold in fold_idxs:
        (train, val, test) = fold
        # convert 0-based indices to the dataset's 1-based fold numbers
        train = (numpy.array(train) + 1)
        val = (numpy.array(val) + 1)
        test = (numpy.array(test) + 1)
        fold_train = data[data.fold.isin(train)]
        fold_val = data[data.fold.isin(val)]
        fold_test = data[data.fold.isin(test)]
        # verify the split: 8 training folds and no overlap between subsets
        train_folds = set(fold_train.fold.unique())
        val_folds = set(fold_val.fold.unique())
        test_folds = set(fold_test.fold.unique())
        assert (len(train_folds) == 8), len(train_folds)
        assert (train_folds.intersection(val_folds) == set())
        assert (train_folds.intersection(test_folds) == set())
        assert (val_folds.intersection(test_folds) == set())
        folds.append((fold_train, fold_val, fold_test))
    return folds
|
def ensure_valid_fold(fold, n_folds=10):
    """Sanity-check a (train, val, test) triple of fold indices.

    Raises AssertionError when the fold is malformed; returns True otherwise.
    """
    (train, val, test) = fold
    # training uses all but two folds; val and test get one fold each
    assert len(train) == (n_folds - 2), len(train)
    assert 0 <= train[0] < n_folds, train[0]
    assert len(val) == 1, len(val)
    assert 0 <= val[0] < n_folds, val[0]
    assert len(test) == 1, len(test)
    assert 0 <= test[0] < n_folds, test[0]
    assert test[0] != val[0]
    # no fold may appear in both training and evaluation sets
    test_overlap = set(train) & set(test)
    val_overlap = set(train) & set(val)
    assert test_overlap == set(), test_overlap
    assert val_overlap == set(), val_overlap
    # together the three sets must cover every fold exactly once
    assert sorted(train + val + test) == list(range(0, n_folds))
    return True
|
def folds_idx(n_folds):
    """Generate fold indices for cross-validation.

    Each fold has 1 validation, 1 test set and the remaining train.
    The test fold cycles 0..n_folds-1; validation is the fold before it
    (wrapping around for index 0).
    """
    # removed unused local `test_fold = 10`
    folds = []
    all_folds = list(range(0, n_folds))
    for idx in range(0, n_folds):
        test = [all_folds[idx]]
        val = [all_folds[(idx - 1)]]
        train = list(set(all_folds).difference(set((test + val))))
        fold = (train, val, test)
        ensure_valid_fold(fold)
        folds.append(fold)
    assert (len(folds) == n_folds), len(folds)
    return folds
|
def sbcnn_generator(n_iter=400, random_state=1):
    """Yield (params, settings) pairs for random SB-CNN hyperparameter search.

    @param n_iter: number of random configurations to draw
    @param random_state: seed for reproducible sampling
    @return generator of (sampled_params_dict, full_settings_dict)
    """
    from sklearn.model_selection import ParameterSampler
    params = dict(kernel_t=range(3, 10, 2), kernel_f=range(3, 10, 2), pool_t=range(2, 5), pool_f=range(2, 5), kernels_start=range(16, 64), fully_connected=range(16, 128))
    sampler = ParameterSampler(params, n_iter=n_iter, random_state=random_state)
    # removed unused accumulators out_models / out_total_params
    for p in sampler:
        s = {'model': 'sbcnn', 'frames': 31, 'n_mels': 60, 'samplerate': 22050}
        # (frequency, time) tuples as expected by the model builder
        pool = (p['pool_f'], p['pool_t'])
        kernel = (p['kernel_f'], p['kernel_t'])
        for (k, v) in p.items():
            s[k] = v
        s['pool'] = pool
        s['kernel'] = kernel
        yield (p, s)
|
def generate_models():
    """Sample random SB-CNN configurations, build each and convert to STM32 code.

    @return DataFrame with one row per successfully built model: sampled
        hyperparameters, file paths, an id, and conversion statistics
    """
    gen = sbcnn_generator()
    data = {'model_path': [], 'gen_path': [], 'id': []}
    for out in iter(gen):
        model = None
        try:
            (params, settings) = out
            model = models.build(settings.copy())
        except ValueError as e:
            # some sampled configurations are invalid and fail to build
            print('Error:', e)
            continue
        # record the sampled hyperparameters, creating columns lazily
        for (k, v) in params.items():
            if (data.get(k) is None):
                data[k] = []
            data[k].append(v)
        model_id = str(uuid.uuid4())
        out_dir = os.path.join('scan', model_id)
        os.makedirs(out_dir)
        model_path = os.path.join(out_dir, 'model.orig.hdf5')
        out_path = os.path.join(out_dir, 'gen')
        model.save(model_path)
        stats = stm32convert.generatecode(model_path, out_path, name='network', model_type='keras', compression=None)
        data['model_path'].append(model_path)
        data['gen_path'].append(out_path)
        data['id'].append(model_id)
        # merge the conversion stats into the result table as well
        for (k, v) in stats.items():
            if (data.get(k) is None):
                data[k] = []
            data[k].append(v)
    df = pandas.DataFrame(data)
    return df
|
def main():
    """Run the hyperparameter scan and persist the results table as CSV."""
    results = generate_models()
    results.to_csv('scan.csv')
|
def main():
    """Build the ldcnn20k60 model, save it, and print per-layer inference FLOPs."""
    settings = common.load_experiment('experiments', 'ldcnn20k60')
    def build():
        return train.sb_cnn(settings)
    m = build()
    m.summary()
    m.save('model.wip.hdf5')
    s = settings
    # input shape: (mel bands, time frames, 1 channel)
    shape = (s['n_mels'], s['frames'], 1)
    model_stats = stats.analyze_model(build, [shape], n_classes=10)
    (flops, params) = model_stats
    # drop training-only scopes (e.g. gradients/optimizer) from the FLOP counts
    inference_flops = {name: v for (name, v) in flops.items() if (not stats.is_training_scope(name))}
    for (name, flop) in inference_flops.items():
        print(name, flop)
|
def check_missing(df, field, name='name'):
    """Print a warning listing rows where `field` is NaN, identified by `name`."""
    missing = df[df[field].isna()]
    if len(missing):
        names = list(missing[name])
        print('WARN. Missing "{}" for {}'.format(field, names))
|
def logmel_models(data_path):
    """Load the model summary CSV and derive absolute metrics for logmel models.

    Adds params (absolute count), window (s), t_step (s), f_res (Hz) and
    macc_s (multiply-accumulates per second); rows are indexed by model name.
    """
    df = pandas.read_csv(data_path)
    df = df[df['features'].str.contains('logmel')]
    df.index = df['name']
    # kparams/mmacc are stored in thousands/millions; convert to absolute
    df['params'] = df['kparams'] * 1000.0
    # analysis window length and hop, in seconds
    df['window'] = (df['frames'] * df['hop']) / df['samplerate']
    df['t_step'] = df['hop'] / df['samplerate']
    df['f_res'] = df['samplerate'] / df['bands']
    df['macc_s'] = (df['mmacc'] * 1000000.0) / df['window']
    return df
|
def model_table(data_path):
    """Render a LaTeX comparison table of logmel models, sorted by accuracy."""
    models = logmel_models(data_path)
    table = pandas.DataFrame()
    table['Accuracy (%)'] = (models.accuracy * 100)
    table['MACC / second'] = ['{} M'.format(int((v / 1000000.0))) for v in models.macc_s]
    table['Model parameters'] = ['{} k'.format(int((v / 1000.0))) for v in models.params]
    ordered = table.sort_values('Accuracy (%)', ascending=False)
    return ordered.to_latex(column_format='lrrr')
|
def plot_models(data_path, figsize=(12, 4), max_params=128000.0, max_maccs=4500000.0):
    """Scatter-plot model size vs compute with the feasible region shaded.

    @param data_path: CSV consumed by logmel_models()
    @param max_params, max_maccs: device budget used for the green feasible
        region and its boundary lines
    @return the matplotlib Figure
    """
    df = logmel_models(data_path)
    (fig, ax) = plt.subplots(1, figsize=figsize)
    # warn about rows that will be missing from the plot
    check_missing(df, 'accuracy')
    check_missing(df, 'kparams')
    check_missing(df, 'mmacc')
    df.plot.scatter(x='params', y='macc_s', logx=True, logy=True, ax=ax)
    ax.set_xlabel('Model parameters')
    ax.set_ylabel('MACC / second')
    # shade the region that fits within the device budget
    feasible_x = max_params
    feasible_y = max_maccs
    x = [0, feasible_x, feasible_x, 0]
    y = [0, 0, feasible_y, feasible_y]
    ax.fill(x, y, color='green', alpha=0.5)
    linestyle = dict(color='black', linewidth=0.5)
    ax.axvline(feasible_x, **linestyle)
    ax.axhline(feasible_y, **linestyle)
    def add_labels(row):
        # annotate each point with the model name and accuracy percentage
        xy = (row.params, row.macc_s)
        label = '{} {:.1f}%'.format(row['name'], (100 * row.accuracy))
        ax.annotate(label, xy, xytext=(5, 40), textcoords='offset points', size=12, rotation=25, color='darkslategrey')
    df.apply(add_labels, axis=1)
    fig.tight_layout()
    return fig
|
def flatten(items):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Parameter renamed from `list`, which shadowed the builtin type.
    Works on any iterable of iterables (including 2D numpy arrays of objects).
    """
    return [element for group in items for element in group]
|
def plot_spectrogram(f, ax=None, cmap=None):
    """Plot a log-frequency STFT spectrogram of an audio file.

    @param f: path to the audio file (resampled to 44.1 kHz on load)
    @param ax: existing axes to draw on; a new figure is created when None
    @param cmap: optional matplotlib colormap
    @return the created Figure, or None when drawing on a supplied ax
    """
    (y, sr) = librosa.load(f, sr=44100)
    fig = None
    if (not ax):
        (fig, ax) = plt.subplots(1, figsize=(16, 4))
    # magnitude spectrogram in dB relative to the peak
    S = numpy.abs(librosa.stft(y))
    S = librosa.amplitude_to_db(S, ref=numpy.max)
    kwargs = dict(ax=ax, y_axis='log', x_axis='time', sr=sr)
    if (cmap is not None):
        kwargs['cmap'] = cmap
    librosa.display.specshow(S, **kwargs)
    return fig
|
def plot_spectrograms(files, titles, out=None):
    """Plot spectrograms of several audio files in a two-row grid.

    @param files: audio file paths (length should be even for the 2-row grid)
    @param titles: one title per file
    @param out: optional path to save the figure to
    @return the matplotlib Figure
    """
    assert (len(files) == len(titles))
    (fig, axs) = plt.subplots(2, (len(files) // 2), sharex=True, figsize=(16, 6))
    axs = flatten(axs)
    for (i, (path, title, ax)) in enumerate(zip(files, titles, axs)):
        plot_spectrogram(path, ax=ax)
        ax.set_title(title)
        # only the first plot of each row keeps its y-axis labels/ticks
        if ((i != 0) and (i != (len(files) / 2))):
            ax.set_ylabel('')
            ax.set_yticks([])
        # x-axis labels only on the bottom row
        if (i < (len(files) / 2)):
            ax.set_xlabel('')
    if out:
        fig.savefig(out, bbox_inches='tight', pad_inches=0)
    return fig
|
def plot_examples(examples=None):
    """Plot spectrograms for a set of example sounds.

    @param examples: mapping title -> (relative_audio_path, ...); defaults to
        the module-level urbansound8k_examples
    @return the matplotlib Figure
    """
    # BUG fix: the argument was unconditionally overwritten with the module
    # global; now the global is only a fallback default, and the function can
    # be called with no arguments (as the plots registry does)
    if examples is None:
        examples = urbansound8k_examples
    here = os.path.dirname(__file__)
    base = os.path.join(here, '../microesc/../data/datasets/UrbanSound8K/audio/')
    paths = [os.path.join(base, e[0]) for e in examples.values()]
    fig = plot_spectrograms(paths, examples.keys())
    return fig
|
def main():
    """Render the plot named on the command line into the plots/ directory."""
    plotname = os.path.basename(sys.argv[1])
    here = os.path.dirname(__file__)
    plot_func = plots.get(plotname, None)
    if not plot_func:
        sys.stderr.write('Plot {} not found. Supported: \n{}'.format(plotname, plots.keys()))
        return 1
    result = plot_func()
    out_path = os.path.join(here, 'plots', plotname)
    target_dir = os.path.dirname(out_path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # dispatch on the requested file extension
    ext = os.path.splitext(plotname)[1]
    if ext == '.png':
        result.savefig(out_path, bbox_inches='tight')
    elif ext == '.tex':
        with open(out_path, 'w') as f:
            f.write(result)
    else:
        raise ValueError('Unknown extension {}'.format(ext))
|
def strformat(fmt, series):
    """Apply the format string to every element of the series."""
    out = []
    for item in series:
        out.append(fmt.format(item))
    return out
|
def downsample_from_name(name):
    """Infer the downsampling strategy from a model name prefix."""
    return 'stride' if name.startswith('Stride') else 'maxpool'
|
def accuracies(df, col):
    """Format '<col>_mean'/'<col>_std' columns (fractions) as percent strings."""
    mean_pct = df[(col + '_mean')] * 100
    std_pct = df[(col + '_std')] * 100
    return ['{:.1f}% +-{:.1f}'.format(m, s) for (m, s) in zip(mean_pct, std_pct)]
|
def cpu_use(df):
    """Estimate per-classification CPU time in milliseconds, as short strings."""
    ms = ((df.utilization * 1000) * 1) / df.classifications_per_second
    return ['{:d} ms'.format(v).ljust(3) for v in ms.astype(int)]
|
def plot_augmentations(y, sr, time_shift=3000, pitch_shift=12, time_stretch=1.3):
    """Illustrate audio augmentations as a grid of spectrograms.

    @param y: audio samples
    @param sr: sample rate in Hz
    @param time_shift: shift amount in samples
    @param pitch_shift: shift in semitones (applied up and down)
    @param time_stretch: stretch factor (>1 means faster)
    @return the matplotlib Figure
    """
    augmentations = {'Original': y, 'Timeshift left': y[time_shift:], 'Timeshift right': numpy.concatenate([numpy.zeros(time_shift), y[:(- time_shift)]]), 'Timestretch faster': librosa.effects.time_stretch(y, time_stretch), 'Timestretch slower': librosa.effects.time_stretch(y, (1 / time_stretch)), 'Pitchshift up': librosa.effects.pitch_shift(y, sr, pitch_shift), 'Pitchshift down': librosa.effects.pitch_shift(y, sr, (- pitch_shift))}
    # grid layout: original on top, then one row per augmentation direction
    layout = [['Original', 'Original', 'Original'], ['Timeshift right', 'Timestretch faster', 'Pitchshift up'], ['Timeshift left', 'Timestretch slower', 'Pitchshift down']]
    shape = numpy.array(layout).shape
    (fig, axs) = plt.subplots(shape[0], shape[1], figsize=(16, 6), sharex=True)
    for row in range(shape[0]):
        for col in range(shape[1]):
            description = layout[row][col]
            ax = axs[row][col]
            data = augmentations[description]
            S = numpy.abs(librosa.stft(data))
            # slight blur makes the plots easier to read
            S = scipy.ndimage.filters.gaussian_filter(S, 0.7)
            S = librosa.amplitude_to_db(S, ref=numpy.max)
            S -= S.mean()
            librosa.display.specshow(S, ax=ax, sr=sr, y_axis='hz')
            ax.set_ylim(0, 5000)
            ax.set_title(description)
    return fig
|
def main():
    """Plot augmentations of a dog-bark sample; save PNG next to this script."""
    path = '163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav'
    (samples, rate) = librosa.load(path, offset=0.1, duration=1.2)
    fig = plot_augmentations(samples, rate)
    target = __file__.replace('.py', '.png')
    fig.savefig(target, bbox_inches='tight')
|
def bandpass_filter(lowcut, highcut, fs, order, output='sos'):
    """Design a Butterworth bandpass filter.

    @param lowcut, highcut: band edges in Hz
    @param fs: sample rate in Hz
    @param order: total filter order; must be even (butter receives order//2)
    @param output: scipy.signal.butter output format ('sos', 'ba', 'zpk')
    @return the filter in the requested representation
    """
    assert (order % 2) == 0, 'order must be multiple of 2'
    assert (highcut * 0.95) < (fs / 2.0), 'highcut {} above Nyquist for fs={}'.format(highcut, fs)
    assert lowcut > 0.0, 'lowcut must be above 0'
    nyq = 0.5 * fs
    low = lowcut / nyq
    # clamp just below Nyquist to keep the design stable
    high = min(highcut / nyq, 0.99)
    # BUG fix: order/2 produced a float; scipy.signal.butter expects an int N
    return scipy.signal.butter(order // 2, [low, high], btype='band', output=output)
|
def filterbank(center, fraction, fs, order):
    """Design a fractional-octave bandpass filterbank.

    @param center: candidate center frequencies in Hz; those at or above
        Nyquist are dropped
    @param fraction: octave fraction (e.g. 3 for 1/3-octave bands)
    @param fs: sample rate in Hz
    @param order: per-band filter order passed to bandpass_filter
    @return (nominal_center_frequencies, list_of_filters)
    """
    reference = acoustics.octave.REFERENCE
    center = [f for f in center if (f < (fs / 2.0))]
    center = numpy.asarray(center)
    # map nominal centers to exact band centers/edges per the octave standard
    indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
    center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
    lower = acoustics.octave.lower_frequency(center, fraction=fraction)
    upper = acoustics.octave.upper_frequency(center, fraction=fraction)
    nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
    def f(low, high):
        return bandpass_filter(low, high, fs=fs, order=order)
    filterbank = [f(low, high) for (low, high) in zip(lower, upper)]
    return (nominal, filterbank)
|
def third_octave_filterbank(fs, order=8):
    """Build a 1/3-octave filterbank using IEC 61672 nominal center frequencies."""
    from acoustics.standards import iec_61672_1_2013 as iec_61672
    centers = iec_61672.NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
    return filterbank(centers, fraction=3, fs=fs, order=order)
|
def plot_filterbank_oct(ax, fs=44100):
    """Plot the 1/3-octave filterbank magnitude responses (dB) on ax."""
    # NOTE(review): the bank is designed at fs/2 while sosfreqz evaluates at
    # fs — possibly intentional (keeps bands below Nyquist), but confirm
    filterbank = third_octave_filterbank((fs / 2))
    for (center, sos) in zip(filterbank[0], filterbank[1]):
        (w, h) = scipy.signal.sosfreqz(sos, worN=4096, fs=fs)
        # magnitude in dB; epsilon avoids log10(0)
        db = (20 * numpy.log10((numpy.abs(h) + 1e-09)))
        ax.plot(w, db)
    ax.set_title('1/3 octave')
    ax.set_ylabel('Attenuation (dB)')
    ax.set_ylim((- 60), 5)
    ax.set_xlim(20.0, 20000.0)
|
def plot_filterbank_gammatone(ax, fs=44100):
    """Plot gammatone filterbank magnitude responses (dB) on ax.

    @return the constructed GammatoneFilterbank instance
    """
    np = numpy
    from pyfilterbank import gammatone
    gfb = gammatone.GammatoneFilterbank(samplerate=44100, startband=(- 6), endband=26, density=1.5)
    def plotfun(x, y):
        # freqz reports normalized frequency; scale to Hz for the shared axis
        xx = (x * fs)
        ax.plot(xx, (20 * np.log10((np.abs(y) + 1e-09))))
    gfb.freqz(nfft=(2 * 4096), plotfun=plotfun)
    ax.set_title('Gammatone')
    ax.set_ylim([(- 80), 1])
    ax.set_xlim([10, 20000.0])
    return gfb
|
def plot_filterbank_mel(ax, n_mels=32, n_fft=4097, fmin=10, fmax=22050, fs=44100):
    """Plot the mel filterbank magnitude responses (dB) on ax.

    @param n_mels: number of mel bands
    @param n_fft: number of FFT bands for the filterbank matrix
    @param fmin, fmax: frequency range in Hz
    @param fs: sample rate in Hz
    """
    from pyfilterbank import melbank
    # BUG fix: num_fft_bands was hardcoded to 4097, silently ignoring the
    # n_fft parameter (default unchanged, so default behavior is identical)
    (melmat, (melfreq, fftfreq)) = melbank.compute_melmat(n_mels, fmin, fmax, num_fft_bands=n_fft, sample_rate=fs)
    ax.plot(fftfreq, (20 * numpy.log10((melmat.T + 1e-09))))
    ax.set_title('Mel-scale')
    ax.set_xlabel('Frequency (Hz)')
|
def main():
    """Compare gammatone, 1/3-octave and mel filterbanks in one shared figure."""
    (fig, subplots) = plt.subplots(3, sharex=True, sharey=True, figsize=(12, 5))
    (gt_ax, oct_ax, mel_ax) = subplots
    current = fig.gca()
    plot_filterbank_gammatone(gt_ax)
    plot_filterbank_mel(mel_ax)
    plot_filterbank_oct(oct_ax)
    current.set_ylim([-40, 3])
    current.set_xlim([100, 20000])
    fig.tight_layout()
    target = __file__.replace('.py', '.png')
    fig.savefig(target, bbox_inches='tight')
|
def plot_logloss(figsize=(6, 3)):
    """Plot binary log-loss as a function of the predicted probability."""
    (fig, ax) = plt.subplots(1, figsize=figsize)
    predictions = numpy.linspace(0.0, 1.0, 300)
    loss_true0 = [log_loss([0], [p], labels=[0, 1]) for p in predictions]
    loss_true1 = [log_loss([1], [p], labels=[0, 1]) for p in predictions]
    ax.plot(predictions, loss_true0, label='true=0')
    ax.plot(predictions, loss_true1, label='true=1')
    ax.legend()
    ax.set_ylim(0, 8)
    ax.set_xlim(0, 1)
    return fig
|
def main():
    """Save the log-loss illustration as a PNG next to this script."""
    fig = plot_logloss()
    fig.tight_layout()
    target = __file__.replace('.py', '.png')
    fig.savefig(target, bbox_inches='tight')
|
def arglist(options):
    """Render an options dict as '--key=value' command-line arguments."""
    out = []
    for key, value in options.items():
        out.append('--{}={}'.format(key, value))
    return out
|
def command_for_job(options):
    """Build the full train.py invocation (argv list) for one job."""
    return ['python3', 'train.py'] + arglist(options)
|
def generate_train_jobs(experiments, settings_path, folds, overrides):
    """Create one training command per (experiment, fold) combination.

    @param experiments: DataFrame of experiment settings, iterated row-wise
    @param settings_path: value passed as --settings to every job
    @param folds: iterable of fold numbers
    @param overrides: settings that take precedence over the experiment's own
    @return list of command argv lists
    """
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
    unique = str(uuid.uuid4())[0:4]
    def name(experiment, fold):
        name = '-'.join([experiment, timestamp, unique])
        return (name + '-fold{}'.format(fold))
    def job(exname, experiment):
        # BUG fix: the old code returned inside the loop, emitting a command
        # for only the first fold; now every fold gets a command
        cmds = []
        for fold in folds:
            options = {'name': name(exname, fold), 'fold': fold, 'settings': settings_path}
            for (k, v) in experiment.items():
                options[k] = v
            for (k, v) in overrides.items():
                options[k] = v
            cmds.append(command_for_job(options))
        return cmds
    jobs = [cmd for (idx, ex) in experiments.iterrows() for cmd in job(str(idx), ex)]
    return jobs
|
def parse(args):
    """Parse command-line options for the job generator."""
    import argparse
    parser = argparse.ArgumentParser(description='Generate jobs')
    add = parser.add_argument
    add('--models', default='models.csv', help='%(default)s')
    add('--settings', default='experiments/ldcnn20k60.yaml', help='%(default)s')
    add('--jobs', dest='jobs_dir', default='./data/jobs', help='%(default)s')
    add('--check', action='store_true', help='Only run a pre-flight check')
    return parser.parse_args(args)
|
def main():
    """Generate and print the shell commands for all training jobs."""
    args = parse(sys.argv[1:])
    models = pandas.read_csv(args.models)
    settings = common.load_settings_path(args.settings)
    overrides = {}
    folds = list(range(0, 9))
    if args.check:
        # pre-flight mode: a single fold and one batch of train/val samples
        folds = (1,)
        overrides['train_samples'] = (settings['batch'] * 1)
        overrides['val_samples'] = (settings['batch'] * 1)
    commands = generate_train_jobs(models, args.settings, folds, overrides)
    lines = [' '.join(cmd) for cmd in commands]
    print('\n'.join(lines))
|
@pytest.mark.skip('fails right now')
@pytest.mark.parametrize('family', FAMILIES)
def test_models_basic(family):
    """Every model family should build into a keras.Model."""
    base = {'model': family, 'frames': 31, 'n_mels': 60, 'samplerate': 22050}
    s = settings.load_settings(base)
    if family == 'sbcnn':
        s['downsample_size'] = (3, 2)
        s['conv_size'] = (3, 3)
    if family == 'strided':
        s['downsample_size'] = (3, 3)
        s['conv_size'] = (3, 3)
        s['conv_block'] = 'conv'
        s['filters'] = 12
    built = models.build(s)
    assert isinstance(built, keras.Model)
|
@pytest.mark.parametrize('conv_type', CONV_TYPES)
def test_strided_variations(conv_type):
    """Every conv block type should build for the strided model family."""
    base = {'model': 'strided', 'frames': 31, 'n_mels': 60, 'samplerate': 22050, 'conv_block': conv_type, 'filters': 20}
    s = settings.load_settings(base)
    s['conv_size'] = (3, 3)
    s['downsample_size'] = (2, 2)
    built = models.build(s)
    assert isinstance(built, keras.Model)
|
def test_conv_ds():
    """Depthwise-separable conv should be roughly 10x cheaper for a 5x5 kernel."""
    kernel = (5, 5)
    shape = (60, 31, 16)
    channels = 16
    regular = stats.compute_conv2d(*shape, channels, *kernel)
    separable = stats.compute_conv2d_ds(*shape, channels, *kernel)
    assert (regular / separable) > 9.0
|
def test_conv_ds3x3():
    """Depthwise-separable conv should be >7.5x cheaper for a 3x3 kernel."""
    kernel = (3, 3)
    shape = (60, 31, 64)
    channels = 64
    regular = stats.compute_conv2d(*shape, channels, *kernel)
    separable = stats.compute_conv2d_ds(*shape, channels, *kernel)
    assert (regular / separable) > 7.5
|
@pytest.mark.skip('fails')
def test_generator_fake_loader():
    """dataframe_generator should yield (X, y) batches of the right shapes.

    Uses a zero-filled loader so no audio files are required.
    """
    dataset_path = 'data/UrbanSound8K/'
    urbansound8k.default_path = dataset_path
    data = urbansound8k.load_dataset()
    # BUG fix: urbansound8k.folds() returns a list of (train, val, test)
    # tuples, not a (folds, test) pair — the old 2-tuple unpack raised
    folds = urbansound8k.folds(data)
    data_length = 16
    batch_size = 8
    frames = 72
    bands = 32
    n_classes = 10
    def zero_loader(s):
        return numpy.zeros((bands, frames, 1))
    fold = folds[0][0]
    X = fold[0:data_length]
    Y = fold.classID[0:data_length]
    g = train.dataframe_generator(X, Y, loader=zero_loader, batchsize=batch_size, n_classes=n_classes)
    n_batches = 3
    batches = list(itertools.islice(g, n_batches))
    assert (len(batches) == n_batches)
    assert (len(batches[0]) == 2)
    assert (batches[0][0].shape == (batch_size, bands, frames, 1))
    assert (batches[0][1].shape == (batch_size, n_classes))
|
def test_windows_shorter_than_window():
    """A clip shorter than one analysis window still yields exactly one window."""
    frame_samples = 256
    window_frames = 64
    fs = 16000
    length = 0.4 * fs
    windows = list(features.sample_windows(int(length), frame_samples, window_frames))
    assert len(windows) == 1, len(windows)
    # the final window must end exactly at the clip end
    assert windows[-1][1] == length
|
def test_window_typical():
    """A typical 4-second clip yields 8 analysis windows, ending at clip end."""
    frame_samples = 256
    window_frames = 64
    fs = 16000
    length = 4.0 * fs
    windows = list(features.sample_windows(int(length), frame_samples, window_frames))
    assert len(windows) == 8, len(windows)
    assert windows[-1][1] == length
|
def _test_predict_windowed():
    """Disabled test (leading underscore): exercises features.predict_voted.

    NOTE(review): relies on module-level `test` and `model` globals that are
    not defined in this file — restore them before re-enabling.
    """
    t = test[0:10]
    sbcnn16k32_settings = dict(feature='mels', samplerate=16000, n_mels=32, fmin=0, fmax=8000, n_fft=512, hop_length=256, augmentations=5)
    def load_sample32(sample):
        return features.load_sample(sample, sbcnn16k32_settings, window_frames=72, feature_dir='../../scratch/aug')
    mean_m = features.predict_voted(sbcnn16k32_settings, model, t, loader=load_sample32, method='mean')
|
def test_precompute():
    """Feature precomputation must write the expected per-sample feature files."""
    settings = dict(feature='mels', samplerate=16000, n_mels=32, fmin=0, fmax=8000, n_fft=512, hop_length=256, augmentations=12)
    dir = './pre2'
    # start from a clean output directory
    if os.path.exists(dir):
        shutil.rmtree(dir)
    workdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/'))
    data = urbansound8k.load_dataset()
    urbansound8k.maybe_download_dataset(workdir)
    d = os.path.join(dir, features.settings_id(settings))
    expect_path = features.feature_path(data.iloc[0], d)
    assert (not os.path.exists(expect_path)), expect_path
    # precompute only a small subset to keep the test fast
    preprocess.precompute(data[0:4], settings, out_dir=d, verbose=0, force=True, n_jobs=2)
    assert os.path.exists(expect_path), expect_path
|
def test_grouped_confusion():
    """Grouping a 10-class confusion matrix must preserve the total count.

    Within-group confusions (between classes merged into the same group,
    e.g. 'social_activity') count as correct in the grouped matrix.
    """
    cm = numpy.array([[82, 0, 3, 0, 0, 10, 0, 4, 1, 0], [3, 29, 0, 0, 0, 0, 1, 0, 0, 0], [4, 3, 37, 14, 4, 4, 0, 0, 2, 32], [5, 2, 5, 78, 4, 0, 0, 0, 0, 6], [23, 2, 4, 1, 55, 4, 2, 6, 3, 0], [9, 0, 0, 4, 3, 70, 0, 5, 1, 1], [0, 0, 0, 5, 0, 0, 27, 0, 0, 0], [0, 0, 2, 0, 1, 1, 1, 91, 0, 0], [9, 11, 9, 4, 0, 1, 0, 0, 46, 3], [1, 7, 7, 0, 7, 0, 0, 0, 3, 75]])
    (gcm, gnames) = report.grouped_confusion(cm, report.groups)
    assert (numpy.sum(cm) == numpy.sum(gcm))
    assert (gnames[0] == 'social_activity')
    assert (gnames[3] == 'domestic_machines')
    # the grouped diagonal sums the diagonal and within-group off-diagonal cells
    expect_correct_social = (((((37 + 78) + 75) + (14 + 32)) + (5 + 6)) + (7 + 0))
    assert (gcm[0][0] == expect_correct_social), (gcm[0][0], expect_correct_social)
    assert (gcm[3][3] == 82)
|
@pytest.mark.parametrize('example', CORRECT_FOLDS.keys())
def test_ensure_valid_fold_passes_correct(example):
    """Known-good folds must pass validation without raising."""
    folds.ensure_valid_fold(CORRECT_FOLDS[example])
|
@pytest.mark.parametrize('example', WRONG_FOLDS.keys())
def test_ensure_valid_fold_detects_wrong(example):
    """Known-bad folds must be rejected with an AssertionError."""
    with pytest.raises(AssertionError) as e_info:
        folds.ensure_valid_fold(WRONG_FOLDS[example])
|
def test_folds_idx():
    """The generated folds must cycle the test fold from 0 through 9."""
    generated = folds.folds_idx(10)
    print('\n' + '\n'.join([str(fold) for fold in generated]))
    assert generated[0][2][0] == 0, 'first test fold should be 0'
    assert generated[-1][2][0] == 9, 'last test fold should be 9'
|
def test_folds():
    """Splitting the dataset must produce exactly 10 cross-validation folds."""
    data = urbansound8k.load_dataset()
    assert len(urbansound8k.folds(data)) == 10
|
class ConveRTModelConfig(NamedTuple):
    """Architecture hyperparameters for the ConveRT encoder."""
    num_embed_hidden: int = 512
    feed_forward1_hidden: int = 2048
    feed_forward2_hidden: int = 1024
    num_attention_project: int = 64
    vocab_size: int = 25000
    num_encoder_layers: int = 6
    dropout_rate: float = 0.0
    # NOTE(review): purpose of `n` is not evident from this file — confirm
    n: int = 121
    # NOTE(review): mutable list default is one shared object across instances
    relative_attns: list = [3, 5, 48, 48, 48, 48]
    num_attention_heads: int = 2
    token_sequence_truncation: int = 60
|
class ConveRTTrainConfig(NamedTuple):
    """Training hyperparameters, file paths and runtime options for ConveRT."""
    # paths are resolved relative to the module-level `dirname`
    sp_model_path: str = os.path.join(dirname, 'data/en.wiki.bpe.vs25000.model')
    dataset_path: str = os.path.join(dirname, 'data/sample-dataset.json')
    test_dataset_path: str = 'data/sample-dataset.json'
    model_save_dir: str = 'lightning_logs/checkpoints/'
    log_dir: str = 'lightning_logs'
    device: str = 'cpu'
    # NOTE(review): field name looks like a typo for 'use_data_parallel';
    # kept as-is for compatibility with existing callers
    use_data_paraller: bool = True
    is_reddit: bool = True
    train_batch_size: int = 64
    test_batch_size: int = 256
    split_size: int = 8
    learning_rate: float = 0.001
    # warm-up schedule: scale from lr_warmup_start to lr_warmup_end over warmup_batch
    lr_warmup_start: float = 0.1
    lr_warmup_end: float = 1.0
    warmup_batch: float = 10000
    final_batch: float = 100000000.0
    learning_rate_end: float = 0.0001
    epochs: int = 10
    grad_norm_clip: float = 1.0
    smoothing: float = 0.2
    l2_weight_decay: float = 1e-05
|
class LossFunction(nn.Module):
    """Batch-negative ranking loss over context/reply embedding similarities."""
    @staticmethod
    def cosine_similarity_matrix(context_embed: torch.Tensor, reply_embed: torch.Tensor) -> torch.Tensor:
        """Pairwise similarity matrix between all contexts and replies.

        NOTE(review): this is a plain dot product; it equals cosine similarity
        only if embeddings are L2-normalized upstream — confirm.
        """
        assert (context_embed.size(0) == reply_embed.size(0))
        cosine_similarity = torch.matmul(context_embed, reply_embed.T)
        return cosine_similarity
    def forward(self, context_embed: torch.Tensor, reply_embed: torch.Tensor) -> torch.Tensor:
        """Compute the loss for one batch of (context, reply) embedding pairs."""
        cosine_similarity = self.cosine_similarity_matrix(context_embed, reply_embed)
        # reward matched pairs: negative sum of the diagonal similarities
        j = (- torch.sum(torch.diagonal(cosine_similarity)))
        # zero the diagonal so remaining terms cover only mismatched pairs
        # (note: mutates the similarity matrix in place)
        cosine_similarity.diagonal().copy_(torch.zeros(cosine_similarity.size(0)))
        # weighted combination: matched-pair reward plus mean mismatch penalty
        j = ((0.8 * j) + ((0.2 / (cosine_similarity.size(0) * (cosine_similarity.size(0) - 1))) * torch.sum(cosine_similarity)))
        # softmax-style normalization term per column of the (zero-diagonal) matrix
        j += torch.sum(torch.logsumexp(cosine_similarity, dim=0))
        return j
|
@dataclass
class EncoderInputFeature():
    """Tensors describing a single tokenized encoder input sequence."""
    input_ids: torch.Tensor
    attention_mask: torch.Tensor
    position_ids: torch.Tensor
    input_lengths: torch.Tensor
    def pad_sequence(self, seq_len: int):
        """Right-pad input_ids, attention_mask and position_ids with zeros to seq_len."""
        for attr in ('input_ids', 'attention_mask', 'position_ids'):
            tensor = getattr(self, attr)
            padding = [0, (seq_len - tensor.size(0))]
            setattr(self, attr, pad(tensor, padding, 'constant', 0))
|
@dataclass
class EmbeddingPair():
    """A (context, reply) pair of encoder inputs forming one training example."""
    # Encoder features for the context utterance.
    context: EncoderInputFeature
    # Encoder features for the ground-truth reply.
    reply: EncoderInputFeature
|
class DataModule(pl.LightningDataModule):
    """Lightning data module that pads and stacks EncoderInputFeature pairs."""

    def __init__(self):
        super().__init__()
        # Tensor attributes that are stacked along dim 0 when forming a batch.
        self.input_attributes = ['input_ids', 'attention_mask', 'position_ids', 'input_lengths']

    def batching_input_features(self, encoder_inputs: List[EncoderInputFeature]) -> EncoderInputFeature:
        """Pad every example to the longest sequence in the batch, then stack."""
        longest = max(int(item.input_lengths.item()) for item in encoder_inputs)
        for item in encoder_inputs:
            item.pad_sequence(longest)
        stacked = {}
        for attr in self.input_attributes:
            stacked[attr] = torch.stack([getattr(item, attr) for item in encoder_inputs], dim=0)
        return EncoderInputFeature(**stacked)

    def convert_collate_fn(self, features: List[EmbeddingPair]) -> EmbeddingPair:
        """Collate a list of pairs into one batched EmbeddingPair."""
        contexts = self.batching_input_features([f.context for f in features])
        replies = self.batching_input_features([f.reply for f in features])
        return EmbeddingPair(context=contexts, reply=replies)

    def train_dataloader(self, train_dataset):
        # NOTE(review): reads the module-level `config` for the batch size.
        return DataLoader(train_dataset, config.train_batch_size, collate_fn=self.convert_collate_fn, drop_last=True)

    def val_dataloader(self):
        pass

    def test_dataloader(self):
        pass
|
class DatasetInstance(NamedTuple):
    """One raw training example: ordered context turns plus the response."""
    # Context turns, ordered by their (sorted) JSON key names.
    context: List[str]
    # The reply to be ranked against the context.
    response: str
|
def load_instances_from_reddit_json(dataset_path: str) -> List[DatasetInstance]:
    """Parse a JSON-lines Reddit dump into (context, response) instances.

    Each line is a JSON object; every key containing the substring "context"
    contributes one context turn (in sorted-key order), and "response" holds
    the reply.
    """
    instances: List[DatasetInstance] = []
    with open(dataset_path) as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            ordered_keys = sorted(key for key in record.keys() if 'context' in key)
            instances.append(DatasetInstance(context=[record[key] for key in ordered_keys], response=record['response']))
    return instances
|
class RedditData(torch.utils.data.Dataset):
    """Dataset yielding (context, reply) EncoderInputFeature pairs.

    Text is tokenized with SentencePiece and optionally truncated to
    `truncation_length` subword ids; padding happens later in the collate fn.
    """

    def __init__(self, instances: List[DatasetInstance], sp_processor: SentencePieceProcessor, truncation_length: int):
        self.sp_processor = sp_processor
        self.instances = instances
        self.truncation_length = truncation_length

    def __len__(self):
        return len(self.instances)

    def __getitem__(self, item):
        instance = self.instances[item]
        # Only the first context turn is used (single-context model).
        context = self._convert_instance_to_embedding(instance.context[0])
        reply = self._convert_instance_to_embedding(instance.response)
        return EmbeddingPair(context=context, reply=reply)

    def _convert_instance_to_embedding(self, input_str: str) -> EncoderInputFeature:
        """Tokenize one string and wrap ids/mask/positions as tensors on config.device."""
        token_ids = self.sp_processor.EncodeAsIds(input_str)
        if self.truncation_length:
            token_ids = token_ids[:self.truncation_length]
        length = len(token_ids)
        return EncoderInputFeature(
            input_ids=torch.tensor(token_ids).to(config.device),
            attention_mask=torch.tensor([1] * length).to(config.device),
            position_ids=torch.tensor(list(range(length))).to(config.device),
            input_lengths=torch.tensor(length).to(config.device),
        )
|
class LearningRateDecayCallback(pl.Callback):
    """Lightning callback: linear LR warm-up followed by cosine decay.

    The schedule is driven purely by ``batch_idx``: for the first
    ``warmup_batch`` batches the LR ramps linearly from ``lr_warmup_start``
    to ``lr_warmup_end``; afterwards it follows a cosine curve floored at
    ``learning_rate``.
    """
    def __init__(self, config, lr_decay=True):
        super().__init__()
        self.lr_warmup_end = config.lr_warmup_end
        self.lr_warmup_start = config.lr_warmup_start
        self.learning_rate = config.learning_rate
        self.warmup_batch = config.warmup_batch
        self.final_batch = config.final_batch
        # When False, the callback leaves the optimizer's LR untouched.
        self.lr_decay = lr_decay
    def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        """Recompute and apply the learning rate after every training batch."""
        optimizer = trainer.optimizers[0]
        if self.lr_decay:
            if (batch_idx < self.warmup_batch):
                # Linear warm-up from lr_warmup_start to lr_warmup_end.
                lr_mult = (float(batch_idx) / float(max(1, self.warmup_batch)))
                lr = (self.lr_warmup_start + (lr_mult * (self.lr_warmup_end - self.lr_warmup_start)))
            else:
                # Cosine decay phase, floored at `learning_rate`.
                # NOTE(review): the cosine interpolates between lr_warmup_end and
                # learning_rate (learning_rate_end is unused here) — confirm intended.
                progress = (float((batch_idx - self.warmup_batch)) / float(max(1, (self.final_batch - self.warmup_batch))))
                lr = max((self.learning_rate + ((0.5 * (1.0 + math.cos((math.pi * progress)))) * (self.lr_warmup_end - self.learning_rate))), self.learning_rate)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
|
def set_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU and CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
|
def find_subword_params(model):
    """Return the subword-embedding parameters of `model` plus their names.

    Only parameters under the ``transformer_layers.subword_embedding`` module
    are collected — these are the only parameters gradient-clipped in the
    paper. Returns ``(params_sorted_by_name, set_of_qualified_names)``.
    """
    target_prefix = 'transformer_layers.subword_embedding'
    qualified_names = set()
    for module_name, module in model.named_modules():
        if not module_name.startswith(target_prefix):
            continue
        # named_parameters() yields names relative to `module`; re-qualify them.
        for param_name, _ in module.named_parameters():
            qualified_names.add(f'{module_name}.{param_name}' if module_name else param_name)
    all_params = dict(model.named_parameters())
    return [all_params[name] for name in sorted(qualified_names)], qualified_names
|
class SingleContextConvert(pl.LightningModule):
    """Single-context ConveRT dual encoder trained with an in-batch ranking loss.

    Context and reply share the transformer stack (``transformer_layers``) but
    have separate final projections (``ff2_context`` / ``ff2_reply``).
    """
    def __init__(self, model_config: ConveRTModelConfig, train_config: ConveRTTrainConfig):
        super().__init__()
        self.model_config = model_config
        self.train_config = train_config
        self.transformer_layers = TransformerLayers(model_config)
        self.ff2_context = FeedForward2(model_config)
        self.ff2_reply = FeedForward2(model_config)
        self.loss_function = LossFunction()
        self.weight_decay = train_config.l2_weight_decay
        # NOTE(review): _field_defaults holds the *declared* NamedTuple defaults,
        # not the values actually passed for this run — hparams may misreport
        # overridden settings; confirm before trusting logged hyper-parameters.
        self.hparams = self.train_config._field_defaults
        self.hparams.update(self.model_config._field_defaults)
        # Populated later by register_subword_params(); used for grad clipping.
        self.subword_params = None
        logger.info('number of parameters: %e', sum((p.numel() for p in self.parameters())))
    def register_subword_params(self):
        """Cache the subword-embedding parameters that get gradient-clipped."""
        self.subword_params = find_subword_params(self)[0]
    def forward(self, x):
        """Encode a batch of encoder inputs through the shared transformer stack."""
        return self.transformer_layers(x)
    def backward(self, trainer, loss, optimizer, optimizer_idx):
        """Override Lightning's backward hook: clip only the subword-embedding
        gradients (as in the paper), after loss.backward() but before the
        optimizer step."""
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.subword_params, self.train_config.grad_norm_clip)
    def configure_optimizers(self):
        """AdamW with L2 weight decay on all parameters except biases and
        LayerNorm weights (standard modern-NLP practice; the paper does not
        specify which parameters to exempt)."""
        no_decay = ['bias', 'LayerNorm.weight']
        params_decay = [p for (n, p) in self.named_parameters() if (not any(((nd in n) for nd in no_decay)))]
        params_nodecay = [p for (n, p) in self.named_parameters() if any(((nd in n) for nd in no_decay))]
        optim_groups = [{'params': params_decay, 'weight_decay': self.hparams.l2_weight_decay}, {'params': params_nodecay, 'weight_decay': 0.0}]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.hparams.learning_rate)
        return optimizer
    def training_step(self, batch, batch_idx):
        """One ranking-loss step over a batch of (context, reply) pairs."""
        batch_context = batch.context
        batch_reply = batch.reply
        rx = self(batch_context)
        ry = self(batch_reply)
        # FF2 pools transformer outputs into one fixed-size vector per example.
        hx = self.ff2_context(rx, batch_context.attention_mask)
        hy = self.ff2_reply(ry, batch_reply.attention_mask)
        loss = self.loss_function(hx, hy)
        tqdm_dict = {'train_loss': loss}
        output = OrderedDict({'loss': loss, 'progress_bar': tqdm_dict, 'log': tqdm_dict})
        return output
    def validation_step(self, batch, batch_idx):
        """Validation reuses the training step; only the loss is reported."""
        output = self.training_step(batch, batch_idx)
        val_output = {'val_loss': output['loss']}
        return val_output
|
def _parse_args():
'Parse command-line arguments.'
parser = argparse.ArgumentParser()
parser.add_argument('--progress_bar_refresh_rate', type=int, default=1)
parser.add_argument('--row_log_interval', type=int, default=1)
args = parser.parse_args()
return args
|
def main(**kwargs):
    """End-to-end training entry point.

    Builds the tokenizer, dataset, model and trainer, then fits. ``kwargs``
    are forwarded to ``pl.Trainer`` (e.g. ``fast_dev_run=True`` in tests).
    """
    set_seed(1)
    train_config = ConveRTTrainConfig()
    model_config = ConveRTModelConfig()
    tokenizer = SentencePieceProcessor()
    args = _parse_args()
    tokenizer.Load(train_config.sp_model_path)
    train_instances = load_instances_from_reddit_json(train_config.dataset_path)
    # Truncate token sequences to 60 subwords (matches token_sequence_truncation).
    RD = RedditData(train_instances, tokenizer, 60)
    dm = DataModule()
    train_loader = dm.train_dataloader(RD)
    model = SingleContextConvert(model_config, train_config)
    lr_decay = LearningRateDecayCallback(train_config)
    model.register_subword_params()
    trainer = pl.Trainer.from_argparse_args(args, callbacks=[lr_decay], **kwargs)
    # NOTE(review): validation reuses the training loader — no held-out split here.
    trainer.fit(model, train_dataloader=train_loader, val_dataloaders=train_loader)
|
@pytest.fixture
def config():
    """Default training configuration shared by the tests below."""
    return ConveRTTrainConfig()
|
@pytest.fixture
def tokenizer(config) -> SentencePieceProcessor:
    """A SentencePieceProcessor loaded with the configured BPE model.

    Bug fix: ``config`` must be declared as a fixture parameter so pytest
    injects the ConveRTTrainConfig instance. Previously the body referenced
    the module-level fixture *function* named ``config``, so
    ``config.sp_model_path`` raised AttributeError at test time.
    """
    tokenizer = SentencePieceProcessor()
    tokenizer.Load(config.sp_model_path)
    return tokenizer
|
def test_load_instances_from_reddit_json(config):
    """The bundled sample dataset should parse into exactly 1000 instances."""
    loaded = load_instances_from_reddit_json(config.dataset_path)
    assert len(loaded) == 1000
|
class TestModelTraining(unittest.TestCase):
    """Smoke test: a fast dev run of training completes quickly and without error."""

    def test_fast_dev_run(self):
        start = time()
        try:
            main(fast_dev_run=True)
        except Exception as exc:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt, and hid the actual failure cause.
            self.fail('Obvious Training Problem! ({!r})'.format(exc))
        self.assertLess(time() - start, 10)
|
@pytest.fixture
def model_config():
    """Default model architecture configuration for the shape tests below."""
    return ConveRTModelConfig()
|
@pytest.fixture
def train_config():
    """Training configuration with a small LR, used only for batch sizing in tests."""
    return ConveRTTrainConfig(train_batch_size=64, split_size=8, learning_rate=2e-05)
|
def test_circulant_t():
    """circulant_mask basic correctness, plus edge window sizes must not raise.

    Bug fix: the original `except ExceptionType:` referenced an undefined name
    and called `self.fail` in a plain pytest function (no `self`), so any
    failure in the try-block would crash with a NameError instead of a clean
    test failure.
    """
    assert circulant_mask(50, 47).sum().item() == 2494
    try:
        # window >= size, window == size-ish, and window < size must all work
        circulant_mask(47, 50)
        circulant_mask(47, 47)
        circulant_mask(47, 45)
    except Exception:
        pytest.fail('circulant_mask failed on edge window sizes')
|
def test_SubwordEmbedding(train_config, model_config):
    """Embedding output must have shape (batch, seq, num_embed_hidden)."""
    embedding = SubwordEmbedding(model_config)
    token_ids = torch.randint(high=model_config.vocab_size, size=(train_config.train_batch_size, SEQ_LEN))
    positions = torch.randint(high=model_config.vocab_size, size=(train_config.train_batch_size, SEQ_LEN))
    output = embedding(input_ids=token_ids, position_ids=positions)
    assert output.size() == (train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
|
def test_SelfAttention(model_config, train_config):
    """Self-attention must preserve the (batch, seq, hidden) shape of its query."""
    attention = SelfAttention(model_config, relative_attention)
    query = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    mask = torch.ones(query.size()[:-1], dtype=torch.float)
    result = attention(query, mask)
    assert result.size() == (train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
|
def test_FeedForward1(train_config, model_config):
    """The first feed-forward block is shape-preserving."""
    ff1 = FeedForward1(model_config.num_embed_hidden, model_config.feed_forward1_hidden, model_config.dropout_rate)
    hidden = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    assert ff1(hidden).size() == hidden.size()
|
def test_SharedInnerBlock(train_config, model_config):
    """A shared transformer block is shape-preserving for any configured attention span."""
    from random import randrange
    block = SharedInnerBlock(model_config, model_config.relative_attns[randrange(6)])
    hidden = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    mask = torch.ones(hidden.size()[:-1], dtype=torch.float)
    assert block(hidden, mask).size() == hidden.size()
|
def test_MultiheadAttention(train_config, model_config):
    """Multi-head attention widens the hidden dim by the number of heads."""
    mha = MultiheadAttention(model_config)
    hidden = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    mask = torch.ones(hidden.size()[:-1], dtype=torch.float)
    assert model_config.num_embed_hidden % mha.num_attention_heads == 0
    expected = (train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden * model_config.num_attention_heads)
    assert mha(hidden, mask).size() == expected
|
def test_TransformerLayers(model_config):
    """Transformer stack output = embedding shape with hidden dim widened by #heads."""
    layers = TransformerLayers(model_config)
    pickle_path = str(Path(__file__).parents[1].resolve() / 'data' / 'batch_context.pickle')
    with open(pickle_path, 'rb') as input_file:
        encoder_input = pickle.load(input_file)
    print(type(encoder_input))
    embedding = SubwordEmbedding(model_config)
    embedded = embedding(encoder_input.input_ids, encoder_input.position_ids)
    widened = model_config.num_embed_hidden * model_config.num_attention_heads
    assert layers(encoder_input).size() == embedded.size()[:-1] + (widened,)
|
def test_FeedForward2(model_config, train_config):
    """FF2 pools the sequence dimension into one num_embed_hidden vector per example."""
    wide_hidden = model_config.num_embed_hidden * model_config.num_attention_heads
    hidden = torch.rand(train_config.train_batch_size, SEQ_LEN, wide_hidden)
    mask = torch.ones(hidden.size()[:-1], dtype=torch.float)
    ff2 = FeedForward2(model_config)
    assert ff2(hidden, mask).size() == (train_config.train_batch_size, model_config.num_embed_hidden)
|
def pytest_ignore_collect(path, config):
    """Skip collecting paths the doctest plugin would fail to import
    (missing optional dependencies)."""
    for pattern in ignore_test_paths:
        if path.fnmatch(pattern):
            return True
    return False
|
class Initializer(object):
    """Abstract base class for weight-tensor initializers.

    Subclasses implement :meth:`sample`, which draws a tensor of the requested
    shape; instances are callable and simply delegate to :meth:`sample`.
    """

    def __call__(self, shape):
        """Allow instances to be used like functions: ``init(shape)``."""
        return self.sample(shape)

    def sample(self, shape):
        """Return a matrix of size `shape` and dtype theano.config.floatX.

        Parameters
        ----------
        shape : tuple or int
            Integer or tuple specifying the size of the returned matrix.

        Raises
        ------
        NotImplementedError
            Always, on the base class; subclasses must override.
        """
        raise NotImplementedError()
|
class Normal(Initializer):
    """Initializer drawing weights from the Gaussian N(mean, std).

    Parameters
    ----------
    std : float
        Standard deviation of the initial parameters.
    mean : float
        Mean of the initial parameters.
    """

    def __init__(self, std=0.01, mean=0.0):
        self.std, self.mean = std, mean

    def sample(self, shape):
        # `floatX` and `get_rng` come from the surrounding module (theano helpers).
        return floatX(get_rng().normal(self.mean, self.std, size=shape))
|
class Uniform(Initializer):
    """Initializer drawing weights from the uniform distribution U(a, b).

    Parameters
    ----------
    range : float or tuple
        Used when `std` is None. A float yields U(-range, range); a tuple
        yields U(range[0], range[1]).
    std : float or None
        When given, bounds are derived so the distribution has standard
        deviation `std`: U(mean - sqrt(3)*std, mean + sqrt(3)*std).
    mean : float
        See `std`.
    """

    def __init__(self, range=0.01, std=None, mean=0.0):
        if std is not None:
            # sqrt(3)*std gives a uniform distribution with the requested std.
            spread = np.sqrt(3) * std
            bounds = (mean - spread, mean + spread)
        else:
            try:
                low, high = range
            except TypeError:
                low, high = -range, range
            bounds = (low, high)
        self.range = bounds

    def sample(self, shape):
        low, high = self.range
        return floatX(get_rng().uniform(low=low, high=high, size=shape))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.