code
stringlengths
17
6.64M
class ImpalaModel(nn.Module):
    """IMPALA-style CNN torso: three conv blocks feeding a 256-unit
    fully-connected embedding.

    `output_dim` advertises the embedding width (256) to downstream heads.
    Relies on project helpers ImpalaBlock, Flatten and xavier_uniform_init.
    """

    def __init__(self, in_channels, **kwargs):
        super(ImpalaModel, self).__init__()
        self.block1 = ImpalaBlock(in_channels=in_channels, out_channels=16)
        self.block2 = ImpalaBlock(in_channels=16, out_channels=32)
        self.block3 = ImpalaBlock(in_channels=32, out_channels=32)
        # 32 channels over an 8x8 spatial map after the blocks' pooling
        # (assumes a 64x64 input -- TODO confirm against callers).
        self.fc = nn.Linear(in_features=32 * 8 * 8, out_features=256)
        self.output_dim = 256
        self.apply(xavier_uniform_init)

    def forward(self, x):
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = nn.ReLU()(h)
        h = Flatten()(h)
        h = self.fc(h)
        return nn.ReLU()(h)
class GRU(nn.Module):
    """Single-layer GRU that respects episode boundaries in RL rollouts.

    `masks` carries 0.0 at timesteps where an episode ended; multiplying the
    hidden state by the mask resets it there, so no information leaks across
    episodes. Presumably follows the common pytorch-a2c-ppo baseline recipe
    -- confirm against the training loop.
    """

    def __init__(self, input_size, hidden_size):
        super(GRU, self).__init__()
        # orthogonal_init is a project helper defined elsewhere in the file/repo.
        self.gru = orthogonal_init(nn.GRU(input_size, hidden_size), gain=1.0)

    def forward(self, x, hxs, masks):
        # Case 1: acting -- one timestep per environment, x is (N, features).
        if (x.size(0) == hxs.size(0)):
            masks = masks.unsqueeze((- 1))
            (x, hxs) = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # Case 2: training -- x is a flattened (T*N, features) rollout.
            N = hxs.size(0)
            T = int((x.size(0) / N))
            x = x.view(T, N, x.size(1))
            masks = masks.view(T, N)
            # Indices of timesteps where any environment's episode ended;
            # the sequence must be split there to reset hidden state.
            has_zeros = (masks[1:] == 0.0).any(dim=(- 1)).nonzero().squeeze().cpu()
            if (has_zeros.dim() == 0):
                # 0-d tensor: exactly one split point.
                has_zeros = [(has_zeros.item() + 1)]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # Bracket the split points with the rollout's start and end.
            has_zeros = (([0] + has_zeros) + [T])
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range((len(has_zeros) - 1)):
                # Run each boundary-free segment through the GRU in one call,
                # resetting hidden state via the mask at the segment start.
                start_idx = has_zeros[i]
                end_idx = has_zeros[(i + 1)]
                (rnn_scores, hxs) = self.gru(x[start_idx:end_idx], (hxs * masks[start_idx].view(1, (- 1), 1)))
                outputs.append(rnn_scores)
            x = torch.cat(outputs, dim=0)
            # Flatten back to (T*N, hidden) to match the non-recurrent path.
            x = x.view((T * N), (- 1))
            hxs = hxs.squeeze(0)
        return (x, hxs)
class CategoricalPolicy(nn.Module):
    """Actor-critic head producing a categorical action distribution and a
    scalar value estimate from an embedding network, optionally recurrent.

    embedder: module mapping observations to a flat embedding
              (must expose `output_dim`)
    recurrent: when True, a GRU is inserted between embedder and the heads
    action_size: number of discrete actions
    """

    def __init__(self, embedder, recurrent, action_size):
        super(CategoricalPolicy, self).__init__()
        self.embedder = embedder
        # Small gain keeps initial policy logits near uniform.
        self.fc_policy = orthogonal_init(
            nn.Linear(self.embedder.output_dim, action_size), gain=0.01)
        self.fc_value = orthogonal_init(
            nn.Linear(self.embedder.output_dim, 1), gain=1.0)
        self.recurrent = recurrent
        if self.recurrent:
            self.gru = GRU(self.embedder.output_dim, self.embedder.output_dim)

    def is_recurrent(self):
        return self.recurrent

    def forward(self, x, hx, masks):
        hidden = self.embedder(x)
        if self.recurrent:
            hidden, hx = self.gru(hidden, hx, masks)
        logits = self.fc_policy(hidden)
        log_probs = F.log_softmax(logits, dim=1)
        dist = Categorical(logits=log_probs)
        value = self.fc_value(hidden).reshape(-1)
        return dist, value, hx
def create_eval_set(fold):
    """Build the evaluation dataframe for one cross-validation fold.

    Returns the held-out test set concatenated with the fold's validation
    split; every row is tagged with a 'set' column. The train split is
    tagged too but intentionally not included in the returned frame.
    """
    data = urbansound8k.load_dataset()
    folds, test = urbansound8k.folds(data)
    test = test.copy()
    train = folds[fold][0].copy()
    val = folds[fold][1].copy()
    test['set'] = 'test'
    train['set'] = 'train'
    val['set'] = 'val'
    return pandas.concat([test, val])
def load_sample(sample):
    """Load precomputed features for one audio sample, configured by the
    module-level `exsettings` experiment settings."""
    fsettings = features.settings(exsettings)
    return features.load_sample(
        sample,
        fsettings,
        start_time=sample.start,
        window_frames=exsettings['frames'],
        feature_dir='data/features',
    )
def predict(model, data):
    """Run voted prediction over analysis windows, with voting method and
    window overlap taken from the module-level `exsettings`."""
    return features.predict_voted(
        exsettings,
        model,
        data,
        loader=load_sample,
        method=exsettings['voting'],
        overlap=exsettings['voting_overlap'],
    )
def model_predict(predictor, model_path, data):
    """Load a Keras model from `model_path` and apply `predictor` to it."""
    loaded = keras.models.load_model(model_path)
    return predictor(loaded, data)
def threshold(df, q=0.8):
    """Return the q-quantile of `best_p` over the misclassified rows.

    Intended as a confidence cutoff: predictions below this probability
    are as uncertain as the bulk of the errors.
    """
    wrong = df[df.correct == False]  # noqa: E712 -- pandas elementwise compare
    return wrong.best_p.quantile(q=q)
def plot_errors(df, ax=None, q=0.8, bins=20, ylim=None):
    """Histogram prediction confidence (`best_p`) for correct (green) versus
    incorrect (red) rows, optionally marking the error-quantile threshold.

    Assumes `df.correct` contains both False and True groups (False first
    in groupby order); asserted below.
    """
    if ax is None:
        _fig, ax = plt.subplots(1)
    (_, wrong), (isright, right) = df.groupby('correct')
    assert isright == True  # noqa: E712 -- sanity-check group ordering
    right.best_p.hist(ax=ax, color='green', alpha=0.4, bins=bins)
    wrong.best_p.hist(ax=ax, color='red', alpha=0.4, bins=bins)
    ax.set_xlim((0, 1.0))
    if ylim:
        ax.set_ylim(ylim)
    if q is not None:
        p_min = threshold(df, q=q)
        ax.axvline(x=p_min, color='black', linewidth=1.0, alpha=0.7)
    return ax
def plot_errors_classwise(df, figsize=(12, 4)):
    """Plot a grid of per-class confidence histograms (2 rows).

    Fix: previously this grouped the module-level global `eval_set`
    instead of the `df` argument, silently ignoring the caller's data.

    Returns the created matplotlib Figure.
    """
    groups = df.groupby('class')
    fig, axs = plt.subplots(2, len(groups) // 2, figsize=figsize)
    for i, (classname, data) in enumerate(groups):
        col = i // 2
        row = i % 2
        ax = axs[row, col]
        ax.set_title(classname)
        # Only label the outer edges of the grid to reduce clutter.
        if col != 0:
            ax.tick_params(labelleft=False)
        if row == 0:
            ax.tick_params(labelbottom=False)
        bins = numpy.linspace(0, 1.0, 20)
        plot_errors(data, ax=ax, ylim=(0, 30), q=None, bins=bins)
    fig.tight_layout()
    return fig
def score(df, average=None, threshold=0.0):
    """Compute precision/recall with low-confidence predictions rerouted to
    an 'unknown' class id (11) that is excluded from scoring via `labels`.

    Returns a pandas Series with 'precision', 'recall' and 'uncertain'
    (the fraction of predictions whose best_p fell below the threshold).
    """
    y_true = df.classID
    y_pred = df.best_y
    uncertain = df.best_p < threshold
    uncertain_ratio = numpy.count_nonzero(uncertain.astype(int)) / len(y_pred)
    # Class id 11 is outside `labels`, so masked rows hurt recall without
    # being scored as any real class.
    y_pred = y_pred.mask(uncertain, 11)
    labels = list(range(0, 10))
    precision = sklearn.metrics.precision_score(y_true, y_pred, average=average, labels=labels)
    recall = sklearn.metrics.recall_score(y_true, y_pred, average=average, labels=labels)
    return pandas.Series({
        'precision': precision,
        'recall': recall,
        'uncertain': uncertain_ratio,
    })
def plot_precision_recall(data, ax=None):
    """Plot macro precision/recall vs threshold and micro/macro PR curves.

    NOTE: `ax` is accepted for interface compatibility but a new figure is
    always created (the original behaved the same way).
    Fix: the created figure is now returned so callers can save or show it
    (previously the function returned None).
    """
    df = pandas.DataFrame({'threshold': numpy.linspace(0, 1.0, 50, endpoint=False)})
    micro = df.apply(lambda r: score(data, average='micro', threshold=r.threshold), axis=1)
    micro['threshold'] = df.threshold
    micro['micro'] = micro.precision
    macro = df.apply(lambda r: score(data, average='macro', threshold=r.threshold), axis=1)
    macro['threshold'] = df.threshold
    macro['macro'] = macro.precision
    fig, (ax2, ax) = plt.subplots(1, 2, figsize=(8, 4))
    macro.plot.line(ax=ax2, y=['precision', 'recall'], x='threshold', ylim=(0.0, 1.0))
    micro.plot.line(ax=ax, y='micro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    macro.plot.line(ax=ax, y='macro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    ax.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax.grid(True)
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    ax.set_aspect('equal')
    ax2.set_aspect('equal')
    ax2.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax2.grid(True)
    ax2.set_xlabel('Probability threshold for "unknown" class')
    ax2.set_ylabel('Performance metric')
    fig.tight_layout()
    return fig
def load_device_results(results_dir):
    """Load every '<experiment>.device.json' in `results_dir` into one
    DataFrame indexed by experiment name.

    The files contain single-quoted pseudo-JSON, so quotes are normalized
    before parsing (this breaks if values legitimately contain quotes).

    Fix: the experiment name is now obtained by slicing off the suffix.
    The previous `filename.rstrip('.device.json')` strips a *character
    set*, mangling names that end in any of those letters (e.g. 'snn'
    became 's').
    """
    suffix = '.device.json'
    frames = []
    for filename in os.listdir(results_dir):
        if not filename.endswith(suffix):
            continue
        experiment = filename[:-len(suffix)]
        p = os.path.join(results_dir, filename)
        with open(p, 'r') as f:
            contents = f.read()
        contents = contents.replace("'", '"')
        d = json.loads(contents)
        d['experiment'] = experiment
        frames.append(pandas.DataFrame([d]))
    df = pandas.concat(frames)
    df.set_index('experiment', inplace=True)
    return df
def plot_layers_ram(layers_ram, ax=None, max_ram=64000.0):
    """Horizontal bar chart of per-layer activation RAM, with the device
    RAM budget shaded green.

    Fix: when a caller supplied `ax`, `fig` was never assigned and the
    final `return fig` raised NameError; the figure owning `ax` is now
    returned in that case.
    """
    if ax is None:
        fig, ax = plt.subplots(1, figsize=(4, 6))
    else:
        fig = ax.get_figure()
    l = layers_ram.sort_index(ascending=False)
    # 4 bytes per activation (assumes 32-bit values -- TODO confirm).
    l['activations_ram'] = 4 * l.activations
    l = l[l.activations_ram.notna()]
    l.plot(kind='barh', ax=ax, y='activations_ram', x='name')
    ax.axvspan(xmin=0, xmax=max_ram, alpha=0.2, color='green')
    return fig
def read_report(ser):
    """Read one benchmark report from a serial-like object.

    Skips lines until one starting with 'Results for', then collects lines
    (inclusive) until one ending with 'cfg=0'. Returns the collected lines
    joined with newlines.
    """
    collected = []
    state = 'wait-for-start'
    while state != 'ended':
        line = ser.readline().decode('utf-8').strip()
        if state == 'wait-for-start' and line.startswith('Results for'):
            state = 'started'
        if state == 'started':
            collected.append(line)
            if line.endswith('cfg=0'):
                state = 'ended'
    return '\n'.join(collected)
def parse_report(report):
    """Parse an on-device benchmark report into a metrics dict.

    Extracts cpu_mhz, macc, stack (bytes), duration_avg (seconds),
    cycles_avg and the derived cycles_macc ratio.

    Fix: values are now parsed with `split()[0]` instead of
    `str.rstrip(' ms (average)')` / `rstrip(' bytes')`, which strip a
    *character set* and would corrupt numbers whose last digit happened
    to be removable (e.g. none here, but any value ending in a stripped
    character would break).

    Raises IndexError if the frequency/complexity line is missing.
    """
    out = {}
    result_regexp = r'@(\d*)MHz\/(\d*)MHz.*complexity:\s(\d*)\sMACC'
    matches = list(re.finditer(result_regexp, report, re.MULTILINE))
    (cpu_freq, _cpu_freq_max, macc) = matches[0].groups()
    out['cpu_mhz'] = int(cpu_freq)
    out['macc'] = int(macc)
    key_value_regex = r'(.*)\s:\s(.*)'
    for match in re.finditer(key_value_regex, report, re.MULTILINE):
        key, value = match.groups()
        key = key.strip()
        value = value.strip()
        if key == 'used stack':
            out['stack'] = int(value.split()[0])
        if key == 'duration':
            # Reported in milliseconds; store seconds.
            out['duration_avg'] = float(value.split()[0]) / 1000
        if key == 'CPU cycles':
            out['cycles_avg'] = int(value.split()[0])
            out['cycles_macc'] = out['cycles_avg'] / out['macc']
    return out
def test_parse_report():
    """Regression check for parse_report against the bundled example report."""
    parsed = parse_report(example_report)
    assert parsed['duration_avg'] == 0.325142
    assert parsed['cycles_avg'] == 26011387
    assert parsed['stack'] == 276
    assert parsed['cpu_mhz'] == 80
    assert parsed['macc'] == 2980798
def main():
    """Self-test the parser, then read one benchmark report from the device
    over serial and print the parsed metrics as JSON."""
    test_parse_report()
    device = '/dev/ttyACM0'
    baudrate = 115200
    with serial.Serial(device, baudrate, timeout=0.5) as ser:
        # Drain any stale buffered output before looking for the report.
        _stale = ser.read(10000)
        report = read_report(ser)
        out = parse_report(report)
        print(json.dumps(out))
def ensure_dir(directory):
    """Create `directory` (including parents) if it does not already exist.

    Fix: uses exist_ok=True instead of the previous exists()+makedirs()
    sequence, which raced if another process created the directory between
    the check and the call.
    """
    os.makedirs(directory, exist_ok=True)
def ensure_dir_for_file(path):
    """Ensure the parent directory of `path` exists.

    Fix: a bare filename has an empty dirname; previously that empty
    string was passed to makedirs and raised. It is now a no-op, and the
    creation itself is race-free via exist_ok=True.
    """
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def ensure_directories(*dirs):
    """Create each of the given directories if missing."""
    for directory in dirs:
        ensure_dir(directory)
def add_arguments(parser):
    """Register the shared data/model/settings path options on `parser`."""
    add = parser.add_argument
    add('--datasets', dest='datasets_dir', default='./data/datasets', help='%(default)s')
    add('--features', dest='features_dir', default='./data/features', help='%(default)s')
    add('--models', dest='models_dir', default='./data/models', help='%(default)s')
    add('--settings', dest='settings_path', default='./experiments/ldcnn20k60.yaml', help='%(default)s')
def load_settings_path(path):
    """Load experiment settings from a YAML file.

    Fix: uses yaml.safe_load -- calling yaml.load without an explicit
    Loader is deprecated and can construct arbitrary Python objects from
    untrusted files.
    """
    with open(path, 'r') as config_file:
        settings = yaml.safe_load(config_file)
    return settings
def arglist(options):
    """Render an options dict as command-line arguments.

    A value of None becomes a bare flag ('--key'); anything else becomes
    '--key=value'. Order follows dict insertion order.
    """
    def render(key, value):
        if value is None:
            return '--{}'.format(key)
        return '--{}={}'.format(key, value)

    return [render(k, v) for k, v in options.items()]
def command_for_job(options):
    """Build the train.py invocation (argv list) for one job's options."""
    cmd = ['python3', 'train.py']
    cmd.extend(arglist(options))
    return cmd
def generate_train_jobs(experiments, settings_path, folds, overrides, ignored=['nickname']):
    """Expand an experiments table into one job-options dict per
    (experiment, fold) pair.

    Job names embed a timestamp and a short random id so repeated runs do
    not collide. `overrides` wins over per-experiment settings; keys listed
    in `ignored` are removed from the final options (they must exist in
    every experiment row). The mutable default is never modified.
    """
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
    unique = str(uuid.uuid4())[0:4]

    def job_name(exname, fold):
        base = '-'.join([exname, timestamp, unique])
        return base + '-fold{}'.format(fold)

    def job_options(exname, experiment, fold):
        options = {
            'name': job_name(exname, fold),
            'fold': fold,
            'settings': settings_path,
        }
        for key, value in experiment.items():
            if key == 'modelcheck':
                # 'skip' maps to the bare --skip_model_check flag.
                if value == 'skip':
                    options['skip_model_check'] = None
            else:
                options[key] = value
        for key, value in overrides.items():
            options[key] = value
        for key in ignored:
            del options[key]
        return options

    jobs = []
    for fold in folds:
        for idx, experiment in experiments.iterrows():
            jobs.append(job_options(str(idx), experiment, fold))
    assert len(jobs) == (len(experiments) * len(folds)), len(jobs)
    return jobs
def run_job(jobdata, out_dir, verbose=2):
    """Run one training job as a subprocess, teeing its stdout to a log.

    Creates <out_dir>/<name>/ containing 'cmdline' and 'stdout.log', then
    asserts that the expected artifacts (train.csv, history.csv, a .hdf5
    model) were produced. Returns start/end timestamps and the exit code.
    """
    args = command_for_job(jobdata)
    job_dir = os.path.join(out_dir, jobdata['name'])
    common.ensure_directories(job_dir)
    log_path = os.path.join(job_dir, 'stdout.log')
    cmdline = ' '.join(args)
    with open(os.path.join(job_dir, 'cmdline'), 'w') as f:
        f.write(cmdline)
    start = time.time()
    print('starting job', cmdline)
    print('job log', log_path)
    exitcode = None
    with open(log_path, 'w') as log_file:
        process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        # Stream child stdout line-by-line so the log is live.
        for raw in iter(process.stdout.readline, b''):
            text = raw.decode('utf-8')
            if verbose > 2:
                sys.stdout.write(text)
            log_file.write(text)
            log_file.flush()
        exitcode = process.wait()
    files = os.listdir(job_dir)
    assert 'train.csv' in files, files
    assert 'history.csv' in files, files
    model_files = [p for p in files if p.endswith('.hdf5')]
    assert len(model_files) > 0, files
    end = time.time()
    return {'start': start, 'end': end, 'exitcode': exitcode}
def run_jobs(commands, out_dir, n_jobs=5, verbose=1):
    """Execute the jobs in parallel with joblib; returns one result dict
    per job, in input order."""
    tasks = [joblib.delayed(run_job)(cmd, out_dir) for cmd in commands]
    return joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(tasks)
def parse(args):
    """Parse command-line arguments for the job-runner script."""
    import argparse
    parser = argparse.ArgumentParser(description='Generate jobs')
    common.add_arguments(parser)
    add = parser.add_argument
    add('--experiments', default='models.csv', help='%(default)s')
    add('--check', action='store_true', help='Only run a pre-flight check')
    add('--jobs', type=int, default=5, help='Number of parallel jobs')
    add('--folds', type=int, default=10, help='Number of folds to test')
    add('--start', type=int, default=0, help='First experiment')
    add('--stop', type=int, default=None, help='Last experiment')
    return parser.parse_args(args)
def main():
    """Entry point: expand the experiments table into per-fold training
    jobs, run them in parallel, and fail if any job exits non-zero.

    Fix: the job-count message used `print('Preparing {} jobs', len(cmds))`,
    which printed the literal '{}' followed by the count; it now formats.
    """
    args = parse(sys.argv[1:])
    experiments = pandas.read_csv(args.experiments)
    # Loaded for its side effect of validating the settings file exists
    # and parses; the value itself is not used here.
    settings = common.load_settings_path(args.settings_path)
    stop = len(experiments) if args.stop is None else args.stop
    experiments = experiments.loc[range(args.start, stop)]
    overrides = {}
    folds = list(range(1, args.folds + 1))
    assert max(folds) <= 10
    if args.check:
        # Tiny configuration: just enough to verify the pipeline end-to-end.
        batches = 2
        overrides['batch'] = 10
        overrides['epochs'] = 1
        overrides['train_samples'] = batches * overrides['batch']
        overrides['val_samples'] = batches * overrides['batch']
    cmds = generate_train_jobs(experiments, args.settings_path, folds, overrides)
    print('Preparing {} jobs'.format(len(cmds)))
    print('\n'.join([c['name'] for c in cmds]))
    out = run_jobs(cmds, args.models_dir, n_jobs=args.jobs)
    print(out)
    success = all([o['exitcode'] == 0 for o in out])
    assert success
def build(settings):
    """Instantiate the model family named in `settings` with its options.

    Keys missing from the settings dict are passed through as None, so
    each builder can apply its own defaults handling.
    """
    builder = families.get(settings['model'])
    options = dict(
        frames=settings['frames'],
        bands=settings['n_mels'],
        channels=settings.get('channels', 1),
    )
    known_settings = [
        'conv_size', 'conv_block', 'downsample_size', 'n_stages',
        'dropout', 'fully_connected', 'n_blocks_per_stage', 'filters',
    ]
    for key in known_settings:
        options[key] = settings.get(key, None)
    return builder(**options)
def build_model(frames=128, bands=40, channels=1, n_classes=10,
                conv_size=(3, 3), conv_block='conv', downsample_size=(2, 2),
                n_stages=3, n_blocks_per_stage=1, filters=128,
                kernels_growth=1.0, fully_connected=64, rnn_units=32,
                temporal='bigru', dropout=0.5, l2=0.001, backend='detection'):
    """CRNN-style sound event model: three separable-conv blocks, a temporal
    module (bidirectional GRUs or a strided-conv 'tcn'), and an optional
    classification/detection head.

    Fixes: the inner conv-block helper previously accepted but ignored its
    kernel_size argument; the classification head referenced an undefined
    `layers` module instead of the imported Dense (a NameError whenever
    backend='classification').

    Raises ValueError for unknown `temporal` or `backend` values.
    NOTE(review): conv_block, n_stages, n_blocks_per_stage, kernels_growth,
    dropout and l2 are accepted but unused in this builder.
    """
    from tensorflow.keras import Model, Sequential
    from tensorflow.keras.layers import (
        Conv2D, LSTM, GRU, Bidirectional, MaxPooling2D, Reshape,
        TimeDistributed, Softmax, Dense, SeparableConv2D,
    )

    model = Sequential()
    input_shape = (frames, bands, channels)

    def add_conv_block(model, downsample_size, conv_filters=filters,
                       kernel_size=conv_size, **kwargs):
        # Separable conv + max-pool; kernel_size is now honored (defaults
        # keep behavior identical for all existing call sites).
        model.add(SeparableConv2D(conv_filters, kernel_size, **kwargs))
        model.add(MaxPooling2D(downsample_size))

    add_conv_block(model, downsample_size=(1, 5), input_shape=input_shape)
    add_conv_block(model, downsample_size=(1, 2))
    add_conv_block(model, downsample_size=(1, 2))

    if temporal == 'bigru':
        o = model.layers[-1].output_shape
        # Collapse (freq, channels) so the GRUs see (time, features).
        model.add(Reshape((o[1], -1)))
        model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
        model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
    elif temporal == 'tcn':
        model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2, 1)))
        model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2, 1)))
    else:
        raise ValueError(f'Unknown temporal parameter {temporal}')

    o = model.layers[-1].output_shape
    if backend == 'classification':
        model.add(TimeDistributed(Dense(fully_connected, activation='linear')))
        model.add(Dense(n_classes))  # was `layers.Dense`: undefined name
        model.add(Softmax())
    elif backend == 'detection':
        model.add(TimeDistributed(Dense(n_classes, activation='linear'),
                                  input_shape=(o[1], o[2])))
        model.add(Softmax())
    elif not backend:
        pass
    else:
        raise ValueError(f"Unsupported backend '{backend}'")
    return model
def test_model():
    """Smoke-test: build a small TCN variant and print its summary."""
    model = build_model(filters=24, bands=64, rnn_units=16, n_classes=3, temporal='tcn')
    print(model.summary())
def dcnn_head(input, head_name, filters=80, kernel=(3, 3)):
    """One DCNN input branch: two dilated conv + max-pool stages, flattened.

    Layer names are suffixed with `head_name` so multiple branches can
    coexist in one model.
    """
    from keras.layers import Convolution2D, Flatten, MaxPooling2D

    def named(base):
        return base + '_' + head_name

    x = input
    x = Convolution2D(filters, kernel, dilation_rate=(1, 1), name=named('DilaConv1'))(x)
    x = MaxPooling2D(pool_size=(4, 3), name=named('MPL1'))(x)
    x = Convolution2D(filters, kernel, dilation_rate=(2, 2), name=named('DilaConv2'))(x)
    x = MaxPooling2D(pool_size=(1, 3), name=named('MPL2'))(x)
    return Flatten(name=named('flatten'))(x)
def dcnn(bands=60, frames=31, n_classes=10, fully_connected=5000, filters=80, activation='relu'):
    """Dilated Convolution Neural Network with LeakyReLU for Environmental
    Sound Classification.

    https://ieeexplore.ieee.org/document/8096153

    Two input branches (mel spectrogram and its delta) are concatenated and
    fed through two large fully-connected layers.
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    import keras.layers

    input_shape = (bands, frames, 1)

    def head(tensor, name):
        return dcnn_head(tensor, name, filters)

    mel_input = Input(shape=input_shape, name='mel_input')
    delta_input = Input(shape=input_shape, name='delta_input')
    merged = keras.layers.concatenate([head(mel_input, 'mel'), head(delta_input, 'delta')])
    merged = Dense(fully_connected, activation=activation)(merged)
    merged = Dense(fully_connected, activation=activation)(merged)
    out = Dense(n_classes, activation='softmax')(merged)
    return Model([mel_input, delta_input], out)
def dcnn_nodelta(bands=60, frames=31, n_classes=10, channels=1, fully_connected=5000, filters=80, activation='relu'):
    """Variation of the DCNN with only the mel branch (no delta input)."""
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    import keras.layers

    input_shape = (bands, frames, channels)

    def head(tensor, name):
        return dcnn_head(tensor, name, filters)

    mel_input = Input(shape=input_shape, name='mel_input')
    x = head(mel_input, 'mel')
    x = Dense(fully_connected, activation=activation)(x)
    x = Dense(fully_connected, activation=activation)(x)
    out = Dense(n_classes, activation='softmax')(x)
    return Model(mel_input, out)
def main():
    """Build and save both DCNN variants, printing their summaries."""
    m = dcnn()
    m.save('dcnn.hdf5')
    m.summary()
    m = dcnn_nodelta()
    m.save('dcnn.nodelta.hdf5')
    m.summary()
def build_model(bands=60, frames=41, channels=1, n_labels=10, dropout=0.0,
                depth=7, block=2, growth=15, pooling='avg', bottleneck=False,
                reduction=0.0, subsample=True):
    """DenseNet classifier built via keras_contrib's reference implementation."""
    from keras_contrib.applications import densenet
    input_shape = (bands, frames, channels)
    return densenet.DenseNet(
        input_shape=input_shape,
        pooling=pooling,
        depth=depth,
        nb_dense_block=block,
        growth_rate=growth,
        bottleneck=bottleneck,
        reduction=reduction,
        subsample_initial_block=subsample,
        include_top=True,
        classes=n_labels,
        dropout_rate=dropout,
    )
def main():
    """Build the DenseNet model, save it, and print a summary."""
    m = build_model()
    m.save('densenet.hdf5')
    m.summary()
def dilaconv(bands=64, frames=41, channels=2, dilation=(2, 2), kernel=(3, 3),
             n_labels=10, dropout=0.5, kernels=[32, 32, 64, 64]):
    """Environmental sound classification with dilated convolutions.

    https://www.sciencedirect.com/science/article/pii/S0003682X18306121

    The first conv is undilated; subsequent ones use `dilation`. Global
    average pooling replaces a flatten+dense backend. (The mutable
    `kernels` default is only read, so the shared-default pitfall does
    not apply.)
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, GlobalAveragePooling2D
    from keras.regularizers import l2

    input_shape = (bands, frames, channels)
    conv_layers = [Convolution2D(kernels[0], kernel, input_shape=input_shape, activation='relu')]
    conv_layers += [
        Convolution2D(k, kernel, dilation_rate=dilation, activation='relu')
        for k in kernels[1:]
    ]
    classifier = [
        GlobalAveragePooling2D(),
        Dropout(dropout),
        Dense(n_labels, activation='softmax'),
    ]
    return Sequential(conv_layers + classifier)
def main():
    """Build and save all the dilated-convolution model variants."""
    m = dilaconv()
    m.summary()
    m.save('dilaconv.hdf5')
    m = ldcnn()
    m.save('ldcnn.hdf5')
    m.summary()
    m = ldcnn_nodelta()
    m.save('ldcnn.nodelta.hdf5')
    m.summary()
def build_model(bands=128, frames=128, channels=2, n_classes=10, filters=80,
                L=57, W=6, fully_connected=5000):
    """Deep Convolutional Neural Network with Mixup for Environmental Sound
    Classification.

    https://link.springer.com/chapter/10.1007/978-3-030-03335-4_31

    NOTE(review): filters, L, W and fully_connected are accepted but
    unused -- the layer sizes below are hard-coded.
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.layers import Convolution2D, Flatten, MaxPooling2D
    import keras.layers

    input_shape = (bands, frames, channels)
    stack = [
        Convolution2D(32, (3, 7), padding='same', input_shape=input_shape),
        Convolution2D(32, (3, 5), padding='same'),
        MaxPooling2D(pool_size=(4, 3)),
        Convolution2D(64, (3, 1), padding='same'),
        Convolution2D(64, (3, 1), padding='same'),
        MaxPooling2D(pool_size=(4, 1)),
        Convolution2D(128, (1, 5), padding='same'),
        Convolution2D(128, (1, 5), padding='same'),
        MaxPooling2D(pool_size=(1, 3)),
        Convolution2D(256, (3, 3), padding='same'),
        Convolution2D(256, (3, 3), padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dense(512, activation='relu'),
        Dense(n_classes, activation='softmax'),
    ]
    return Sequential(stack)
def main():
    """Build the mixup CNN, print its summary and save it."""
    m = build_model()
    m.summary()
    m.save('dmix.orig.hdf5')
def get_post(x_in):
    """EffNet post-block: ReLU followed by batch normalization.

    Note the unconventional order (activation before BN), preserved from
    the reference implementation.
    """
    activated = Activation('relu')(x_in)
    return BatchNormalization()(activated)
def get_block(x_in, ch_in, ch_out, kernel=3, downsample=2, strides=(1, 1)):
    """One EffNet block: pointwise conv, spatially-separated depthwise convs
    (1xk then kx1) with pooling in between, and a final projection conv.
    """
    x = Conv2D(ch_in, kernel_size=(1, 1), strides=strides, padding='same', use_bias=False)(x_in)
    x = get_post(x)
    x = DepthwiseConv2D(kernel_size=(1, kernel), padding='same', use_bias=False)(x)
    x = get_post(x)
    x = MaxPool2D(pool_size=(downsample, 1), strides=(downsample, 1))(x)
    x = DepthwiseConv2D(kernel_size=(kernel, 1), padding='same', use_bias=False)(x)
    x = get_post(x)
    # The projection also downsamples along the second axis via its stride.
    x = Conv2D(ch_out, kernel_size=(downsample, 1), strides=(1, downsample), padding='same', use_bias=False)(x)
    x = get_post(x)
    return x
def Effnet(input_shape, nb_classes, n_blocks=2, initial_filters=16, filter_growth=2.0, dropout=0.5, kernel=5, downsample=2, pool=None, include_top='flatten', weights=None):
    """Build an EffNet-style classifier.

    Stacks `n_blocks` EffNet blocks with geometrically growing filter
    counts, then adds either a flatten+dense or conv/global-pool softmax
    top. `pool` is accepted but unused here. Returns a keras Model.
    """
    # Only square kernels are supported: collapse an iterable kernel spec.
    if getattr(kernel, '__iter__', None):
        assert (kernel[0] == kernel[1])
        kernel = kernel[0]
    x_in = Input(shape=input_shape)
    x = x_in
    for block_no in range(n_blocks):
        filters_in = int((initial_filters * (filter_growth ** block_no)))
        filters_out = int((initial_filters * (filter_growth ** (block_no + 1))))
        # Only the first block downsamples spatially via its stride.
        strides = ((2, 2) if (block_no == 0) else (1, 1))
        x = get_block(x, filters_in, filters_out, kernel=kernel, downsample=downsample, strides=strides)
    if (include_top == 'flatten'):
        x = Flatten()(x)
        x = Dropout(dropout)(x)
        x = Dense(nb_classes, activation='softmax')(x)
    elif (include_top == 'conv'):
        # Convolutional classifier head (MobileNet-style).
        x = GlobalAveragePooling2D()(x)
        # NOTE(review): filters_out is unbound here if n_blocks == 0.
        shape = (1, 1, filters_out)
        x = Reshape(shape)(x)
        x = Dropout(dropout)(x)
        x = Conv2D(nb_classes, (1, 1), padding='same')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((nb_classes,))(x)
    model = Model(inputs=x_in, outputs=x)
    if (weights is not None):
        # by_name allows loading partial weights from related architectures.
        model.load_weights(weights, by_name=True)
    return model
def build_model(frames=31, bands=60, channels=1, n_classes=10, **kwargs):
    """Adapter giving Effnet the (bands, frames, channels) input convention
    used by the other model builders."""
    shape = (bands, frames, channels)
    return Effnet(shape, nb_classes=n_classes, **kwargs)
def main():
    """Build the EffNet model, print its summary and save it."""
    m = build_model()
    m.summary()
    m.save('effnet.hdf5')
def ldcnn_head(input, head_name, filters=80, L=57, W=6):
    """LD-CNN input branch: separated (L,1)/(1,W) convolutions with batch
    norm, pooling, a dilated conv, then flatten.

    Layer names are suffixed with `head_name` so branches can coexist.
    """
    from keras.layers import Convolution2D, Flatten, MaxPooling2D, BatchNormalization

    def named(base):
        return base + '_' + head_name

    x = input
    x = Convolution2D(filters, (L, 1), activation='relu', name=named('SFCL1'))(x)
    x = BatchNormalization()(x)
    x = Convolution2D(filters, (1, W), activation='relu', name=named('SFCL2'))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(4, 3), strides=(1, 3), name=named('MPL1'))(x)
    x = Convolution2D(filters, (1, 3), dilation_rate=(2, 2), name=named('DCL'))(x)
    x = MaxPooling2D(pool_size=(1, 3), strides=(1, 3), name=named('MPL2'))(x)
    return Flatten(name=named('flatten'))(x)
def ldcnn(bands=60, frames=31, n_classes=10, filters=80, L=57, W=6, fully_connected=5000, dropout=0.25):
    """LD-CNN: A Lightweight Dilated Convolutional Neural Network for
    Environmental Sound Classification.

    http://epubs.surrey.ac.uk/849351/1/LD-CNN.pdf

    The mel and delta branches are merged by element-wise addition (the
    paper's feature-sum layer 'FSL') rather than concatenation.
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.regularizers import l2
    import keras.layers

    input_shape = (bands, frames, 1)

    def head(tensor, name):
        return ldcnn_head(tensor, name, filters, L, W)

    mel_input = Input(shape=input_shape, name='mel_input')
    delta_input = Input(shape=input_shape, name='delta_input')
    merged = keras.layers.add([head(mel_input, 'mel'), head(delta_input, 'delta')], name='FSL')
    merged = Dropout(dropout)(merged)
    merged = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(merged)
    merged = Dropout(dropout)(merged)
    out = Dense(n_classes, activation='softmax')(merged)
    return Model([mel_input, delta_input], out)
def ldcnn_nodelta(bands=60, frames=31, n_classes=10, filters=80, L=57, W=6, channels=1, fully_connected=5000, dropout=0.5):
    """Variation of LD-CNN with only mel input (no deltas).

    NOTE(review): dropout is applied after the final dense layer, before
    the softmax -- unusual, but preserved from the original.
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.regularizers import l2

    input_shape = (bands, frames, channels)
    mel_input = Input(shape=input_shape, name='mel_input')
    x = ldcnn_head(mel_input, 'mel', filters, L, W)
    x = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(x)
    x = Dropout(dropout)(x)
    x = Dense(n_classes, kernel_regularizer=l2(0.001))(x)
    x = Dropout(dropout)(x)
    x = Activation('softmax')(x)
    return Model(mel_input, x)
def relu6(x, name):
    """Apply a ReLU activation to `x`.

    Fix: the original guarded a ReLU(6) branch behind `if False:`, so only
    the plain ReLU path could ever run; the dead branch is removed. The
    `name` parameter is kept for interface compatibility (it was only
    consumed by the dead branch -- the live Activation is unnamed, as
    before).
    """
    return layers.Activation('relu')(x)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Initial MobileNet conv block: zero-pad, strided conv (valid padding),
    batch norm, then ReLU via relu6().

    `alpha` is the MobileNet width multiplier scaling the filter count.
    """
    channel_axis = (1 if (backend.image_data_format() == 'channels_first') else (- 1))
    filters = int((filters * alpha))
    # NOTE(review): both padding dimensions derive from kernel[1]; for
    # non-square kernels the first axis probably wants kernel[0] // 2 --
    # confirm before using non-square kernels here.
    padding = ((0, (kernel[1] // 2)), (0, (kernel[1] // 2)))
    x = layers.ZeroPadding2D(padding=padding, name='conv1_pad')(inputs)
    x = layers.Conv2D(filters, kernel, padding='valid', use_bias=False, strides=strides, name='conv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return relu6(x, name='conv1_relu')
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), kernel=(3, 3), block_id=1):
    """MobileNet depthwise-separable block: (optional pad) -> depthwise conv
    -> BN -> ReLU -> 1x1 pointwise conv -> BN -> ReLU.

    `alpha` scales the pointwise filter count; `block_id` only names layers.
    """
    channel_axis = (1 if (backend.image_data_format() == 'channels_first') else (- 1))
    pointwise_conv_filters = int((pointwise_conv_filters * alpha))
    # Deliberately shadows any module-level `layers` binding.
    layers = keras.layers
    if (strides == (1, 1)):
        x = inputs
    else:
        # Strided case: pad explicitly and use 'valid' padding below.
        # NOTE(review): both pad dims derive from kernel[1]; for non-square
        # kernels the first axis likely wants kernel[0] // 2 -- confirm.
        x = layers.ZeroPadding2D(((0, (kernel[1] // 2)), (0, (kernel[1] // 2))), name=('conv_pad_%d' % block_id))(inputs)
    x = layers.DepthwiseConv2D(kernel, padding=('same' if (strides == (1, 1)) else 'valid'), depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name=('conv_dw_%d' % block_id))(x)
    x = layers.BatchNormalization(axis=channel_axis, name=('conv_dw_%d_bn' % block_id))(x)
    x = relu6(x, name=('conv_dw_%d_relu' % block_id))
    x = layers.Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=('conv_pw_%d' % block_id))(x)
    x = layers.BatchNormalization(axis=channel_axis, name=('conv_pw_%d_bn' % block_id))(x)
    return relu6(x, name=('conv_pw_%d_relu' % block_id))
def build_model(frames=32, bands=32, channels=1, n_classes=10, dropout=0.5, depth_multiplier=1, alpha=0.5, n_stages=2, initial_filters=24, kernel=(5, 5), pool=(2, 2)):
    """MobileNet-style audio classifier.

    Initial strided conv + one depthwise block, then per-stage pairs of
    depthwise blocks with doubling filter counts, finished by a
    convolutional softmax head. `alpha` is the width multiplier; `pool`
    gives the per-stage downsampling strides as (freq, time).

    NOTE(review): `filters` below is only bound inside the stage loop, so
    n_stages <= 1 would raise NameError at the reshape -- confirm callers
    always use n_stages >= 2.
    """
    (stride_f, stride_t) = pool
    from keras.applications import mobilenet
    conv = _conv_block
    dwconv = _depthwise_conv_block
    assert (keras.backend.image_data_format() == 'channels_last')
    input_shape = (bands, frames, channels)
    img_input = keras.layers.Input(shape=input_shape)
    x = conv(img_input, initial_filters, alpha, kernel=kernel, strides=(2, 2))
    x = dwconv(x, (initial_filters * 2), alpha, depth_multiplier, block_id=1)
    for stage_no in range(1, n_stages):
        filters = (initial_filters * (2 ** stage_no))
        # First block of the stage downsamples; the second keeps resolution.
        x = dwconv(x, filters, alpha, depth_multiplier, kernel=kernel, strides=(stride_f, stride_t), block_id=(stage_no * 2))
        x = dwconv(x, filters, alpha, depth_multiplier, kernel=kernel, block_id=((stage_no * 2) + 1))
    # Channel count after the last stage (alpha-scaled), for the reshape.
    shape = (1, 1, int((filters * alpha)))
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Reshape(shape, name='reshape_1')(x)
    x = keras.layers.Dropout(dropout, name='dropout')(x)
    x = keras.layers.Conv2D(n_classes, (1, 1), padding='same', name='conv_preds')(x)
    x = keras.layers.Activation('softmax', name='act_softmax')(x)
    x = keras.layers.Reshape((n_classes,), name='reshape_2')(x)
    model = keras.Model(img_input, x)
    return model
def build_model(bands=60, frames=41, channels=2, n_labels=10, fc=5000, dropout=0.5):
    """Short-segment CNN from

    ENVIRONMENTAL SOUND CLASSIFICATION WITH CONVOLUTIONAL NEURAL NETWORKS
    Karol J. Piczak, 2015.
    https://karol.piczak.com/papers/Piczak2015-ESC-ConvNet.pdf
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.regularizers import l2

    input_shape = (bands, frames, channels)
    conv = [
        # Tall first kernel spans nearly the whole frequency axis.
        Convolution2D(80, (bands - 3, 6), strides=(1, 1), input_shape=input_shape),
        MaxPooling2D((4, 3), strides=(1, 3)),
        Convolution2D(80, (1, 3)),
        MaxPooling2D((1, 3), strides=(1, 3)),
    ]
    classifier = [
        Dense(fc, activation='relu'),
        Dropout(dropout),
        Dense(fc, activation='relu'),
        Dropout(dropout),
        Dense(n_labels, activation='softmax'),
    ]
    return Sequential(conv + classifier)
def main():
    """Build the Piczak CNN, save it, and print a summary."""
    m = build_model()
    m.save('piczak.orig.hdf5')
    m.summary()
def build_model(frames=128, bands=128, channels=1, n_classes=10, conv_size=(5, 5),
                conv_block='conv', downsample_size=(4, 2), fully_connected=64,
                n_stages=None, n_blocks_per_stage=None, filters=24,
                kernels_growth=2, dropout=0.5, use_strides=False):
    """SB-CNN model from
    Deep Convolutional Neural Networks and Data Augmentation for
    Environmental Sound Classification, Salamon and Bello, 2016.
    https://arxiv.org/pdf/1608.04363.pdf

    Based on https://gist.github.com/jaron/5b17c9f37f351780744aefc74f93d3ae
    with parameters restored to the paper's and Batch Normalization added.
    n_stages / n_blocks_per_stage are accepted for interface parity with
    the other builders but unused here.
    """
    assert conv_block in ('conv', 'depthwise_separable')
    Conv2 = SeparableConv2D if conv_block == 'depthwise_separable' else Convolution2D
    kernel = conv_size
    # Downsample either via conv strides or via max-pooling, not both.
    if use_strides:
        strides, pool = downsample_size, (1, 1)
    else:
        strides, pool = (1, 1), downsample_size
    block1 = [
        Convolution2D(filters, kernel, padding='same', strides=strides,
                      input_shape=(bands, frames, channels)),
        BatchNormalization(),
        MaxPooling2D(pool_size=pool),
        Activation('relu'),
    ]
    block2 = [
        Conv2(filters * kernels_growth, kernel, padding='same', strides=strides),
        BatchNormalization(),
        MaxPooling2D(pool_size=pool),
        Activation('relu'),
    ]
    block3 = [
        Conv2(filters * kernels_growth, kernel, padding='valid', strides=strides),
        BatchNormalization(),
        Activation('relu'),
    ]
    classifier = [
        Flatten(),
        Dropout(dropout),
        Dense(fully_connected, kernel_regularizer=l2(0.001)),
        Activation('relu'),
        Dropout(dropout),
        Dense(n_classes, kernel_regularizer=l2(0.001)),
        Activation('softmax'),
    ]
    return Sequential(block1 + block2 + block3 + classifier)
def build_model(frames=172, shingles=8, bands=40, channels=1, codebook=2000):
    """Convolution part of the SKM model from

    UNSUPERVISED FEATURE LEARNING FOR URBAN SOUND CLASSIFICATION
    Justin Salamon and Juan Pablo Bello, 2015

    A single full-height convolution whose filters act as the learned
    codebook, striding over time in shingle-sized steps.
    """
    input_shape = (bands, frames, channels)
    kernel = (bands, shingles)
    return Sequential([
        Convolution2D(codebook, kernel, strides=(1, shingles), padding='same',
                      activation=None, input_shape=input_shape),
    ])
def main():
    """Build the original SKM convolution model and print its summary."""
    print('original')
    m = build_model()
    m.summary()
def build_tiny_conv(input_frames, input_bins, n_classes=12, dropout=0.5):
    """Ported from TensorFlow examples: create_tiny_conv_model."""
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    input_shape = (input_bins, input_frames, 1)
    return keras.Sequential([
        Conv2D(8, (8, 10), strides=(2, 2), padding='same', activation='relu',
               use_bias=True, input_shape=input_shape),
        Dropout(dropout),
        Flatten(),
        Dense(n_classes, activation='softmax', use_bias=True),
    ])
def build_one(frames=64, bands=40, n_classes=10, dropout=0.0, tstride=1, fstride=4):
    """Ported from TensorFlow examples: create_low_latency_conv.

    Roughly the 'cnn-one-fstride4' network from
    'Convolutional Neural Networks for Small-footprint Keyword Spotting':
    http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    conv_f = 8
    conv_t = 32
    kernels = 90
    bottleneck = 32
    input_shape = (frames, bands, 1)
    return keras.Sequential([
        Conv2D(kernels, (conv_t, conv_f), strides=(tstride, fstride),
               padding='valid', activation='relu', use_bias=True,
               input_shape=input_shape),
        # Linear bottleneck before the dense classifier stack.
        Dense(bottleneck, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(n_classes, activation='softmax', use_bias=True),
    ])
def build_low_latency_conv(input_frames, input_bins, n_classes=12, dropout=0.5):
    """Ported from Tensorflow examples, create_low_latency_conv.

    This is roughly the network labeled as 'cnn-one-fstride4' in the
    'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
    http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    layers = [
        # Single conv spanning all input frames in time
        Conv2D(186, (input_frames, 8), strides=(1, 1), padding='valid', activation='relu', use_bias=True, input_shape=(input_frames, input_bins, 1)),
        Dropout(dropout),
        Flatten(),
        Dense(128, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(128, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(n_classes, activation='softmax', use_bias=True),
    ]
    model = keras.Sequential(layers)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def build_aclnet_lowlevel(input_samples, c1=32, s1=8, s2=4, input_tensor=None):
    """Low-level (raw waveform) front-end of ACLNet.

    The following values were tested in the paper:
    c1 = 8, 16, 32
    s1 = 2, 4, 8
    s2 = 2, 4
    """
    from keras.layers import Conv1D, MaxPooling1D, InputLayer, Flatten, Dense
    # Pooling size chosen so total temporal downsampling is 160x
    pool_length = int(160 / (s2 * s1))
    layers = [
        InputLayer(input_shape=(input_samples, 1), input_tensor=input_tensor),
        Conv1D(filters=c1, kernel_size=9, strides=s1, padding='valid', activation=None, use_bias=False),
        Conv1D(filters=64, kernel_size=5, strides=s2, padding='valid', activation=None, use_bias=False),
        MaxPooling1D(pool_size=(pool_length,), padding='valid', data_format='channels_last'),
        Flatten(),
        Dense(1, activation=None),
    ]
    return keras.Sequential(layers)
def main():
    """Build each model variant and print its summary."""
    for model in (build_low_latency_conv(41, 40), build_tiny_conv(32, 40), build_one()):
        model.summary()
def fire_module(x, fire_id, squeeze=16, expand=64):
    """SqueezeNet 'fire' module: squeeze (1x1) then parallel 1x1/3x3 expand."""
    from keras.layers import concatenate
    sq1x1 = 'squeeze1x1'
    exp1x1 = 'expand1x1'
    exp3x3 = 'expand3x3'
    relu = 'relu_'
    s_id = 'fire' + str(fire_id) + '/'
    assert keras.backend.image_data_format() == 'channels_last'

    # Squeeze: reduce channel count with a 1x1 convolution
    squeezed = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    squeezed = Activation('relu', name=s_id + relu + sq1x1)(squeezed)

    # Expand: two parallel branches, 1x1 and 3x3
    branch_1x1 = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(squeezed)
    branch_1x1 = Activation('relu', name=s_id + relu + exp1x1)(branch_1x1)
    branch_3x3 = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(squeezed)
    branch_3x3 = Activation('relu', name=s_id + relu + exp3x3)(branch_3x3)

    # Concatenate along the channel axis
    return concatenate([branch_1x1, branch_3x3], axis=3, name=s_id + 'concat')
def build_model(frames=32, bands=32, channels=1, n_classes=10, dropout=0.5, n_stages=3, modules_per_stage=2, initial_filters=64, squeeze_ratio=0.2, pool=(2, 2), kernel=(3, 3), stride_f=2, stride_t=2):
    """SqueezeNet-style audio classifier built from fire modules."""
    from keras.models import Model
    from keras.layers import Input, GlobalAveragePooling2D, Dropout, MaxPooling2D

    img_input = Input(shape=(bands, frames, channels))
    x = Convolution2D(initial_filters, (3, 3), strides=(stride_f, stride_t), padding='valid', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)

    module_idx = 0
    # NOTE(review): range(1, n_stages) builds n_stages-1 pooled stages — confirm
    # this off-by-one-looking bound is intentional.
    for stage_no in range(1, n_stages):
        expand = initial_filters * stage_no
        squeeze = int(expand * squeeze_ratio)
        x = MaxPooling2D(pool_size=pool, strides=(stride_f, stride_t), name='pool' + str(stage_no))(x)
        for _ in range(modules_per_stage):
            x = fire_module(x, fire_id=module_idx, squeeze=squeeze, expand=expand)
            module_idx += 1

    # Classification head: 1x1 conv + global average pooling instead of Dense
    x = Dropout(dropout, name='drop9')(x)
    x = Convolution2D(n_classes, (1, 1), padding='valid', name='topconv')(x)
    x = Activation('relu', name='relu_topconv')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax', name='loss')(x)
    return Model(img_input, x)
def add_common(x, name):
    """Append the standard BatchNorm + ReLU pair after a convolution."""
    normalized = BatchNormalization(name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(normalized)
def conv(x, kernel, filters, downsample, name, padding='same'):
    """Regular convolutional block."""
    out = Conv2D(filters, kernel, strides=downsample, name=name, padding=padding)(x)
    return add_common(out, name)
def conv_ds(x, kernel, filters, downsample, name, padding='same'):
    """Depthwise Separable convolutional block (Depthwise->Pointwise).

    MobileNet style.
    """
    out = SeparableConv2D(filters, kernel, padding=padding, strides=downsample, name=name + '_ds')(x)
    return add_common(out, name=name + '_ds')
def conv_bottleneck_ds(x, kernel, filters, downsample, name, padding='same', bottleneck=0.5):
    """Bottleneck -> Depthwise Separable
    (Pointwise->Depthwise->Pointwise)

    MobileNetV2 style.
    """
    if padding == 'valid':
        # Pad manually so 'valid' convolutions keep usable spatial extent
        pad = ((0, kernel[0] // 2), (0, kernel[0] // 2))
        x = ZeroPadding2D(padding=pad, name=name + 'pad')(x)
    x = Conv2D(int(filters * bottleneck), (1, 1), padding='same', strides=downsample, name=name + '_pw')(x)
    # BUG FIX: the add_common() result was previously discarded, silently
    # skipping BatchNorm+ReLU after the pointwise convolution.
    x = add_common(x, name + '_pw')
    x = SeparableConv2D(filters, kernel, padding=padding, strides=(1, 1), name=name + '_ds')(x)
    return add_common(x, name + '_ds')
def conv_effnet(x, kernel, filters, downsample, name, bottleneck=0.5, strides=(1, 1), padding='same', bias=False):
    """Pointwise -> Spatially Separable conv & pooling, EffNet style.

    NOTE(review): the 'strides' parameter is accepted but never used —
    downsampling happens via 'downsample' on the pointwise conv; confirm.
    """
    # Only square downsampling and square kernels are supported
    assert downsample[0] == downsample[1]
    assert kernel[0] == kernel[1]
    downsample = downsample[0]
    kernel = kernel[0]

    ch_in = int(filters * bottleneck)
    ch_out = filters
    if padding == 'valid':
        pad = ((0, kernel // 2), (0, kernel // 2))
        x = ZeroPadding2D(padding=pad, name=name + 'pad')(x)

    # Bottleneck pointwise conv (also performs the downsampling)
    x = Conv2D(ch_in, (1, 1), strides=downsample, padding=padding, use_bias=bias, name=name + 'pw')(x)
    x = add_common(x, name=name + 'pw')
    # Spatially separated depthwise convs: vertical then horizontal
    x = DepthwiseConv2D((1, kernel), padding=padding, use_bias=bias, name=name + 'dwv')(x)
    x = add_common(x, name=name + 'dwv')
    x = DepthwiseConv2D((kernel, 1), padding='same', use_bias=bias, name=name + 'dwh')(x)
    x = add_common(x, name=name + 'dwh')
    # Restore the full channel count
    x = Conv2D(ch_out, (1, 1), padding=padding, use_bias=bias, name=name + 'rh')(x)
    return add_common(x, name=name + 'rh')
def backend_dense1(x, n_classes, fc=64, regularization=0.001, dropout=0.5):
    """SB-CNN style classification backend."""
    from keras.regularizers import l2
    out = Flatten()(x)
    out = Dropout(dropout)(out)
    out = Dense(fc, kernel_regularizer=l2(regularization))(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Dense(n_classes, kernel_regularizer=l2(regularization))(out)
    return Activation('softmax')(out)
def build_model(frames=128, bands=128, channels=1, n_classes=10, conv_size=(5, 5), conv_block='conv', downsample_size=(2, 2), n_stages=3, n_blocks_per_stage=1, filters=24, kernels_growth=1.5, fully_connected=64, dropout=0.5, l2=0.001):
    """Configurable convolutional classifier with pluggable conv block types.

    Stacks n_stages of n_blocks_per_stage convolutional blocks (the first block
    is always a plain conv, the last uses 'valid' padding), followed by a
    dense classification backend.
    """
    input = Input(shape=(bands, frames, channels))
    x = input
    block_no = 0
    for stage_no in range(0, n_stages):
        for b_no in range(0, n_blocks_per_stage):
            # Last block in the network uses 'valid' padding
            padding = 'valid' if block_no == (n_stages * n_blocks_per_stage) - 1 else 'same'
            # Only the first block of a stage downsamples
            downsample = downsample_size if b_no == 0 else (1, 1)
            conv_func = conv if block_no == 0 else block_types.get(conv_block)
            name = 'conv{}'.format(block_no)
            x = conv_func(x, conv_size, int(filters), downsample, name=name, padding=padding)
            block_no += 1
        filters = filters * kernels_growth
    # BUG FIX: the 'dropout' parameter was accepted but never forwarded,
    # so the backend silently used its own default.
    x = backend_dense1(x, n_classes, fully_connected, regularization=l2, dropout=dropout)
    model = Model(input, x)
    return model
def plot():
    """Scatter-plot accuracy versus model size (kiloparameters) from models.csv."""
    models = pandas.read_csv('models.csv')
    fig, ax = plt.subplots(1)
    print(models.head(10))
    print(models.index)
    n_labels = len(models['name'])
    # One distinct rainbow color per model
    colors = matplotlib.cm.rainbow(numpy.linspace(0, 1, n_labels))
    for idx, row in models.iterrows():
        ax.plot(row['parameters'] / 1000, row['accuracy'], 'o', label=row['name'], markersize=5, color=colors[idx], linewidth=0.1)
    ax.legend(loc='best')
    fig.savefig('perf.png')
def augmentations(audio, sr):
    """Return time-stretched and pitch-shifted variants of the audio, keyed by name."""
    stretch_factors = [0.81, 0.93, 1.07, 1.23]
    shift_steps = [-2, -1, 1, 2, -3.5, -2.5, 2.5, 3.5]
    # Time-stretch variants first, then pitch-shift variants (order preserved)
    stretched = {'ts{:.2f}'.format(factor): librosa.effects.time_stretch(audio, factor) for factor in stretch_factors}
    shifted = {'ps{:.2f}'.format(steps): librosa.effects.pitch_shift(audio, sr, steps) for steps in shift_steps}
    out = dict(stretched)
    out.update(shifted)
    return out
def compute(inp, outp, settings, force):
    """Compute mel-spectrogram features for one audio file (plus augmentations).

    Writes the base features to `outp` (.npz) and, when settings['augmentations']
    is set, 12 augmented variants to `outp` with '.augN.npz' suffixes.
    Skips work that already exists on disk unless `force` is set.
    Returns the base output path.
    """
    sr = settings['samplerate']
    _lazy_y = None

    def load():
        # Load the audio at most once, shared by base and augmented passes
        nonlocal _lazy_y
        if (_lazy_y is None):
            (_lazy_y, _sr) = librosa.load(inp, sr=sr)
            assert (_sr == sr), _sr
        return _lazy_y
    # Consider the base output valid only if it exists and is non-empty
    exists = os.path.exists(outp)
    size = 0
    if exists:
        size = os.stat(outp).st_size
    valid = (exists and (size > 0))
    if ((not valid) or force):
        # Timing checkpoints kept for ad-hoc profiling
        start_time = time.time()
        y = load()
        loaded_time = time.time()
        f = features.compute_mels(y, settings)
        computed_time = time.time()
        numpy.savez(outp, f)
        saved_time = time.time()
    if settings['augmentations']:
        # One output file per augmentation variant
        paths = [outp.replace('.npz', '.aug{}.npz'.format(aug)) for aug in range(12)]
        exists = [os.path.exists(p) for p in paths]
        if ((not all(exists)) or force):
            y = load()
            augmented = augmentations(y, sr).values()
            # The fixed set of 12 variants must match the configured count
            assert (settings['augmentations'] == 12)
            assert (len(augmented) == settings['augmentations']), len(augmented)
            for (aug, (augdata, path)) in enumerate(zip(augmented, paths)):
                f = features.compute_mels(augdata, settings)
                numpy.savez(path, f)
    return outp
def precompute(samples, settings, out_dir, n_jobs=8, verbose=1, force=False):
    """Compute feature files for all samples in parallel.

    samples: DataFrame of dataset entries (iterated with .iterrows()).
    settings: feature-extraction settings passed through to compute().
    out_dir: root directory for the generated feature files.
    """
    def job_spec(sample):
        # Resolve input/output paths and ensure the output folder exists
        path = urbansound8k.sample_path(sample)
        out_path = features.feature_path(sample, out_dir)
        folder = os.path.split(out_path)[0]
        if not os.path.exists(folder):
            os.makedirs(folder)
        return (path, out_path, settings, force)

    # Removed the unused 'feature_files' binding and the redundant
    # 'out_folder' alias for out_dir.
    jobs = [joblib.delayed(compute)(*job_spec(sample)) for (_, sample) in samples.iterrows()]
    joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(jobs)
def parse():
    """Parse command-line arguments for the feature-preprocessing script."""
    import argparse
    parser = argparse.ArgumentParser(description='Preprocess audio into features')
    common.add_arguments(parser)

    def str2bool(value):
        # BUG FIX: argparse's type=bool treats ANY non-empty string (even
        # 'False') as True. Parse the common truthy spellings explicitly.
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    a = parser.add_argument
    a('--archive', dest='archive_dir', default='', help='')
    a('--jobs', type=int, default=8, help='Number of parallel jobs')
    # nargs='?' with const=True keeps both bare '--force' and '--force True'
    # working, so existing invocations are not broken.
    a('--force', type=str2bool, nargs='?', const=True, default=False, help='Always recompute features')
    parsed = parser.parse_args()
    return parsed
def main():
    """Download the dataset if needed, precompute features, optionally archive them."""
    args = parse()
    archive = args.archive_dir
    urbansound8k.default_path = os.path.join(args.datasets_dir, 'UrbanSound8K/')
    urbansound8k.maybe_download_dataset(args.datasets_dir)
    data = urbansound8k.load_dataset()
    settings = common.load_settings_path(args.settings_path)
    settings = features.settings(settings)
    features_path = os.path.join(args.features_dir, features.settings_id(settings))
    common.ensure_directories(features_path)
    precompute(data, settings, out_dir=features_path, verbose=2, force=args.force, n_jobs=args.jobs)
    if archive:
        # BUG FIX: 'archive_path' was undefined (NameError on the archive
        # branch). Build the zip base name inside the requested archive dir.
        archive_path = os.path.join(archive, os.path.basename(features_path))
        print('Archiving as {}.zip'.format(archive_path))
        shutil.make_archive(archive_path, 'zip', features_path)
def populate_defaults():
    """Collect the default value for every setting name.

    Model settings take priority, then training, then feature settings.
    """
    settings = {}
    sources = (default_model_settings, default_training_settings, default_feature_settings)
    for key in names:
        value = None
        for source in sources:
            value = source.get(key, None)
            if value is not None:
                break
        settings[key] = value
    return settings
def test_no_overlapping_settings():
    """The three settings namespaces must not share any keys."""
    feature_keys = default_feature_settings.keys()
    training_keys = default_training_settings.keys()
    model_keys = default_model_settings.keys()
    # If any key appeared twice, the combined name list would be shorter
    total = len(feature_keys) + len(training_keys) + len(model_keys)
    assert len(names) == total
def parse_dimensions(s):
    """Parse a dimension string like '4x2' into an integer tuple (4, 2)."""
    return tuple(int(part) for part in s.split('x'))
def test_parse_dimensions():
    """parse_dimensions handles well-formed 'WxH' strings."""
    cases = [('3x3', (3, 3)), ('4x2', (4, 2))]
    for text, expected in cases:
        result = parse_dimensions(text)
        assert result == expected, (result, '!=', expected)
def load_settings(args):
    """Build the settings dict from `args`, using defaults and per-key parsers."""
    settings = {}
    for key in names:
        raw = args.get(key, defaults[key])
        # Keys without a registered parser pass through unchanged
        parse = parsers.get(key, (lambda value: value))
        settings[key] = parse(raw)
    return settings
def test_settings_empty():
    """Loading with no overrides must succeed using only defaults."""
    load_settings({})
def add_arguments(parser):
    """Register one '--name' command-line option per known setting."""
    add = parser.add_argument
    for name in names:
        # Infer the argparse converter from the default value's type.
        # NOTE(review): if a default is None or a tuple, type() yields
        # NoneType/tuple, which argparse cannot call on a string — confirm
        # that all defaults are str/int/float.
        data_type = type(defaults[name])
        add('--{}'.format(name), default=None, type=data_type, help='%(default)s')
def compute_conv2d(in_h, in_w, in_ch, out_ch, k_w, k_h):
    """Compute complexity (multiply-accumulates) for a standard Conv2D."""
    spatial = in_h * in_w
    kernel = k_w * k_h
    return spatial * in_ch * out_ch * kernel
def compute_conv2d_pw(in_h, in_w, in_ch, out_ch):
    """Compute complexity for Pointwise (1x1) Conv2D.

    $$ O_{pw} = HWNM $$
    """
    return in_h * in_w * in_ch * out_ch
def compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h):
    """Compute complexity for Depthwise Conv2D.

    $$ O_{dw} = HWNK_wK_h $$
    """
    kernel = k_w * k_h
    return in_h * in_w * in_ch * kernel
def compute_conv2d_ds(in_h, in_w, in_ch, out_ch, k_w, k_h):
    """Complexity for Depthwise Separable convolution.

    $$ O_{ds} = O_pw + O_dw $$
    """
    pointwise = compute_conv2d_pw(in_h, in_w, in_ch, out_ch)
    depthwise = compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h)
    return pointwise + depthwise
def is_training_scope(scope):
    """Return True when the TF scope name belongs to a training-only operation."""
    patterns = ('/random_uniform', '/weight_regularizer', '/dropout_', '/dropout/', 'AssignMovingAvg')
    return any(pattern in scope for pattern in patterns)
def analyze_model(build_func, input_shapes, n_classes):
    """Profile a Keras model with the TF1 profiler.

    Builds the model in a fresh Graph/Session, feeds placeholder inputs of the
    given shapes (batch size 1), and returns (flops, params): two dicts keyed
    by top-level scope name with float-op and parameter counts respectively.
    NOTE(review): uses TF1-style Session/placeholder/profiler APIs — requires
    TF1 or compat mode; 'n_classes' is currently unused here.
    """
    from tensorflow.python.framework import graph_util
    import tensorflow.python.framework.ops as ops
    from tensorflow.compat.v1.graph_util import remove_training_nodes
    from tensorflow.python.tools import optimize_for_inference_lib
    g = tf.Graph()
    run_meta = tf.RunMetadata()
    with tf.Session(graph=g) as sess:
        keras.backend.set_session(sess)
        base = build_func()
        inputs = []
        for shape in input_shapes:
            # Prepend a batch dimension of 1 for profiling
            input_shape = ([1] + list(shape))
            inp = tf.placeholder(tf.float32, input_shape)
            inputs.append(inp)
        # Instantiate the model on the placeholders so its ops enter the graph
        model = base(inputs)
        # Parameter counts per top-level scope
        opts = tf.profiler.ProfileOptionBuilder().trainable_variables_parameter()
        opts['output'] = 'none'
        params_stats = tf.profiler.profile(g, run_meta=run_meta, cmd='scope', options=opts)
        params = {}
        for scope in params_stats.children:
            params[scope.name] = scope.total_parameters
        # Floating-point operation counts per top-level scope
        flops = {}
        opts = tf.profiler.ProfileOptionBuilder().float_operation()
        opts['output'] = 'none'
        flops_stats = tf.profiler.profile(g, run_meta=run_meta, cmd='scope', options=opts)
        for scope in flops_stats.children:
            flops[scope.name] = scope.total_float_ops
    return (flops, params)
def layer_info(model):
    """Summarize each Keras layer: name, type, input/output shapes and sizes."""
    layers = model.layers
    frame = pandas.DataFrame({
        'name': [layer.name for layer in layers],
        'type': [layer.__class__.__name__ for layer in layers],
        # Drop the batch dimension from the shapes
        'shape_in': [layer.get_input_shape_at(0)[1:] for layer in layers],
        'shape_out': [layer.get_output_shape_at(0)[1:] for layer in layers],
    })
    # Total element counts per layer
    frame['size_in'] = frame.shape_in.apply(numpy.prod)
    frame['size_out'] = frame.shape_out.apply(numpy.prod)
    return frame
def stm32layer_sizes(stats):
    """Split STM32 CubeAI array stats into per-layer activation/weight sizes.

    stats['arrays'] maps generated C array names (ending in a known suffix
    such as '_weights_array' or '_output_array') to their sizes. Returns a
    DataFrame indexed by layer name with 'activations' and 'weights' columns.
    Raises AssertionError on an array name with no recognized suffix.
    """
    activation_types = set(['_output_array', '_output_in_array', '_output_out_array'])
    weight_types = set(['_weights_array', '_bias_array', '_scale_array'])
    array_types = activation_types.union(weight_types)

    def lazy_add(d, key, value):
        # Accumulate sizes per layer, initializing on first sight
        if d.get(key, None) is None:
            d[key] = 0
        d[key] += value

    activations = {}
    weights = {}
    for name, size in stats['arrays'].items():
        known = False
        for suffix in array_types:
            if name.endswith(suffix):
                # BUG FIX: str.rstrip(suffix) strips a *character set*, not the
                # suffix string, mangling names like 'dense' -> 'den'.
                # Slice the suffix off instead.
                layer_name = name[:-len(suffix)]
                out = activations if suffix in activation_types else weights
                lazy_add(out, layer_name, size)
                known = True
        assert known, 'Unknown array {}'.format(name)
    layers = set(activations.keys()).union(set(weights.keys()))
    df = pandas.DataFrame({
        'activations': [activations.get(n, math.nan) for n in layers],
        'weights': [weights.get(n, math.nan) for n in layers],
    }, dtype='int', index=list(layers))
    return df
def model_info(model):
    """Run the STM32 code generator on a Keras model and collect size stats.

    `model` may be a Keras model instance or a path to a saved model file.
    Returns (stats, combined): the generator's stats dict (with the raw
    'arrays' entry removed) and a DataFrame joining per-layer shape info
    with per-layer activation/weight sizes.
    """
    with tempfile.TemporaryDirectory(prefix='microesc') as tempdir:
        out_dir = tempdir
        if (type(model) == str):
            # Given a path: load the model so layer_info can inspect it
            model_path = model
            model = keras.models.load_model(model_path)
        else:
            # Given a model: save it so the generator can consume a file
            model_path = os.path.join(out_dir, 'model.hd5f')
            model.save(model_path)
        out_path = os.path.join(out_dir, 'gen')
        stats = stm32convert.generatecode(model_path, out_path, name='network', model_type='keras', compression=None)
        layers = layer_info(model)
        sizes = stm32layer_sizes(stats)
        # Inner join: keep only layers present in both views
        combined = layers.join(sizes, on='name', how='inner')
        # Drop the bulky raw array listing before returning
        del stats['arrays']
        return (stats, combined)
def check_model_constraints(model, max_ram=64000.0, max_maccs=(4500000.0 * 0.72), max_flash=512000.0):
    """Assert the model fits the STM32 deployment budgets (flash, RAM, MACCs).

    Returns the (stats, combined) pair from model_info on success.
    """
    (stats, combined) = model_info(model)
    limits = [
        ('flash_usage', max_flash, 'FLASH use too high: {} > {}'),
        ('ram_usage_max', max_ram, 'RAM use too high: {} > {}'),
        ('maccs_frame', max_maccs, 'CPU use too high: {} > {}'),
    ]
    for key, limit, message in limits:
        value = stats[key]
        assert value <= limit, message.format(value, limit)
    return (stats, combined)
def main():
    """Profile FLOP and parameter counts for the candidate models and print them.

    NOTE(review): removed an unused build_speech_tiny closure that referenced
    undefined 'frames'/'bands' (a latent NameError had it ever been called),
    along with the unused sample_rate/window_stride_ms locals.
    """
    models = {'SB-CNN': (sbcnn.build_model, [(128, 128, 1)])}
    model_params = {}
    model_flops = {}
    model_stats = {name: analyze_model(build, shapes, n_classes=10) for (name, (build, shapes)) in models.items()}
    for (name, stats) in model_stats.items():
        (flops, params) = stats
        # Exclude training-only scopes (dropout, regularizers, ...) from FLOPs
        inference_flops = {scope: v for (scope, v) in flops.items() if not is_training_scope(scope)}
        total_flops = sum(inference_flops.values())
        total_params = sum(params.values())
        model_params[name] = total_params
        model_flops[name] = total_flops
        print(name)
        print('Total: {:.2f}M FLOPS, {:.2f}K params'.format(total_flops / 1000000.0, total_params / 1000.0))
        print('\n'.join(['\t{}: {} flops'.format(scope, v) for (scope, v) in inference_flops.items()]))
        print('')
        print('\n'.join(['\t{}: {} params'.format(scope, v) for (scope, v) in params.items()]))
        print('\n')
    print('p', model_params)
    print('f', model_flops)
def generate_config(model_path, out_path, name='network', model_type='keras', compression=None):
    """Build the JSON configuration string for the STM32 converter tool."""
    config = {
        'name': name,
        'toolbox': model_options[model_type],
        # The tool expects four model slots; slot 4 takes no extra argument
        'models': {'1': [model_path, ''], '2': [model_path, ''], '3': [model_path, ''], '4': [model_path]},
        'compression': compression,
        'pinnr_path': out_path,
        'src_path': out_path,
        'inc_path': out_path,
        'plot_file': os.path.join(out_path, 'network.png'),
    }
    return json.dumps(config)
def parse_with_unit(s):
    """Parse a size string like '12.5 KBytes' into a number of bytes."""
    number, unit = s.split()
    multipliers = {'KBytes': 1000.0, 'MBytes': 1000000.0}
    # Unknown units raise KeyError
    return float(number) * multipliers[unit]