code
stringlengths
17
6.64M
def Adam(dx, learner, learning_rate, t, eps=1e-08, beta1=0.9, beta2=0.999):
    """Compute one Adam update step for gradient `dx`.

    Mutates `learner.m` / `learner.v` (running first/second moment
    estimates) in place and returns the parameter update for step `t`
    (1-indexed, used for bias correction).
    """
    # First moment: exponential average of the gradient, bias-corrected.
    learner.m = beta1 * learner.m + (1 - beta1) * dx
    m_hat = learner.m / (1 - beta1 ** t)
    # Second moment: exponential average of the squared gradient, bias-corrected.
    learner.v = beta2 * learner.v + (1 - beta2) * dx ** 2
    v_hat = learner.v / (1 - beta2 ** t)
    return learning_rate * m_hat / (np.sqrt(v_hat) + eps)
@ray.remote
def create_shared_noise():
    """Create a large array of noise to be shared by all workers.

    Used for avoiding the communication of the random perturbations
    delta: workers index into this table instead of receiving noise.
    """
    seed = 12345
    count = 2500000
    return np.random.RandomState(seed).randn(count).astype(np.float64)
class SharedNoiseTable(object):
    """Read-only view over a shared float64 noise array with seeded sampling."""

    def __init__(self, noise, seed=11):
        self.rg = np.random.RandomState(seed)
        self.noise = noise
        assert self.noise.dtype == np.float64

    def get(self, i, dim):
        # Slice of length `dim` starting at offset `i`.
        return self.noise[i:i + dim]

    def sample_index(self, dim):
        # Any start index that still leaves room for a full slice.
        return self.rg.randint(0, len(self.noise) - dim + 1)

    def get_delta(self, dim):
        """Sample a random index and return (index, noise slice)."""
        idx = self.sample_index(dim)
        return idx, self.get(idx, dim)
def rbf_kernel(x, y, sigma):
    """Gaussian RBF kernel: exp(-||x - y||^2 / (2 * sigma^2))."""
    sq_dist = np.linalg.norm(x - y) ** 2
    return np.exp(-sq_dist / (2 * sigma ** 2))
def rbf_kernel_grad(x, y, sigma):
    """Gradient of rbf_kernel(x, y, sigma) with respect to `y`.

    d/dy exp(-||x - y||^2 / (2 sigma^2)) = ((x - y) / sigma^2) * k(x, y).
    """
    return (x - y) / sigma ** 2 * rbf_kernel(x, y, sigma)
class Filter(object):
    """Processes input, possibly statefully (abstract interface)."""

    def update(self, other, *args, **kwargs):
        """Updates self with "new state" from other filter."""
        raise NotImplementedError

    def copy(self):
        """Creates a new object with same state as self.

        Returns:
            copy (Filter): Copy of self
        """
        raise NotImplementedError

    def sync(self, other):
        """Copies all state from other filter to self."""
        raise NotImplementedError
class NoFilter(Filter):
    """Identity filter: observations pass through unchanged (mean 0, std 1)."""

    def __init__(self, *args):
        pass

    def __call__(self, x, update=True):
        # Only coerces to a float64 array; no normalization.
        return np.asarray(x, dtype=np.float64)

    def update(self, other, *args, **kwargs):
        pass

    def copy(self):
        # Stateless, so sharing the same instance is safe.
        return self

    def sync(self, other):
        pass

    def stats_increment(self):
        pass

    def clear_buffer(self):
        pass

    def get_stats(self):
        return 0, 1

    @property
    def mean(self):
        return 0

    @property
    def var(self):
        return 1

    @property
    def std(self):
        return 1
class RunningStat(object):
    """Numerically stable running mean/variance.

    push() uses Welford's single-sample update; update() merges two stats
    with the Chan et al. pairwise formula.
    """

    def __init__(self, shape=None):
        self._n = 0  # number of samples seen
        self._M = np.zeros(shape, dtype=np.float64)  # running mean
        self._S = np.zeros(shape, dtype=np.float64)  # sum of squared deviations
        # NOTE(review): _M2 is never updated or read anywhere in this class
        # (copy() does not even duplicate it); kept only so any external code
        # touching the attribute keeps working.
        self._M2 = np.zeros(shape, dtype=np.float64)

    def copy(self):
        """Return an independent copy of the accumulated statistics."""
        other = RunningStat()
        other._n = self._n
        other._M = np.copy(self._M)
        other._S = np.copy(self._S)
        return other

    def push(self, x):
        """Fold one sample `x` (same shape as this stat) into the statistics."""
        x = np.asarray(x)
        assert x.shape == self._M.shape, 'x.shape = {}, self.shape = {}'.format(x.shape, self._M.shape)
        n1 = self._n
        self._n += 1
        if self._n == 1:
            self._M[...] = x
        else:
            # BUG FIX: the original computed an unused local
            # `deltaM2 = np.square(x) - self._M2` on every push; dead work removed.
            delta = x - self._M
            self._M[...] += delta / self._n
            self._S[...] += delta * delta * n1 / self._n

    def update(self, other):
        """Merge another RunningStat into this one.

        NOTE(review): assumes n1 + n2 > 0; merging two empty stats divides by zero.
        """
        n1 = self._n
        n2 = other._n
        n = n1 + n2
        delta = self._M - other._M
        delta2 = delta * delta
        M = (n1 * self._M + n2 * other._M) / n
        S = self._S + other._S + delta2 * n1 * n2 / n
        self._n = n
        self._M = M
        self._S = S

    def __repr__(self):
        return '(n={}, mean_mean={}, mean_std={})'.format(self.n, np.mean(self.mean), np.mean(self.std))

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Unbiased variance; with fewer than two samples fall back to
        # mean**2, matching the original behavior.
        return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class MeanStdFilter(Filter):
    """Keeps track of a running mean/std for seen states and normalizes them."""

    def __init__(self, shape, demean=True, destd=True):
        self.shape = shape
        self.demean = demean  # subtract the mean when filtering
        self.destd = destd    # divide by the std when filtering
        self.rs = RunningStat(shape)      # all-time accumulated statistics
        self.buffer = RunningStat(shape)  # statistics since last clear_buffer()
        # Snapshot actually used by __call__; refreshed by stats_increment().
        self.mean = np.zeros(shape, dtype=np.float64)
        self.std = np.ones(shape, dtype=np.float64)

    def clear_buffer(self):
        self.buffer = RunningStat(self.shape)
        return

    def update(self, other, copy_buffer=False):
        """Takes another filter and only applies the information from the
        buffer.

        Using notation `F(state, buffer)`
        Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
        `update` modifies `Filter1` to `Filter1(x1 + yt, y1)`
        If `copy_buffer`, then `Filter1` is modified to
        `Filter1(x1 + yt, yt)`.
        """
        self.rs.update(other.buffer)
        if copy_buffer:
            self.buffer = other.buffer.copy()
        return

    def copy(self):
        """Returns a copy of Filter."""
        other = MeanStdFilter(self.shape)
        other.demean = self.demean
        other.destd = self.destd
        other.rs = self.rs.copy()
        other.buffer = self.buffer.copy()
        return other

    def sync(self, other):
        """Syncs all fields together from other filter.

        Using notation `F(state, buffer)`
        Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
        `sync` modifies `Filter1` to `Filter1(x2, yt)`
        """
        assert other.shape == self.shape, "Shapes don't match!"
        self.demean = other.demean
        self.destd = other.destd
        self.rs = other.rs.copy()
        self.buffer = other.buffer.copy()
        return

    def __call__(self, x, update=True):
        x = np.asarray(x, dtype=np.float64)
        if update:
            if len(x.shape) == len(self.rs.shape) + 1:
                # A batch of observations: push each row separately.
                for i in range(x.shape[0]):
                    self.rs.push(x[i])
                    self.buffer.push(x[i])
            else:
                self.rs.push(x)
                self.buffer.push(x)
        if self.demean:
            x = x - self.mean
        if self.destd:
            x = x / (self.std + 1e-08)
        return x

    def stats_increment(self):
        """Refresh the mean/std snapshot used by __call__."""
        self.mean = self.rs.mean
        self.std = self.rs.std
        # Dimensions that have not varied yet get infinite std so the
        # division effectively zeroes them out.
        self.std[self.std < 1e-07] = float('inf')
        return

    def get_stats(self):
        return (self.rs.mean, (self.rs.std + 1e-08))

    def __repr__(self):
        # BUG FIX: the original template had six '{}' placeholders but only
        # four format arguments, so any repr() call raised IndexError.
        return 'MeanStdFilter({}, {}, {}, {}, {})'.format(
            self.shape, self.demean, self.destd, self.rs, self.buffer)
def get_filter(filter_config, shape=None):
    """Factory mapping an observation-filter config string to a Filter."""
    if filter_config == 'MeanStdFilter':
        return MeanStdFilter(shape)
    if filter_config == 'NoFilter':
        return NoFilter()
    raise Exception('Unknown observation_filter: ' + str(filter_config))
@ray.remote
class Worker(object):
    """Ray actor that evaluates antithetic (+delta/-delta) policy perturbations
    via environment rollouts, drawing deltas from a shared noise table."""

    # NOTE(review): class-scope import — presumably so the actor process
    # registers this package's environments with gym; confirm.
    import simpleenvs

    def __init__(self, env_seed, env_name='', shift=0, policy='FC', h_dim=64,
                 layers=2, deltas=None, rollout_length=1000, delta_std=0.02,
                 num_evals=0, ob_filter='NoFilter'):
        # Build the environment and record its dimensions for the policy.
        self.params = {}
        self.env_name = env_name
        self.params['env_name'] = env_name
        self.env = gym.make(env_name)
        self.params['ob_dim'] = self.env.observation_space.shape[0]
        self.params['ac_dim'] = self.env.action_space.shape[0]
        self.env.seed(0)
        self.params['h_dim'] = h_dim
        self.steps = rollout_length  # episode length cap
        self.params['zeros'] = True
        self.params['seed'] = 0
        self.params['layers'] = layers
        self.shift = shift  # subtracted from every per-step reward
        self.sigma = 1
        self.num_evals = num_evals
        self.params['ob_filter'] = ob_filter
        self.policy = get_policy(self.params)
        # Offset the seed so each worker draws different noise indices.
        self.deltas = SharedNoiseTable(deltas, (env_seed + 7))
        self.delta_std = delta_std

    def do_rollouts(self, policy, num_rollouts, selected_states, use_states=0,
                    indices=None, seed=0, train=True):
        """Evaluate num_rollouts antithetic perturbations of `policy`.

        If `indices` is given, re-evaluates those exact noise slices instead
        of sampling new ones. Returns a dict of per-rollout rewards,
        sparsities, data, noise indices, embeddings and total step count.
        """
        (rollout_rewards, deltas_idx, sparsities, data, embeddings) = ([], [], [], [], [])
        steps = 0
        for i in range(num_rollouts):
            if (indices is None):
                (idx, delta) = self.deltas.get_delta(policy.size)
            else:
                idx = indices[i]
                delta = self.deltas.get(idx, policy.size)
            delta = (self.delta_std * delta).reshape(policy.shape)
            deltas_idx.append(idx)
            # Positive perturbation rollout.
            self.policy.update((policy + delta))
            (pos_reward, pos_steps, pos_sparse, pos_data) = self.rollouts(seed, train)
            if use_states:
                pos_embedding = np.concatenate([self.policy.forward(x, eval=False) for x in selected_states], axis=0)
            else:
                pos_embedding = []
            # Negative (antithetic) perturbation rollout.
            self.policy.update((policy - delta))
            (neg_reward, neg_steps, neg_sparse, neg_data) = self.rollouts(seed, train)
            if use_states:
                neg_embedding = np.concatenate([self.policy.forward(x, eval=False) for x in selected_states], axis=0)
            else:
                neg_embedding = []
            rollout_rewards.append([pos_reward, neg_reward])
            sparsities.append([pos_sparse, neg_sparse])
            data.append([pos_data, neg_data])
            steps += (pos_steps + neg_steps)
            embeddings.append([pos_embedding, neg_embedding])
        return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards,
                'sparsities': sparsities, 'steps': steps, 'data': data,
                'embedding': embeddings}

    def rollouts(self, seed=0, train=True):
        """Run one episode (or num_evals unseeded episodes, summed).

        Returns (total_reward, timesteps, sparsity, data).
        """
        self.env._max_episode_steps = self.steps
        if (self.num_evals > 0):
            total_reward = 0
            timesteps = 0
            sparsity = self.policy.used
            data = []
            for _ in range(self.num_evals):
                # Unseeded reset: average over random initial states.
                self.env.seed(None)
                state = self.env.reset()
                (reward, ts, sp, d) = self.rollout(state)
                sparsity += sp
                total_reward += reward
                timesteps += ts
                data += d
        else:
            if (not hasattr(self.env, 'tasks')):
                self.env.seed(seed)
            state = self.env.reset()
            (total_reward, timesteps, sparsity, data) = self.rollout(state)
        return (total_reward, timesteps, sparsity, data)

    def rollout(self, state):
        """Single episode from `state`, clipping actions to the env's bounds."""
        total_reward = 0
        done = False
        timesteps = 0
        sparsity = self.policy.used
        data = []
        while (not done):
            action = self.policy.forward(state)
            if hasattr(self.env, 'envtype'):
                if (self.env.envtype == 'dm'):
                    # dm-style env: bounds come from the action spec.
                    action = np.clip(action, self.env.env.action_spec().minimum, self.env.env.action_spec().maximum)
                else:
                    action = np.clip(action, self.env.env.action_space.low[0], self.env.env.action_space.high[0])
                action = action.reshape(len(action))
            elif (self.env_name.split(':')[0] != 'bsuite'):
                # Plain gym env (bsuite envs are not clipped/reshaped).
                action = np.clip(action, self.env.action_space.low[0], self.env.action_space.high[0])
                action = action.reshape(len(action))
            (state, reward, done, _) = self.env.step(action)
            # `shift` offsets the per-step reward.
            total_reward += (reward - self.shift)
            timesteps += 1
            data.append([state, reward, np.array(action)])
        return (total_reward, timesteps, sparsity, data)

    def stats_increment(self):
        self.policy.observation_filter.stats_increment()
        return

    def get_filter(self):
        return self.policy.observation_filter

    def sync_filter(self, other):
        self.policy.observation_filter.sync(other)
        return
def explore(config):
    """Clamp a sampled hyperparameter config to valid values (in place)."""
    floor = config['sgd_minibatch_size'] * 2
    if config['train_batch_size'] < floor:
        config['train_batch_size'] = floor
    if config['num_sgd_iter'] < 1:
        config['num_sgd_iter'] = 1
    config['target_delay'] = int(config['target_delay'])
    return config
def explore(config):
    """Clamp a sampled config: batch-size floor, lambda cap, integer batch.

    NOTE(review): this redefines the `explore` from earlier in the file and
    shadows it at module level — confirm both are meant to coexist.
    """
    floor = config['sgd_minibatch_size'] * 2
    if config['train_batch_size'] < floor:
        config['train_batch_size'] = floor
    if config['lambda'] > 1:
        config['lambda'] = 1
    config['train_batch_size'] = int(config['train_batch_size'])
    return config
def create_eval_set(fold):
    """Build the evaluation dataframe for `fold`, tagging rows with their set."""
    data = urbansound8k.load_dataset()
    folds, test = urbansound8k.folds(data)
    test = test.copy()
    train = folds[fold][0].copy()
    val = folds[fold][1].copy()
    test['set'] = 'test'
    train['set'] = 'train'
    val['set'] = 'val'
    # NOTE(review): only test+val are concatenated; `train` is labeled but
    # then dropped — confirm that is intentional.
    return pandas.concat([test, val])
def load_sample(sample):
    """Load one sample's features using the module-level `exsettings`."""
    fsettings = features.settings(exsettings)
    return features.load_sample(sample, fsettings,
                                start_time=sample.start,
                                window_frames=exsettings['frames'],
                                feature_dir='data/features')
def predict(model, data):
    """Voted prediction over overlapping windows, per module-level `exsettings`."""
    return features.predict_voted(exsettings, model, data,
                                  loader=load_sample,
                                  method=exsettings['voting'],
                                  overlap=exsettings['voting_overlap'])
def model_predict(predictor, model_path, data):
    """Load a Keras model from `model_path` and run `predictor` on `data`."""
    model = keras.models.load_model(model_path)
    return predictor(model, data)
def threshold(df, q=0.8):
    """q-th quantile of `best_p` over misclassified rows (confidence cutoff)."""
    wrong = df[df.correct == False]  # noqa: E712 — pandas elementwise compare
    return wrong.best_p.quantile(q=q)
def plot_errors(df, ax=None, q=0.8, bins=20, ylim=None):
    """Histogram `best_p` for correct (green) vs incorrect (red) predictions,
    optionally marking the q-quantile error threshold with a vertical line."""
    if ax is None:
        fig, ax = plt.subplots(1)
    # groupby('correct') yields the False group first, then the True group.
    (_, wrong), (isright, right) = df.groupby('correct')
    assert isright == True
    right.best_p.hist(ax=ax, color='green', alpha=0.4, bins=bins)
    wrong.best_p.hist(ax=ax, color='red', alpha=0.4, bins=bins)
    ax.set_xlim((0, 1.0))
    if ylim:
        ax.set_ylim(ylim)
    if q is not None:
        p_min = threshold(df, q=q)
        ax.axvline(x=p_min, color='black', linewidth=1.0, alpha=0.7)
    return ax
def plot_errors_classwise(df, figsize=(12, 4)):
    """Grid of per-class error histograms, two classes per column.

    NOTE(review): this groups the module-level `eval_set`, not the `df`
    argument — `df` is unused; confirm whether this should be
    `df.groupby('class')`.
    """
    groups = eval_set.groupby('class')
    (fig, axs) = plt.subplots(2, (len(groups) // 2), figsize=figsize)
    for (i, (classname, data)) in enumerate(groups):
        # Fill column-major: classes i and i+1 share a column.
        x = (i // 2)
        y = (i % 2)
        ax = axs[(y, x)]
        ax.set_title(classname)
        # Only the leftmost column keeps y labels; only the bottom row keeps x labels.
        if (x != 0):
            ax.tick_params(labelleft=False)
        if (y == 0):
            ax.tick_params(labelbottom=False)
        bins = numpy.linspace(0, 1.0, 20)
        plot_errors(data, ax=ax, ylim=(0, 30), q=None, bins=bins)
    fig.tight_layout()
    return fig
def score(df, average=None, threshold=0.0):
    """Precision/recall where low-confidence predictions are masked to 11.

    Predictions with best_p below `threshold` are replaced by label 11,
    which is outside the scored labels 0-9. Returns a Series with
    precision, recall and the masked ("uncertain") fraction.
    """
    y_true = df.classID
    y_pred = df.best_y
    uncertain = df.best_p < threshold
    uncertain_ratio = numpy.count_nonzero(uncertain.astype(int)) / len(y_pred)
    y_pred = y_pred.mask(uncertain, 11)
    labels = list(range(0, 10))
    precision = sklearn.metrics.precision_score(y_true, y_pred, average=average, labels=labels)
    recall = sklearn.metrics.recall_score(y_true, y_pred, average=average, labels=labels)
    return pandas.Series({'precision': precision, 'recall': recall, 'uncertain': uncertain_ratio})
def plot_precision_recall(data, ax=None):
    """Plot metric-vs-threshold (left) and precision-recall curves (right).

    NOTE(review): the `ax` parameter is accepted but never used — the
    function always creates its own figure — and nothing is returned.
    """
    # Sweep the "unknown" probability threshold over [0, 1).
    df = pandas.DataFrame({'threshold': numpy.linspace(0, 1.0, 50, endpoint=False)})
    micro = df.apply((lambda r: score(data, average='micro', threshold=r.threshold)), axis=1)
    micro['threshold'] = df.threshold
    micro['micro'] = micro.precision
    macro = df.apply((lambda r: score(data, average='macro', threshold=r.threshold)), axis=1)
    macro['threshold'] = df.threshold
    macro['macro'] = macro.precision
    (fig, (ax2, ax)) = plt.subplots(1, 2, figsize=(8, 4))
    macro.plot.line(ax=ax2, y=['precision', 'recall'], x='threshold', ylim=(0.0, 1.0))
    micro.plot.line(ax=ax, y='micro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    macro.plot.line(ax=ax, y='macro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    ax.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax.grid(True)
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    ax.set_aspect('equal')
    ax2.set_aspect('equal')
    ax2.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax2.grid(True)
    ax2.set_xlabel('Probability threshold for "unknown" class')
    ax2.set_ylabel('Performance metric')
    fig.tight_layout()
def load_device_results(results_dir):
    """Load all `*.device.json` files in `results_dir` into one DataFrame.

    Each file becomes a row indexed by the experiment name (the filename
    without the `.device.json` suffix). Single quotes in the files are
    normalized to double quotes before JSON parsing.
    """
    suffix = '.device.json'
    frames = []
    for filename in os.listdir(results_dir):
        if not filename.endswith(suffix):
            continue
        # BUG FIX: the original used filename.rstrip(suffix); rstrip strips a
        # *character set*, not a suffix, so names ending in any of
        # ".devicjson" characters were silently truncated (e.g. "expdev" -> "exp").
        experiment = filename[:-len(suffix)]
        p = os.path.join(results_dir, filename)
        with open(p, 'r') as f:
            contents = f.read()
        contents = contents.replace("'", '"')
        d = json.loads(contents)
        d['experiment'] = experiment
        frames.append(pandas.DataFrame([d]))
    df = pandas.concat(frames)
    df.set_index('experiment', inplace=True)
    return df
def plot_layers_ram(layers_ram, ax=None, max_ram=64000.0):
    """Horizontal bar chart of per-layer activation RAM (4 bytes per activation).

    Shades the region within the `max_ram` budget. Returns the Figure.
    """
    if not ax:
        fig, ax = plt.subplots(1, figsize=(4, 6))
    else:
        # BUG FIX: when a caller supplied `ax`, `fig` was never assigned and
        # the final `return fig` raised NameError.
        fig = ax.get_figure()
    l = layers_ram.sort_index(ascending=False)
    l['activations_ram'] = 4 * l.activations  # 4 bytes per float32 activation
    l = l[l.activations_ram.notna()]
    l.plot(kind='barh', ax=ax, y='activations_ram', x='name')
    ax.axvspan(xmin=0, xmax=max_ram, alpha=0.2, color='green')
    return fig
def read_report(ser):
    """Capture a report from serial `ser`.

    Starts collecting at a line beginning with 'Results for' (inclusive)
    and stops after a line ending with 'cfg=0' (inclusive). Returns the
    captured lines joined with newlines.
    """
    captured = []
    state = 'wait-for-start'
    while state != 'ended':
        line = ser.readline().decode('utf-8').strip()
        if state == 'wait-for-start' and line.startswith('Results for'):
            state = 'started'
        if state == 'started':
            captured.append(line)
            if line.endswith('cfg=0'):
                state = 'ended'
    return '\n'.join(captured)
def parse_report(report):
    """Parse a device benchmark report into a metrics dict.

    Extracts cpu_mhz and macc from the '@<f>MHz/<f>MHz ... complexity: N MACC'
    line, then stack / duration_avg (seconds) / cycles from 'key : value' lines.
    """
    out = {}
    # Header line: "@<mhz>MHz/<mhz>MHz ... complexity: <n> MACC"
    result_regexp = '@(\\d*)MHz\\/(\\d*)MHz.*complexity:\\s(\\d*)\\sMACC'
    first = list(re.finditer(result_regexp, report, re.MULTILINE))[0]
    cpu_freq, cpu_freq_max, macc = first.groups()
    out['cpu_mhz'] = int(cpu_freq)
    out['macc'] = int(macc)
    # Remaining metrics appear as "key : value" lines.
    key_value_regex = '(.*)\\s:\\s(.*)'
    for match in re.finditer(key_value_regex, report, re.MULTILINE):
        key, value = (s.strip() for s in match.groups())
        if key == 'used stack':
            out['stack'] = int(value.rstrip(' bytes'))
        elif key == 'duration':
            # Reported in milliseconds; store seconds.
            out['duration_avg'] = float(value.rstrip(' ms (average)')) / 1000
        elif key == 'CPU cycles':
            out['cycles_avg'] = int(value.split()[0])
            out['cycles_macc'] = out['cycles_avg'] / out['macc']
    return out
def test_parse_report():
    """Sanity-check parse_report against the module-level example_report."""
    out = parse_report(example_report)
    assert out['duration_avg'] == 0.325142
    assert out['cycles_avg'] == 26011387
    assert out['stack'] == 276
    assert out['cpu_mhz'] == 80
    assert out['macc'] == 2980798
def main():
    """Self-test the parser, then read one report over serial and print JSON."""
    test_parse_report()
    device = '/dev/ttyACM0'
    baudrate = 115200
    with serial.Serial(device, baudrate, timeout=0.5) as ser:
        thrash = ser.read(10000)  # drain stale bytes before the report
        report = read_report(ser)
        out = parse_report(report)
        print(json.dumps(out))
def ensure_dir(directory):
    """Create `directory` (including parents) if it does not already exist.

    Uses exist_ok=True, which removes the check-then-create race present in
    the original os.path.exists() guard.
    """
    os.makedirs(directory, exist_ok=True)
def ensure_dir_for_file(path):
    """Ensure the parent directory of `path` exists.

    ROBUSTNESS FIX: a bare filename has an empty dirname, and creating ''
    raises — skip that case instead of crashing.
    """
    directory = os.path.dirname(path)
    if directory:
        ensure_dir(directory)
def ensure_directories(*dirs):
    """Ensure every directory in `dirs` exists."""
    # Renamed the loop variable to avoid shadowing the `dir` builtin.
    for directory in dirs:
        ensure_dir(directory)
def add_arguments(parser):
    """Register the shared data/feature/model/settings path options."""
    arg = parser.add_argument
    arg('--datasets', dest='datasets_dir', default='./data/datasets', help='%(default)s')
    arg('--features', dest='features_dir', default='./data/features', help='%(default)s')
    arg('--models', dest='models_dir', default='./data/models', help='%(default)s')
    arg('--settings', dest='settings_path', default='./experiments/ldcnn20k60.yaml', help='%(default)s')
def load_settings_path(path):
    """Load experiment settings from the YAML file at `path`.

    SECURITY/DEPRECATION FIX: yaml.load without an explicit Loader is
    deprecated and can construct arbitrary Python objects; settings files
    are plain data, so safe_load is the correct call.
    """
    with open(path, 'r') as config_file:
        return yaml.safe_load(config_file.read())
def arglist(options):
    """Render an options dict as command-line flags.

    None values become bare flags (--name); everything else --name=value.
    """
    def fmt(key, value):
        return '--{}'.format(key) if value is None else '--{}={}'.format(key, value)

    return [fmt(k, v) for k, v in options.items()]
def command_for_job(options):
    """Full train.py command line (as an argv list) for a job options dict."""
    return ['python3', 'train.py'] + arglist(options)
def generate_train_jobs(experiments, settings_path, folds, overrides, ignored=['nickname']):
    """Expand experiments x folds into a list of job option dicts.

    Each job is named '<experiment>-<timestamp>-<uuid4 prefix>-fold<F>'.
    `overrides` wins over per-experiment settings; keys listed in `ignored`
    are dropped from the final options.

    NOTE: `ignored` keeps its original mutable default for interface
    compatibility; it is only read, never mutated.
    """
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
    unique = str(uuid.uuid4())[0:4]

    def name(experiment, fold):
        # Shared timestamp+uuid so all jobs of one invocation group together.
        base = '-'.join([experiment, timestamp, unique])
        return base + '-fold{}'.format(fold)

    def create_job(exname, experiment, fold):
        options = {'name': name(exname, fold), 'fold': fold, 'settings': settings_path}
        for k, v in experiment.items():
            if k == 'modelcheck':
                # 'modelcheck: skip' maps to the bare --skip_model_check flag.
                if v == 'skip':
                    options['skip_model_check'] = None
            else:
                options[k] = v
        options.update(overrides)
        for k in ignored:
            # BUG FIX: the original `del options[k]` raised KeyError when an
            # ignored key was absent from the experiment row.
            options.pop(k, None)
        return options

    jobs = []
    for fold in folds:
        for idx, ex in experiments.iterrows():
            jobs.append(create_job(str(idx), ex, fold))
    assert len(jobs) == len(experiments) * len(folds), len(jobs)
    return jobs
def run_job(jobdata, out_dir, verbose=2):
    """Run one training job as a subprocess, teeing its stdout to a log file.

    Creates <out_dir>/<name>/ containing `cmdline` and `stdout.log`, asserts
    the expected training artifacts exist afterwards, and returns a dict with
    start/end timestamps and the exit code.
    """
    args = command_for_job(jobdata)
    job_dir = os.path.join(out_dir, jobdata['name'])
    common.ensure_directories(job_dir)
    log_path = os.path.join(job_dir, 'stdout.log')
    cmdline = ' '.join(args)
    # Record the exact command used, for reproducibility.
    with open(os.path.join(job_dir, 'cmdline'), 'w') as f:
        f.write(cmdline)
    start = time.time()
    print('starting job', cmdline)
    print('job log', log_path)
    exitcode = None
    with open(log_path, 'w') as log_file:
        process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        # Stream subprocess output line by line into the log
        # (and to our stdout when verbose enough).
        for line in iter(process.stdout.readline, b''):
            line = line.decode('utf-8')
            if (verbose > 2):
                sys.stdout.write(line)
            log_file.write(line)
            log_file.flush()
        exitcode = process.wait()
    # Sanity-check that training produced its expected artifacts.
    files = os.listdir(job_dir)
    assert ('train.csv' in files), files
    assert ('history.csv' in files), files
    model_files = [p for p in files if p.endswith('.hdf5')]
    assert (len(model_files) > 0), files
    end = time.time()
    res = {'start': start, 'end': end, 'exitcode': exitcode}
    return res
def run_jobs(commands, out_dir, n_jobs=5, verbose=1):
    """Run jobs in parallel with joblib; returns the list of per-job results."""
    tasks = [joblib.delayed(run_job)(cmd, out_dir) for cmd in commands]
    return joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(tasks)
def parse(args):
    """Parse command-line arguments for the job-generation script."""
    import argparse
    parser = argparse.ArgumentParser(description='Generate jobs')
    common.add_arguments(parser)
    add = parser.add_argument
    add('--experiments', default='models.csv', help='%(default)s')
    add('--check', action='store_true', help='Only run a pre-flight check')
    add('--jobs', type=int, default=5, help='Number of parallel jobs')
    add('--folds', type=int, default=10, help='Number of folds to test')
    add('--start', type=int, default=0, help='First experiment')
    add('--stop', type=int, default=None, help='Last experiment')
    return parser.parse_args(args)
def main():
    """Generate and run all training jobs described by the experiments CSV."""
    args = parse(sys.argv[1:])
    experiments = pandas.read_csv(args.experiments)
    # NOTE(review): `settings` is loaded but never used below — confirm
    # whether generate_train_jobs should receive it instead of the path.
    settings = common.load_settings_path(args.settings_path)
    stop = (len(experiments) if (args.stop is None) else args.stop)
    experiments = experiments.loc[range(args.start, stop)]
    overrides = {}
    folds = list(range(1, (args.folds + 1)))
    assert (max(folds) <= 10)
    if args.check:
        # Pre-flight mode: shrink everything so each job finishes quickly.
        batches = 2
        overrides['batch'] = 10
        overrides['epochs'] = 1
        overrides['train_samples'] = (batches * overrides['batch'])
        overrides['val_samples'] = (batches * overrides['batch'])
    cmds = generate_train_jobs(experiments, args.settings_path, folds, overrides)
    # NOTE(review): the '{}' placeholder is printed literally — probably
    # meant print('Preparing {} jobs'.format(len(cmds))).
    print('Preparing {} jobs', len(cmds))
    print('\n'.join([c['name'] for c in cmds]))
    out = run_jobs(cmds, args.models_dir, n_jobs=args.jobs)
    print(out)
    success = all([(o['exitcode'] == 0) for o in out])
    assert success
def build(settings):
    """Instantiate the model family named in `settings` with its options."""
    builder = families.get(settings['model'])
    options = dict(frames=settings['frames'],
                   bands=settings['n_mels'],
                   channels=settings.get('channels', 1))
    known_settings = ['conv_size', 'conv_block', 'downsample_size', 'n_stages',
                      'dropout', 'fully_connected', 'n_blocks_per_stage', 'filters']
    # Absent keys are forwarded as None.
    for key in known_settings:
        options[key] = settings.get(key, None)
    return builder(**options)
def build_model(frames=128, bands=40, channels=1, n_classes=10, conv_size=(3, 3),
                conv_block='conv', downsample_size=(2, 2), n_stages=3,
                n_blocks_per_stage=1, filters=128, kernels_growth=1.0,
                fully_connected=64, rnn_units=32, temporal='bigru',
                dropout=0.5, l2=0.001, backend='detection'):
    """Separable-conv front-end followed by a temporal model.

    temporal: 'bigru' (two bidirectional GRUs) or 'tcn' (two strided
    separable convs). backend: 'classification', 'detection', or falsy for
    no head. Several parameters (conv_block, n_stages, n_blocks_per_stage,
    kernels_growth, dropout, l2, ...) are accepted for interface
    compatibility but unused here.
    """
    from tensorflow.keras import Model, Sequential
    from tensorflow.keras.layers import Conv2D, LSTM, GRU, Bidirectional, MaxPooling2D, Reshape, TimeDistributed, Softmax, Dense, SeparableConv2D

    model = Sequential()
    input_shape = (frames, bands, channels)

    def add_conv_block(model, downsample_size, conv_filters=filters, kernel_size=conv_size, **kwargs):
        # One separable-conv + max-pool stage.
        # BUG FIX: the original declared kernel_size but passed conv_size to
        # SeparableConv2D, silently ignoring the parameter.
        model.add(SeparableConv2D(conv_filters, kernel_size, **kwargs))
        model.add(MaxPooling2D(downsample_size))

    # Downsample only the band (second) axis; keep the frame axis intact.
    add_conv_block(model, downsample_size=(1, 5), input_shape=input_shape)
    add_conv_block(model, downsample_size=(1, 2))
    add_conv_block(model, downsample_size=(1, 2))

    if temporal == 'bigru':
        o = model.layers[-1].output_shape
        model.add(Reshape((o[1], -1)))  # flatten to (time, features) for the RNN
        model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
        model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
    elif temporal == 'tcn':
        model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2, 1)))
        model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2, 1)))
    else:
        raise ValueError(f'Unknown temporal parameter {temporal}')

    o = model.layers[-1].output_shape
    if backend == 'classification':
        model.add(TimeDistributed(Dense(fully_connected, activation='linear')))
        # BUG FIX: the original called `layers.Dense(...)` but `layers` was
        # never imported, raising NameError on this branch.
        model.add(Dense(n_classes))
        model.add(Softmax())
    elif backend == 'detection':
        model.add(TimeDistributed(Dense(n_classes, activation='linear'), input_shape=(o[1], o[2])))
        model.add(Softmax())
    elif not backend:
        pass
    else:
        raise ValueError(f"Unsupported backend '{backend}'")
    return model
def test_model():
    """Smoke-test: the tcn variant builds and summarizes without error."""
    model = build_model(filters=24, bands=64, rnn_units=16, n_classes=3, temporal='tcn')
    print(model.summary())
def dcnn_head(input, head_name, filters=80, kernel=(3, 3)):
    """Dilated-conv + pooling feature extractor for one input branch."""
    from keras.layers import Convolution2D, Flatten, MaxPooling2D

    def n(base):
        # Layer names must be unique across heads.
        return base + '_' + head_name

    x = input
    x = Convolution2D(filters, kernel, dilation_rate=(1, 1), name=n('DilaConv1'))(x)
    x = MaxPooling2D(pool_size=(4, 3), name=n('MPL1'))(x)
    x = Convolution2D(filters, kernel, dilation_rate=(2, 2), name=n('DilaConv2'))(x)
    x = MaxPooling2D(pool_size=(1, 3), name=n('MPL2'))(x)
    return Flatten(name=n('flatten'))(x)
def dcnn(bands=60, frames=31, n_classes=10, fully_connected=5000, filters=80, activation='relu'):
    """Dilated Convolution Neural Network with LeakyReLU for Environmental Sound Classification

    https://ieeexplore.ieee.org/document/8096153
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    import keras.layers

    input_shape = (bands, frames, 1)
    # Two parallel heads: mel spectrogram and its delta features.
    mel_input = Input(shape=input_shape, name='mel_input')
    delta_input = Input(shape=input_shape, name='delta_input')
    heads = [dcnn_head(mel_input, 'mel', filters),
             dcnn_head(delta_input, 'delta', filters)]
    m = keras.layers.concatenate(heads)
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(n_classes, activation='softmax')(m)
    return Model([mel_input, delta_input], m)
def dcnn_nodelta(bands=60, frames=31, n_classes=10, channels=1, fully_connected=5000, filters=80, activation='relu'):
    """Single-input variant of dcnn: mel branch only, no delta branch."""
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    import keras.layers

    input_shape = (bands, frames, channels)
    mel_input = Input(shape=input_shape, name='mel_input')
    m = dcnn_head(mel_input, 'mel', filters)
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(n_classes, activation='softmax')(m)
    return Model(mel_input, m)
def main():
    """Build, save and summarize both DCNN variants."""
    model = dcnn()
    model.save('dcnn.hdf5')
    model.summary()
    model = dcnn_nodelta()
    model.save('dcnn.nodelta.hdf5')
    model.summary()
def build_model(bands=60, frames=41, channels=1, n_labels=10, dropout=0.0,
                depth=7, block=2, growth=15, pooling='avg', bottleneck=False,
                reduction=0.0, subsample=True):
    """DenseNet classifier from keras_contrib, parameterized for spectrogram input."""
    from keras_contrib.applications import densenet
    return densenet.DenseNet(input_shape=(bands, frames, channels),
                             pooling=pooling,
                             depth=depth,
                             nb_dense_block=block,
                             growth_rate=growth,
                             bottleneck=bottleneck,
                             reduction=reduction,
                             subsample_initial_block=subsample,
                             include_top=True,
                             classes=n_labels,
                             dropout_rate=dropout)
def main():
    """Build, save and summarize the DenseNet model."""
    model = build_model()
    model.save('densenet.hdf5')
    model.summary()
def dilaconv(bands=64, frames=41, channels=2, dilation=(2, 2), kernel=(3, 3),
             n_labels=10, dropout=0.5, kernels=[32, 32, 64, 64]):
    """Environmental sound classification with dilated convolutions

    https://www.sciencedirect.com/science/article/pii/S0003682X18306121
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, GlobalAveragePooling2D
    from keras.regularizers import l2

    input_shape = (bands, frames, channels)
    # First conv has no dilation and carries the input shape.
    conv = [Convolution2D(kernels[0], kernel, input_shape=input_shape, activation='relu')]
    conv += [Convolution2D(k, kernel, dilation_rate=dilation, activation='relu')
             for k in kernels[1:]]
    head = [GlobalAveragePooling2D(), Dropout(dropout), Dense(n_labels, activation='softmax')]
    return Sequential(conv + head)
def main():
    """Build and save the dilated-conv model variants, printing summaries."""
    model = dilaconv()
    model.summary()
    model.save('dilaconv.hdf5')

    model = ldcnn()
    model.save('ldcnn.hdf5')
    model.summary()

    model = ldcnn_nodelta()
    model.save('ldcnn.nodelta.hdf5')
    model.summary()
def build_model(bands=128, frames=128, channels=2, n_classes=10, filters=80,
                L=57, W=6, fully_connected=5000):
    """Deep Convolutional Neural Network with Mixup for Environmental Sound Classification

    https://link.springer.com/chapter/10.1007/978-3-030-03335-4_31

    NOTE(review): there is no Flatten/pooling before the Dense layers, so
    they apply along the last axis per spatial position — confirm intended.
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.layers import Convolution2D, Flatten, MaxPooling2D
    import keras.layers

    input_shape = (bands, frames, channels)
    stack = [
        Convolution2D(32, (3, 7), padding='same', input_shape=input_shape),
        Convolution2D(32, (3, 5), padding='same'),
        MaxPooling2D(pool_size=(4, 3)),
        Convolution2D(64, (3, 1), padding='same'),
        Convolution2D(64, (3, 1), padding='same'),
        MaxPooling2D(pool_size=(4, 1)),
        Convolution2D(128, (1, 5), padding='same'),
        Convolution2D(128, (1, 5), padding='same'),
        MaxPooling2D(pool_size=(1, 3)),
        Convolution2D(256, (3, 3), padding='same'),
        Convolution2D(256, (3, 3), padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dense(512, activation='relu'),
        Dense(n_classes, activation='softmax'),
    ]
    return Sequential(stack)
def main():
    """Build, summarize and save the mixup-CNN reference model."""
    model = build_model()
    model.summary()
    model.save('dmix.orig.hdf5')
def get_post(x_in):
    """ReLU activation followed by batch normalization."""
    return BatchNormalization()(Activation('relu')(x_in))
def get_block(x_in, ch_in, ch_out, kernel=3, downsample=2, strides=(1, 1)):
    """One Effnet-style block: pointwise conv, spatially separated depthwise
    convs with pooling, then a pointwise expansion to `ch_out` channels."""
    y = Conv2D(ch_in, kernel_size=(1, 1), strides=strides, padding='same', use_bias=False)(x_in)
    y = get_post(y)
    # Depthwise conv separated into a (1, k) pass and a (k, 1) pass.
    y = DepthwiseConv2D(kernel_size=(1, kernel), padding='same', use_bias=False)(y)
    y = get_post(y)
    y = MaxPool2D(pool_size=(downsample, 1), strides=(downsample, 1))(y)
    y = DepthwiseConv2D(kernel_size=(kernel, 1), padding='same', use_bias=False)(y)
    y = get_post(y)
    y = Conv2D(ch_out, kernel_size=(downsample, 1), strides=(1, downsample), padding='same', use_bias=False)(y)
    return get_post(y)
def Effnet(input_shape, nb_classes, n_blocks=2, initial_filters=16, filter_growth=2.0, dropout=0.5, kernel=5, downsample=2, pool=None, include_top='flatten', weights=None):
    """EffNet-style model: `n_blocks` blocks with geometrically growing filters.

    include_top: 'flatten' for a Flatten+Dense softmax head, 'conv' for a
    fully-convolutional head; any other value leaves the top off.
    NOTE(review): `filters_out` below requires n_blocks >= 1, and `pool`
    is accepted but unused.
    """
    # Only square kernels are supported; collapse (k, k) to k.
    if getattr(kernel, '__iter__', None):
        assert (kernel[0] == kernel[1])
        kernel = kernel[0]
    x_in = Input(shape=input_shape)
    x = x_in
    for block_no in range(n_blocks):
        filters_in = int((initial_filters * (filter_growth ** block_no)))
        filters_out = int((initial_filters * (filter_growth ** (block_no + 1))))
        # Only the first block downsamples via strides.
        strides = ((2, 2) if (block_no == 0) else (1, 1))
        x = get_block(x, filters_in, filters_out, kernel=kernel, downsample=downsample, strides=strides)
    if (include_top == 'flatten'):
        x = Flatten()(x)
        x = Dropout(dropout)(x)
        x = Dense(nb_classes, activation='softmax')(x)
    elif (include_top == 'conv'):
        # Fully-convolutional classification head.
        x = GlobalAveragePooling2D()(x)
        shape = (1, 1, filters_out)
        x = Reshape(shape)(x)
        x = Dropout(dropout)(x)
        x = Conv2D(nb_classes, (1, 1), padding='same')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((nb_classes,))(x)
    model = Model(inputs=x_in, outputs=x)
    if (weights is not None):
        # by_name allows loading partial/architecture-variant weight files.
        model.load_weights(weights, by_name=True)
    return model
def build_model(frames=31, bands=60, channels=1, n_classes=10, **kwargs):
    """Effnet with (bands, frames, channels) input ordering."""
    return Effnet((bands, frames, channels), nb_classes=n_classes, **kwargs)
def main():
    """Build, summarize and save the Effnet model."""
    model = build_model()
    model.summary()
    model.save('effnet.hdf5')
def ldcnn_head(input, head_name, filters=80, L=57, W=6):
    """LD-CNN feature extractor head: separated (L,1)/(1,W) convs, pooling,
    and a dilated conv, flattened to a vector."""
    from keras.layers import Convolution2D, Flatten, MaxPooling2D, BatchNormalization

    def n(base):
        # Unique layer names per head.
        return base + '_' + head_name

    x = input
    # Separated convolution: (L, 1) then (1, W) instead of a full LxW kernel.
    x = Convolution2D(filters, (L, 1), activation='relu', name=n('SFCL1'))(x)
    x = BatchNormalization()(x)
    x = Convolution2D(filters, (1, W), activation='relu', name=n('SFCL2'))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(4, 3), strides=(1, 3), name=n('MPL1'))(x)
    x = Convolution2D(filters, (1, 3), dilation_rate=(2, 2), name=n('DCL'))(x)
    x = MaxPooling2D(pool_size=(1, 3), strides=(1, 3), name=n('MPL2'))(x)
    return Flatten(name=n('flatten'))(x)
def ldcnn(bands=60, frames=31, n_classes=10, filters=80, L=57, W=6, fully_connected=5000, dropout=0.25):
    """LD-CNN: A Lightweight Dilated Convolutional Neural Network for Environmental Sound Classification

    http://epubs.surrey.ac.uk/849351/1/LD-CNN.pdf
    """
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.regularizers import l2
    import keras.layers

    input_shape = (bands, frames, 1)
    mel_input = Input(shape=input_shape, name='mel_input')
    delta_input = Input(shape=input_shape, name='delta_input')
    heads = [ldcnn_head(mel_input, 'mel', filters, L, W),
             ldcnn_head(delta_input, 'delta', filters, L, W)]
    # The two heads are merged by elementwise addition.
    m = keras.layers.add(heads, name='FSL')
    m = Dropout(dropout)(m)
    m = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(m)
    m = Dropout(dropout)(m)
    m = Dense(n_classes, activation='softmax')(m)
    return Model([mel_input, delta_input], m)
def ldcnn_nodelta(bands=60, frames=31, n_classes=10, filters=80, L=57, W=6, channels=1, fully_connected=5000, dropout=0.5):
    """Variation of LD-CNN with only mel input (no deltas)"""
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.regularizers import l2

    input_shape = (bands, frames, channels)
    mel_input = Input(shape=input_shape, name='mel_input')
    m = ldcnn_head(mel_input, 'mel', filters, L, W)
    m = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(m)
    m = Dropout(dropout)(m)
    m = Dense(n_classes, kernel_regularizer=l2(0.001))(m)
    # NOTE(review): dropout between the final Dense and the softmax is an
    # unusual ordering — confirm it matches the intended architecture.
    m = Dropout(dropout)(m)
    m = Activation('softmax')(m)
    return Model(mel_input, m)
def relu6(x, name):
    """Apply a ReLU activation (fills the MobileNet relu6 slot).

    DEAD-CODE FIX: the original guarded `layers.ReLU(6.0, name=name)` behind
    `if False:`, so it never ran; the dead branch is removed.
    NOTE(review): despite the name, this applies an unbounded 'relu', not a
    clipped relu6, and the `name` argument is not forwarded — confirm both
    are intentional.
    """
    return layers.Activation('relu')(x)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """First MobileNet-style block: ZeroPad -> Conv2D -> BatchNorm -> ReLU.

    Args:
        inputs: 4D input tensor.
        filters: base filter count, scaled by the width multiplier `alpha`.
        alpha: width multiplier.
        kernel: (rows, cols) convolution kernel size.
        strides: convolution strides.
    """
    channel_axis = (1 if (backend.image_data_format() == 'channels_first') else (- 1))
    filters = int((filters * alpha))
    # Pad only bottom/right by half the kernel so the 'valid' strided conv
    # behaves like 'same'. BUGFIX: row padding previously used kernel[1]
    # (cols) instead of kernel[0] (rows) — identical for square kernels but
    # wrong for rectangular ones.
    padding = ((0, (kernel[0] // 2)), (0, (kernel[1] // 2)))
    x = layers.ZeroPadding2D(padding=padding, name='conv1_pad')(inputs)
    x = layers.Conv2D(filters, kernel, padding='valid', use_bias=False,
                      strides=strides, name='conv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return relu6(x, name='conv1_relu')
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), kernel=(3, 3), block_id=1):
    """MobileNet depthwise-separable block:
    DepthwiseConv -> BN -> ReLU -> 1x1 Conv -> BN -> ReLU.

    `block_id` only disambiguates layer names; `alpha` scales the pointwise
    filter count.
    """
    channel_axis = (1 if (backend.image_data_format() == 'channels_first') else (- 1))
    pointwise_conv_filters = int((pointwise_conv_filters * alpha))
    layers = keras.layers
    if (strides == (1, 1)):
        x = inputs
    else:
        # Strided path: pad bottom/right manually, then convolve 'valid'.
        # BUGFIX: row padding previously used kernel[1] (cols) instead of
        # kernel[0] (rows) — identical for square kernels only.
        x = layers.ZeroPadding2D(((0, (kernel[0] // 2)), (0, (kernel[1] // 2))),
                                 name=('conv_pad_%d' % block_id))(inputs)
    x = layers.DepthwiseConv2D(kernel,
                               padding=('same' if (strides == (1, 1)) else 'valid'),
                               depth_multiplier=depth_multiplier,
                               strides=strides, use_bias=False,
                               name=('conv_dw_%d' % block_id))(x)
    x = layers.BatchNormalization(axis=channel_axis, name=('conv_dw_%d_bn' % block_id))(x)
    x = relu6(x, name=('conv_dw_%d_relu' % block_id))
    # Pointwise (1x1) projection to the target channel count.
    x = layers.Conv2D(pointwise_conv_filters, (1, 1), padding='same',
                      use_bias=False, strides=(1, 1),
                      name=('conv_pw_%d' % block_id))(x)
    x = layers.BatchNormalization(axis=channel_axis, name=('conv_pw_%d_bn' % block_id))(x)
    return relu6(x, name=('conv_pw_%d_relu' % block_id))
def build_model(frames=32, bands=32, channels=1, n_classes=10, dropout=0.5, depth_multiplier=1, alpha=0.5, n_stages=2, initial_filters=24, kernel=(5, 5), pool=(2, 2)):
    """Build a MobileNet-style CNN classifier over (bands, frames, channels) input.

    Uses _conv_block for the stem and _depthwise_conv_block for the stages;
    classification is done with a 1x1 conv after global average pooling.
    """
    (stride_f, stride_t) = pool
    from keras.applications import mobilenet  # NOTE(review): unused import
    conv = _conv_block
    dwconv = _depthwise_conv_block
    assert (keras.backend.image_data_format() == 'channels_last')
    input_shape = (bands, frames, channels)
    img_input = keras.layers.Input(shape=input_shape)
    # Stem: strided full conv, then one depthwise block.
    x = conv(img_input, initial_filters, alpha, kernel=kernel, strides=(2, 2))
    x = dwconv(x, (initial_filters * 2), alpha, depth_multiplier, block_id=1)
    # Stages 1..n_stages-1: filters double each stage; first block downsamples.
    for stage_no in range(1, n_stages):
        filters = (initial_filters * (2 ** stage_no))
        x = dwconv(x, filters, alpha, depth_multiplier, kernel=kernel, strides=(stride_f, stride_t), block_id=(stage_no * 2))
        x = dwconv(x, filters, alpha, depth_multiplier, kernel=kernel, block_id=((stage_no * 2) + 1))
    # `filters` here is the last stage's count; alpha-scaled to match dwconv output.
    shape = (1, 1, int((filters * alpha)))
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Reshape(shape, name='reshape_1')(x)
    x = keras.layers.Dropout(dropout, name='dropout')(x)
    # 1x1 conv acts as the classifier head (MobileNet convention).
    x = keras.layers.Conv2D(n_classes, (1, 1), padding='same', name='conv_preds')(x)
    x = keras.layers.Activation('softmax', name='act_softmax')(x)
    x = keras.layers.Reshape((n_classes,), name='reshape_2')(x)
    model = keras.Model(img_input, x)
    return model
def build_model(bands=60, frames=41, channels=2, n_labels=10, fc=5000, dropout=0.5):
    """
    Implements the short-segment CNN from

    ENVIRONMENTAL SOUND CLASSIFICATION WITH CONVOLUTIONAL NEURAL NETWORKS
    Karol J. Piczak, 2015.
    https://karol.piczak.com/papers/Piczak2015-ESC-ConvNet.pdf

    BUGFIX: a Flatten layer was missing between the conv stack and the
    fully-connected layers; without it Dense acts on the last axis only and
    the model outputs a 4D tensor instead of (batch, n_labels).

    NOTE(review): the conv layers have no activation (linear), whereas the
    paper uses ReLU — confirm whether that is intentional.
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.regularizers import l2
    input_shape = (bands, frames, channels)
    model = Sequential([
        Convolution2D(80, ((bands - 3), 6), strides=(1, 1), input_shape=input_shape),
        MaxPooling2D((4, 3), strides=(1, 3)),
        Convolution2D(80, (1, 3)),
        MaxPooling2D((1, 3), strides=(1, 3)),
        Flatten(),
        Dense(fc, activation='relu'),
        Dropout(dropout),
        Dense(fc, activation='relu'),
        Dropout(dropout),
        Dense(n_labels, activation='softmax'),
    ])
    return model
def main():
    """Build the original Piczak model, save it to disk, and print a summary."""
    model = build_model()
    model.save('piczak.orig.hdf5')
    model.summary()
def build_model(frames=128, bands=128, channels=1, n_classes=10, conv_size=(5, 5), conv_block='conv', downsample_size=(4, 2), fully_connected=64, n_stages=None, n_blocks_per_stage=None, filters=24, kernels_growth=2, dropout=0.5, use_strides=False):
    """
    Implements SB-CNN model from
    Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification
    Salamon and Bello, 2016.
    https://arxiv.org/pdf/1608.04363.pdf

    Based on https://gist.github.com/jaron/5b17c9f37f351780744aefc74f93d3ae
    but parameters are changed back to those of the original paper authors,
    and added Batch Normalization

    NOTE(review): n_stages and n_blocks_per_stage are accepted but unused here.
    """
    # Blocks 2 and 3 may use depthwise-separable convs; block 1 is always full conv.
    Conv2 = (SeparableConv2D if (conv_block == 'depthwise_separable') else Convolution2D)
    assert (conv_block in ('conv', 'depthwise_separable'))
    kernel = conv_size
    # Downsample either via strided convolutions or via max-pooling.
    if use_strides:
        strides = downsample_size
        pool = (1, 1)
    else:
        strides = (1, 1)
        pool = downsample_size
    block1 = [Convolution2D(filters, kernel, padding='same', strides=strides, input_shape=(bands, frames, channels)), BatchNormalization(), MaxPooling2D(pool_size=pool), Activation('relu')]
    block2 = [Conv2((filters * kernels_growth), kernel, padding='same', strides=strides), BatchNormalization(), MaxPooling2D(pool_size=pool), Activation('relu')]
    # Final conv block uses 'valid' padding and no pooling.
    block3 = [Conv2((filters * kernels_growth), kernel, padding='valid', strides=strides), BatchNormalization(), Activation('relu')]
    # Dense classifier with L2 regularization and dropout.
    backend = [Flatten(), Dropout(dropout), Dense(fully_connected, kernel_regularizer=l2(0.001)), Activation('relu'), Dropout(dropout), Dense(n_classes, kernel_regularizer=l2(0.001)), Activation('softmax')]
    layers = (((block1 + block2) + block3) + backend)
    model = Sequential(layers)
    return model
def build_model(frames=172, shingles=8, bands=40, channels=1, codebook=2000):
    """
    Convolutional part of the SKM model from

    UNSUPERVISED FEATURE LEARNING FOR URBAN SOUND CLASSIFICATION
    Justin Salamon and Juan Pablo Bello, 2015

    A single full-height conv layer acting as the learned codebook.
    """
    shape = (bands, frames, channels)
    codebook_layer = Convolution2D(codebook, (bands, shingles),
                                   strides=(1, shingles), padding='same',
                                   activation=None, input_shape=shape)
    return Sequential([codebook_layer])
def main():
    """Build the original SKM model and print its layer summary."""
    print('original')
    model = build_model()
    model.summary()
def build_tiny_conv(input_frames, input_bins, n_classes=12, dropout=0.5):
    """
    Tiny keyword-spotting CNN, ported from the TensorFlow examples
    (create_tiny_conv_model).
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    layer_stack = [
        Conv2D(8, (8, 10), strides=(2, 2), padding='same', activation='relu',
               use_bias=True, input_shape=(input_bins, input_frames, 1)),
        Dropout(dropout),
        Flatten(),
        Dense(n_classes, activation='softmax', use_bias=True),
    ]
    return keras.Sequential(layer_stack)
def build_one(frames=64, bands=40, n_classes=10, dropout=0.0, tstride=1, fstride=4):
    """
    Ported from Tensorflow examples. create_low_latency_conv

    This is roughly the network labeled as 'cnn-one-fstride4' in the
    'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
    http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

    NOTE(review): there is no Flatten between the conv and Dense layers —
    confirm the intended output shape with callers.
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    n_kernels = 90
    time_kernel = 32
    freq_kernel = 8
    bottleneck_units = 32
    layer_stack = [
        Conv2D(n_kernels, (time_kernel, freq_kernel),
               strides=(tstride, fstride), padding='valid', activation='relu',
               use_bias=True, input_shape=(frames, bands, 1)),
        Dense(bottleneck_units, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(n_classes, activation='softmax', use_bias=True),
    ]
    return keras.Sequential(layer_stack)
def build_low_latency_conv(input_frames, input_bins, n_classes=12, dropout=0.5):
    """
    Ported from Tensorflow examples. create_low_latency_conv

    This is roughly the network labeled as 'cnn-one-fstride4' in the
    'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
    http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

    Returns a compiled model (rmsprop / categorical crossentropy).
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    shape = (input_frames, input_bins, 1)
    model = keras.Sequential([
        # Full-height conv collapses the time dimension in one step.
        Conv2D(186, (input_frames, 8), strides=(1, 1), padding='valid',
               activation='relu', use_bias=True, input_shape=shape),
        Dropout(dropout),
        Flatten(),
        Dense(128, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(128, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(n_classes, activation='softmax', use_bias=True),
    ])
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def build_aclnet_lowlevel(input_samples, c1=32, s1=8, s2=4, input_tensor=None):
    """
    Low-level (raw-waveform) front-end of ACLNet.

    The following values were tested in the paper.
    c1= 8,16,32
    s1= 2,4,8
    s2= 2,4
    """
    from keras.layers import Conv1D, MaxPooling1D, InputLayer, Flatten, Dense
    # Pool width is chosen so the total downsampling factor stays fixed at 160.
    pool_width = int(160 / (s2 * s1))
    layer_stack = [
        InputLayer(input_shape=(input_samples, 1), input_tensor=input_tensor),
        Conv1D(filters=c1, kernel_size=9, strides=s1, padding='valid',
               activation=None, use_bias=False),
        Conv1D(filters=64, kernel_size=5, strides=s2, padding='valid',
               activation=None, use_bias=False),
        MaxPooling1D(pool_size=(pool_width,), padding='valid',
                     data_format='channels_last'),
        Flatten(),
        Dense(1, activation=None),
    ]
    return keras.Sequential(layer_stack)
def main():
    """Build each example model and print its layer summary."""
    models = [
        build_low_latency_conv(41, 40),
        build_tiny_conv(32, 40),
        build_one(),
    ]
    for model in models:
        model.summary()
def fire_module(x, fire_id, squeeze=16, expand=64):
    """SqueezeNet fire module: a 1x1 'squeeze' conv feeding two parallel
    'expand' convs (1x1 and 3x3), concatenated along the channel axis.
    """
    from keras.layers import concatenate
    prefix = ('fire' + str(fire_id)) + '/'
    squeezed = Convolution2D(squeeze, (1, 1), padding='valid',
                             name=prefix + 'squeeze1x1')(x)
    squeezed = Activation('relu', name=prefix + 'relu_squeeze1x1')(squeezed)
    left = Convolution2D(expand, (1, 1), padding='valid',
                         name=prefix + 'expand1x1')(squeezed)
    left = Activation('relu', name=prefix + 'relu_expand1x1')(left)
    right = Convolution2D(expand, (3, 3), padding='same',
                          name=prefix + 'expand3x3')(squeezed)
    right = Activation('relu', name=prefix + 'relu_expand3x3')(right)
    # Channel axis is 3 only for channels_last layouts.
    assert (keras.backend.image_data_format() == 'channels_last')
    return concatenate([left, right], axis=3, name=prefix + 'concat')
def build_model(frames=32, bands=32, channels=1, n_classes=10, dropout=0.5, n_stages=3, modules_per_stage=2, initial_filters=64, squeeze_ratio=0.2, pool=(2, 2), kernel=(3, 3), stride_f=2, stride_t=2):
    """SqueezeNet-style classifier built from fire modules.

    NOTE(review): the `kernel` parameter is unused (the stem conv is
    hard-coded to 3x3) — confirm whether it should be wired in.
    """
    from keras.models import Model
    from keras.layers import Input, GlobalAveragePooling2D, Dropout, MaxPooling2D
    input_shape = (bands, frames, channels)
    img_input = keras.layers.Input(shape=input_shape)
    # Stem: strided full conv + ReLU.
    x = Convolution2D(initial_filters, (3, 3), strides=(stride_f, stride_t), padding='valid', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    module_idx = 0
    # Stages run 1..n_stages-1, so n_stages=3 yields two pooled stages.
    for stage_no in range(1, n_stages):
        # Filters grow linearly per stage (not doubling as in original
        # SqueezeNet) — presumably a deliberate slimming; verify.
        expand = (initial_filters * stage_no)
        squeeze = int((expand * squeeze_ratio))
        x = MaxPooling2D(pool_size=pool, strides=(stride_f, stride_t), name=('pool' + str(stage_no)))(x)
        for module_no in range(modules_per_stage):
            x = fire_module(x, fire_id=module_idx, squeeze=squeeze, expand=expand)
            module_idx += 1
    x = Dropout(dropout, name='drop9')(x)
    # 1x1 conv + global average pooling replaces a dense classifier head.
    x = Convolution2D(n_classes, (1, 1), padding='valid', name='topconv')(x)
    x = Activation('relu', name='relu_topconv')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax', name='loss')(x)
    model = keras.Model(img_input, x)
    return model
def add_common(x, name):
    """Append the shared BatchNorm + ReLU tail used after every conv layer."""
    normalized = BatchNormalization(name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(normalized)
def conv(x, kernel, filters, downsample, name, padding='same'):
    """Regular convolutional block: Conv2D followed by the shared BN + ReLU tail."""
    out = Conv2D(filters, kernel, strides=downsample,
                 name=name, padding=padding)(x)
    return add_common(out, name)
def conv_ds(x, kernel, filters, downsample, name, padding='same'):
    """Depthwise-separable convolutional block (Depthwise -> Pointwise),
    MobileNet style, followed by the shared BN + ReLU tail.
    """
    ds_name = name + '_ds'
    out = SeparableConv2D(filters, kernel, padding=padding,
                          strides=downsample, name=ds_name)(x)
    return add_common(out, name=ds_name)
def conv_bottleneck_ds(x, kernel, filters, downsample, name, padding='same', bottleneck=0.5):
    """
    Bottleneck -> Depthwise Separable
    (Pointwise -> Depthwise -> Pointwise)

    MobileNetV2 style. `bottleneck` scales the channel count of the
    intermediate pointwise conv.
    """
    if (padding == 'valid'):
        # Manual bottom/right padding so the 'valid' separable conv keeps
        # alignment. NOTE(review): uses kernel[0] for both axes — confirm
        # for rectangular kernels.
        pad = ((0, (kernel[0] // 2)), (0, (kernel[0] // 2)))
        x = ZeroPadding2D(padding=pad, name=(name + 'pad'))(x)
    x = Conv2D(int((filters * bottleneck)), (1, 1), padding='same',
               strides=downsample, name=(name + '_pw'))(x)
    # BUGFIX: the BN+ReLU after the pointwise conv was computed but its
    # result discarded (`add_common(x, ...)` without assignment), silently
    # skipping the activation; assign it so it actually takes effect.
    x = add_common(x, (name + '_pw'))
    x = SeparableConv2D(filters, kernel, padding=padding, strides=(1, 1),
                        name=(name + '_ds'))(x)
    return add_common(x, (name + '_ds'))
def conv_effnet(x, kernel, filters, downsample, name, bottleneck=0.5, strides=(1, 1), padding='same', bias=False):
    """Pointwise -> Spatially Separable conv&pooling
    Effnet style

    NOTE(review): the `strides` parameter is unused; spatial downsampling is
    carried by the leading pointwise conv via `downsample`.
    """
    # Only square downsampling and square kernels are supported.
    assert (downsample[0] == downsample[1])
    downsample = downsample[0]
    assert (kernel[0] == kernel[1])
    kernel = kernel[0]
    # Bottleneck channel count in, full channel count out.
    ch_in = int((filters * bottleneck))
    ch_out = filters
    if (padding == 'valid'):
        # Manual bottom/right padding so the 'valid' convs keep alignment.
        pad = ((0, (kernel // 2)), (0, (kernel // 2)))
        x = ZeroPadding2D(padding=pad, name=(name + 'pad'))(x)
    # Pointwise bottleneck conv carries the downsampling stride.
    x = Conv2D(ch_in, (1, 1), strides=downsample, padding=padding, use_bias=bias, name=(name + 'pw'))(x)
    x = add_common(x, name=(name + 'pw'))
    # Spatially-separable depthwise pair: (1 x k) then (k x 1).
    x = DepthwiseConv2D((1, kernel), padding=padding, use_bias=bias, name=(name + 'dwv'))(x)
    x = add_common(x, name=(name + 'dwv'))
    x = DepthwiseConv2D((kernel, 1), padding='same', use_bias=bias, name=(name + 'dwh'))(x)
    x = add_common(x, name=(name + 'dwh'))
    # Restore the full channel count.
    x = Conv2D(ch_out, (1, 1), padding=padding, use_bias=bias, name=(name + 'rh'))(x)
    return add_common(x, name=(name + 'rh'))
def backend_dense1(x, n_classes, fc=64, regularization=0.001, dropout=0.5):
    """
    SB-CNN style classification backend:
    Flatten -> Dropout -> Dense+ReLU -> Dropout -> Dense -> softmax.
    """
    from keras.regularizers import l2
    out = Flatten()(x)
    out = Dropout(dropout)(out)
    out = Dense(fc, kernel_regularizer=l2(regularization))(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Dense(n_classes, kernel_regularizer=l2(regularization))(out)
    return Activation('softmax')(out)
def build_model(frames=128, bands=128, channels=1, n_classes=10, conv_size=(5, 5), conv_block='conv', downsample_size=(2, 2), n_stages=3, n_blocks_per_stage=1, filters=24, kernels_growth=1.5, fully_connected=64, dropout=0.5, l2=0.001):
    """
    Configurable CNN: `n_stages` stages of `n_blocks_per_stage` conv blocks
    (block type chosen by `conv_block`), followed by a dense classifier.
    """
    input = Input(shape=(bands, frames, channels))
    x = input
    block_no = 0
    for stage_no in range(0, n_stages):
        for b_no in range(0, n_blocks_per_stage):
            # The very last block uses 'valid' padding, all others 'same'.
            padding = ('valid' if (block_no == ((n_stages * n_blocks_per_stage) - 1)) else 'same')
            # Only the first block of each stage downsamples.
            downsample = (downsample_size if (b_no == 0) else (1, 1))
            # First block overall is always a regular conv; later blocks use
            # the type registered in block_types under `conv_block`.
            conv_func = (conv if (block_no == 0) else block_types.get(conv_block))
            name = 'conv{}'.format(block_no)
            x = conv_func(x, conv_size, int(filters), downsample, name=name, padding=padding)
            block_no += 1
        filters = (filters * kernels_growth)
    # NOTE: the parameter `l2` shadows keras.regularizers.l2; it is the
    # regularization strength passed to the dense backend.
    x = backend_dense1(x, n_classes, fully_connected, regularization=l2)
    model = Model(input, x)
    return model
def plot():
    """Scatter-plot model accuracy versus parameter count (in thousands),
    reading 'models.csv' and writing 'perf.png'.
    """
    models = pandas.read_csv('models.csv')
    (fig, ax) = plt.subplots(1)
    print(models.head(10))
    print(models.index)
    n_labels = len(models['name'])
    # One distinct color per model.
    palette = matplotlib.cm.rainbow(numpy.linspace(0, 1, n_labels))
    for (idx, row) in models.iterrows():
        ax.plot(row['parameters'] / 1000, row['accuracy'], 'o',
                label=row['name'], markersize=5,
                color=palette[idx], linewidth=0.1)
    ax.legend(loc='best')
    fig.savefig('perf.png')
def augmentations(audio, sr):
    """Return a dict of augmented variants of `audio`: 4 time-stretched and
    8 pitch-shifted versions, keyed 'ts<factor>' / 'ps<steps>'.
    """
    stretch_factors = [0.81, 0.93, 1.07, 1.23]
    shift_steps = [(- 2), (- 1), 1, 2, (- 3.5), (- 2.5), 2.5, 3.5]
    out = {}
    for factor in stretch_factors:
        out['ts{:.2f}'.format(factor)] = librosa.effects.time_stretch(audio, factor)
    for steps in shift_steps:
        out['ps{:.2f}'.format(steps)] = librosa.effects.pitch_shift(audio, sr, steps)
    return out
def compute(inp, outp, settings, force):
    """Compute mel features for one audio file and save to `outp` (.npz).

    When `settings['augmentations']` is truthy, also computes features for 12
    time-stretch/pitch-shift variants saved as '<outp>.aug<N>.npz'. Existing
    non-empty outputs are skipped unless `force` is set. Returns `outp`.
    """
    sr = settings['samplerate']
    # Lazy-load the audio so it is decoded at most once, and only when some
    # output actually needs (re)computing.
    _lazy_y = None

    def load():
        nonlocal _lazy_y
        if (_lazy_y is None):
            (_lazy_y, _sr) = librosa.load(inp, sr=sr)
            assert (_sr == sr), _sr
        return _lazy_y
    exists = os.path.exists(outp)
    size = 0
    if exists:
        size = os.stat(outp).st_size
    # A zero-byte file counts as invalid (e.g. an interrupted previous run).
    valid = (exists and (size > 0))
    if ((not valid) or force):
        # Timing variables look like leftover instrumentation; currently unused.
        start_time = time.time()
        y = load()
        loaded_time = time.time()
        f = features.compute_mels(y, settings)
        computed_time = time.time()
        numpy.savez(outp, f)
        saved_time = time.time()
    if settings['augmentations']:
        paths = [outp.replace('.npz', '.aug{}.npz'.format(aug)) for aug in range(12)]
        exists = [os.path.exists(p) for p in paths]
        if ((not all(exists)) or force):
            y = load()
            # Relies on the insertion order of the augmentations() dict for
            # stable augmentation numbering.
            augmented = augmentations(y, sr).values()
            # Only exactly 12 augmentations are supported here.
            assert (settings['augmentations'] == 12)
            assert (len(augmented) == settings['augmentations']), len(augmented)
            for (aug, (augdata, path)) in enumerate(zip(augmented, paths)):
                f = features.compute_mels(augdata, settings)
                numpy.savez(path, f)
    return outp
def precompute(samples, settings, out_dir, n_jobs=8, verbose=1, force=False):
    """Compute features for every sample in parallel, writing them under
    `out_dir` (directories are created as needed).
    """
    def job_spec(sample):
        # Resolve input/output paths for one sample and ensure the output
        # directory exists before the parallel workers start.
        in_path = urbansound8k.sample_path(sample)
        out_path = features.feature_path(sample, out_dir)
        directory = os.path.split(out_path)[0]
        if not os.path.exists(directory):
            os.makedirs(directory)
        return (in_path, out_path, settings, force)

    tasks = [joblib.delayed(compute)(*job_spec(sample))
             for (_, sample) in samples.iterrows()]
    joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(tasks)
def parse():
    """Parse command-line arguments for the feature-preprocessing script."""
    import argparse

    def str2bool(value):
        # BUGFIX: `type=bool` treats ANY non-empty string — including
        # 'False' — as True. Parse common boolean spellings explicitly.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(description='Preprocess audio into features')
    common.add_arguments(parser)
    a = parser.add_argument
    a('--archive', dest='archive_dir', default='', help='')
    a('--jobs', type=int, default=8, help='Number of parallel jobs')
    a('--force', type=str2bool, default=False, help='Always recompute features')
    parsed = parser.parse_args()
    return parsed
def main():
    """Entry point: ensure the dataset is present, precompute features,
    and optionally archive the feature directory as a zip.
    """
    args = parse()
    archive = args.archive_dir
    urbansound8k.default_path = os.path.join(args.datasets_dir, 'UrbanSound8K/')
    urbansound8k.maybe_download_dataset(args.datasets_dir)
    data = urbansound8k.load_dataset()
    settings = common.load_settings_path(args.settings_path)
    settings = features.settings(settings)
    features_path = os.path.join(args.features_dir, features.settings_id(settings))
    common.ensure_directories(features_path)
    precompute(data, settings, out_dir=features_path, verbose=2,
               force=args.force, n_jobs=args.jobs)
    if archive:
        # BUGFIX: `archive_path` was referenced but never defined (NameError
        # whenever --archive was given). Place the zip inside the requested
        # archive directory, named after the feature-settings id.
        archive_path = os.path.join(archive, os.path.basename(features_path))
        print('Archiving as {}.zip'.format(archive_path))
        shutil.make_archive(archive_path, 'zip', features_path)
def populate_defaults():
    """Collect the default value for each known setting name, searching the
    model, training, and feature default dicts in that priority order.
    """
    sources = (default_model_settings,
               default_training_settings,
               default_feature_settings)
    settings = {}
    for name in names:
        value = None
        for source in sources:
            value = source.get(name, None)
            if value is not None:
                break
        settings[name] = value
    return settings
def test_no_overlapping_settings():
    """Every setting name must appear in exactly one of the default dicts."""
    total = sum(len(d) for d in (default_feature_settings,
                                 default_training_settings,
                                 default_model_settings))
    assert len(names) == total
def parse_dimensions(s):
    """Parse an 'AxB' dimension string (e.g. '4x2') into a tuple of ints (4, 2)."""
    return tuple(int(part) for part in s.split('x'))
def test_parse_dimensions():
    """parse_dimensions handles typical 'AxB' inputs."""
    cases = {'3x3': (3, 3), '4x2': (4, 2)}
    for text, expected in cases.items():
        got = parse_dimensions(text)
        assert got == expected, (got, '!=', expected)
def load_settings(args):
    """Build the settings dict from `args`, falling back to `defaults` for
    missing keys and converting each value with its registered parser
    (identity when no parser is registered).
    """
    settings = {}
    for key in names:
        raw = args.get(key, defaults[key])
        convert = parsers.get(key, (lambda v: v))
        settings[key] = convert(raw)
    return settings
def test_settings_empty():
    """load_settings must succeed with no overrides at all (pure defaults)."""
    load_settings({})
def add_arguments(parser):
    """Register one '--<name>' option per known setting.

    Defaults are deliberately None so load_settings can tell unset options
    apart from explicitly-passed ones; the option type mirrors the type of
    the registered default value.
    """
    for name in names:
        parser.add_argument('--{}'.format(name),
                            default=None,
                            type=type(defaults[name]),
                            help='%(default)s')
def compute_conv2d(in_h, in_w, in_ch, out_ch, k_w, k_h):
    """Multiply-accumulate count for a standard Conv2D:

    $$ O = H * W * N * M * K_w * K_h $$
    """
    spatial = in_h * in_w
    kernel_area = k_w * k_h
    return spatial * in_ch * out_ch * kernel_area
def compute_conv2d_pw(in_h, in_w, in_ch, out_ch):
    """Multiply-accumulate count for a pointwise (1x1) Conv2D:

    $$ O_{pw} = HWNM $$
    """
    return in_h * in_w * in_ch * out_ch
def compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h):
    """Multiply-accumulate count for a depthwise Conv2D:

    $$ O_{dw} = HWNK_wK_h $$
    """
    kernel_area = k_w * k_h
    return in_h * in_w * in_ch * kernel_area