code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def get_adv_losses(discriminator_real_outputs, discriminator_fake_outputs,
                   kind):
    """Return the corresponding GAN losses for the generator and the
    discriminator.

    Args:
        discriminator_real_outputs: Discriminator logits for real samples.
        discriminator_fake_outputs: Discriminator logits for fake samples.
        kind: GAN loss type; one of 'classic', 'nonsaturating',
            'wasserstein' or 'hinge'.

    Returns:
        Tuple of (generator_loss, discriminator_loss).

    Raises:
        ValueError: If `kind` is not a supported GAN loss type.
    """
    if kind == 'classic':
        loss_fn = classic_gan_losses
    elif kind == 'nonsaturating':
        loss_fn = nonsaturating_gan_losses
    elif kind == 'wasserstein':
        loss_fn = wasserstein_gan_losses
    elif kind == 'hinge':
        loss_fn = hinge_gan_losses
    else:
        # Fail loudly; the previous fall-through produced a confusing
        # NameError on `loss_fn` for unknown kinds.
        raise ValueError("Unknown GAN loss type: {}".format(kind))
    return loss_fn(discriminator_real_outputs, discriminator_fake_outputs)
Return the corresponding GAN losses for the generator and the discriminator.
get_adv_losses
python
salu133445/musegan
src/musegan/losses.py
https://github.com/salu133445/musegan/blob/master/src/musegan/losses.py
MIT
def classic_gan_losses(discriminator_real_outputs, discriminator_fake_outputs):
    """Return the classic GAN losses for the generator and the discriminator.

    (Generator) log(1 - sigmoid(D(G(z))))
    (Discriminator) - log(sigmoid(D(x))) - log(1 - sigmoid(D(G(z))))
    """
    real_labels = tf.ones_like(discriminator_real_outputs)
    fake_labels = tf.zeros_like(discriminator_fake_outputs)
    loss_on_real = tf.losses.sigmoid_cross_entropy(
        real_labels, discriminator_real_outputs)
    loss_on_fake = tf.losses.sigmoid_cross_entropy(
        fake_labels, discriminator_fake_outputs)
    discriminator_loss = loss_on_real + loss_on_fake
    # Classic minimax: the generator maximizes exactly what the
    # discriminator minimizes.
    generator_loss = -discriminator_loss
    return generator_loss, discriminator_loss
Return the classic GAN losses for the generator and the discriminator. (Generator) log(1 - sigmoid(D(G(z)))) (Discriminator) - log(sigmoid(D(x))) - log(1 - sigmoid(D(G(z))))
classic_gan_losses
python
salu133445/musegan
src/musegan/losses.py
https://github.com/salu133445/musegan/blob/master/src/musegan/losses.py
MIT
def nonsaturating_gan_losses(discriminator_real_outputs,
                             discriminator_fake_outputs):
    """Return the non-saturating GAN losses for the generator and the
    discriminator.

    (Generator) -log(sigmoid(D(G(z))))
    (Discriminator) -log(sigmoid(D(x))) - log(1 - sigmoid(D(G(z))))
    """
    real_labels = tf.ones_like(discriminator_real_outputs)
    fake_labels = tf.zeros_like(discriminator_fake_outputs)
    loss_on_real = tf.losses.sigmoid_cross_entropy(
        real_labels, discriminator_real_outputs)
    loss_on_fake = tf.losses.sigmoid_cross_entropy(
        fake_labels, discriminator_fake_outputs)
    discriminator_loss = loss_on_real + loss_on_fake
    # Non-saturating trick: train G to classify fakes as real rather than
    # maximizing the discriminator loss directly.
    generator_loss = tf.losses.sigmoid_cross_entropy(
        tf.ones_like(discriminator_fake_outputs), discriminator_fake_outputs)
    return generator_loss, discriminator_loss
Return the non-saturating GAN losses for the generator and the discriminator. (Generator) -log(sigmoid(D(G(z)))) (Discriminator) -log(sigmoid(D(x))) - log(1 - sigmoid(D(G(z))))
nonsaturating_gan_losses
python
salu133445/musegan
src/musegan/losses.py
https://github.com/salu133445/musegan/blob/master/src/musegan/losses.py
MIT
def wasserstein_gan_losses(discriminator_real_outputs,
                           discriminator_fake_outputs):
    """Return the Wasserstein GAN losses for the generator and the
    discriminator.

    (Generator) -D(G(z))
    (Discriminator) D(G(z)) - D(x)
    """
    mean_fake_score = tf.reduce_mean(discriminator_fake_outputs)
    mean_real_score = tf.reduce_mean(discriminator_real_outputs)
    generator_loss = -mean_fake_score
    discriminator_loss = mean_fake_score - mean_real_score
    return generator_loss, discriminator_loss
Return the Wasserstein GAN losses for the generator and the discriminator. (Generator) -D(G(z)) (Discriminator) D(G(z)) - D(x)
wasserstein_gan_losses
python
salu133445/musegan
src/musegan/losses.py
https://github.com/salu133445/musegan/blob/master/src/musegan/losses.py
MIT
def hinge_gan_losses(discriminator_real_outputs, discriminator_fake_outputs):
    """Return the Hinge GAN losses for the generator and the discriminator.

    (Generator) -D(G(z))
    (Discriminator) max(0, 1 - D(x)) + max(0, 1 + D(G(z)))
    """
    generator_loss = -tf.reduce_mean(discriminator_fake_outputs)
    real_hinge = tf.reduce_mean(tf.nn.relu(1. - discriminator_real_outputs))
    fake_hinge = tf.reduce_mean(tf.nn.relu(1. + discriminator_fake_outputs))
    return generator_loss, real_hinge + fake_hinge
Return the Hinge GAN losses for the generator and the discriminator. (Generator) -D(G(z)) (Discriminator) max(0, 1 - D(x)) + max(0, 1 + D(G(z)))
hinge_gan_losses
python
salu133445/musegan
src/musegan/losses.py
https://github.com/salu133445/musegan/blob/master/src/musegan/losses.py
MIT
def to_chroma(pianoroll):
    """Return the chroma features (not normalized).

    Args:
        pianoroll: 5-D tensor whose axis 3 is the pitch axis.

    Returns:
        5-D tensor with the pitch axis folded into 12 chroma bins.

    Raises:
        ValueError: If the input tensor does not have 5 dimensions.
    """
    if pianoroll.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    remainder = pianoroll.get_shape()[3] % 12
    if remainder:
        # Zero-pad the pitch axis up to the next multiple of 12.
        pianoroll = tf.pad(
            pianoroll, ((0, 0), (0, 0), (0, 0), (0, 12 - remainder), (0, 0)))
    # After padding, the pitch axis is an exact multiple of 12. The previous
    # `shape[3] // 12 + int(remainder > 0)` read the *padded* shape and thus
    # counted one octave too many, making the reshape sizes inconsistent.
    n_octaves = pianoroll.get_shape()[3] // 12
    # NOTE(review): this groups the pitch axis as (12, n_octaves) and sums
    # over the octave-like axis — confirm the intended pitch ordering.
    reshaped = tf.reshape(
        pianoroll, (-1, pianoroll.get_shape()[1], pianoroll.get_shape()[2],
                    12, n_octaves, pianoroll.get_shape()[4]))
    return tf.reduce_sum(reshaped, 4)
Return the chroma features (not normalized).
to_chroma
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def empty_bar_rate(tensor):
    """Return the ratio of empty bars to the total number of bars.

    Args:
        tensor: 5-D tensor; axes 2 and 3 are reduced per bar.

    Raises:
        ValueError: If the input tensor does not have 5 dimensions.
    """
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    # A bar is empty when no entry exceeds the 0.5 activation threshold.
    # The previous version returned the rate of *nonempty* bars
    # (`reduce_any(tensor > 0.5)` without negation), contradicting the
    # function name and docstring.
    is_empty = tf.logical_not(tf.reduce_any(tensor > 0.5, (2, 3)))
    return tf.reduce_mean(tf.cast(is_empty, tf.float32), (0, 1))
Return the ratio of empty bars to the total number of bars.
empty_bar_rate
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def n_pitches_used(tensor):
    """Return the number of unique pitches used per bar.

    Args:
        tensor: 5-D tensor; axis 2 is time, axis 3 is pitch.

    Raises:
        ValueError: If the input tensor does not have 5 dimensions.
    """
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    # Collapse the time axis first so each pitch is counted at most once per
    # bar. Summing per-timestep pitch counts (as before) tallied active
    # note-timesteps, not unique pitches.
    pitch_used = tf.reduce_any(tf.not_equal(tensor, 0), 2)
    return tf.reduce_mean(tf.count_nonzero(pitch_used, 2), [0, 1])
Return the number of unique pitches used per bar.
n_pitches_used
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def qualified_note_rate(tensor, threshold=2):
    """Return the ratio of the number of the qualified notes (notes longer
    than `threshold` (in time step)) to the total number of notes in a
    piano-roll.

    Args:
        tensor: 5-D piano-roll tensor; the last axis indexes tracks.
        threshold: Minimum note length (in time steps) for a note to be
            counted as qualified.

    Returns:
        float32 tensor of per-track qualified note rates (NaN for tracks
        with no notes).

    Raises:
        ValueError: If the input tensor does not have 5 dimensions.
    """
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")

    def _qualified_note_rate(array, threshold):
        """NumPy implementation, executed inside tf.py_func."""
        n_tracks = array.shape[-1]
        # Merge axes 1 and 2 into a single time axis.
        reshaped = array.reshape(-1, array.shape[1] * array.shape[2],
                                 array.shape[3], array.shape[4])
        # Pad with silence at both ends so every note has an onset and an
        # offset in the diff below.
        padded = np.pad(reshaped.astype(int),
                        ((0, 0), (1, 1), (0, 0), (0, 0)), 'constant')
        diff = np.diff(padded, axis=1)
        transposed = diff.transpose(3, 0, 1, 2).reshape(n_tracks, -1)
        onsets = (transposed > 0).nonzero()
        offsets = (transposed < 0).nonzero()
        # Onsets and offsets pair up positionally; their index difference is
        # the note length.
        n_qualified_notes = np.array([np.count_nonzero(
            offsets[1][(offsets[0] == i)] - onsets[1][(onsets[0] == i)]
            >= threshold) for i in range(n_tracks)], np.float32)
        # Count onsets with len(); `np.count_nonzero` on the index *values*
        # (as before) silently dropped notes at flattened position 0.
        n_onsets = np.array([len(onsets[1][(onsets[0] == i)])
                             for i in range(n_tracks)], np.float32)
        with np.errstate(divide='ignore', invalid='ignore'):
            return n_qualified_notes / n_onsets

    return tf.py_func(lambda array: _qualified_note_rate(array, threshold),
                      [tensor], tf.float32)
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a piano-roll.
qualified_note_rate
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def _qualified_note_rate(array, threshold): """Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a piano-roll.""" n_tracks = array.shape[-1] reshaped = array.reshape(-1, array.shape[1] * array.shape[2], array.shape[3], array.shape[4]) padded = np.pad(reshaped.astype(int), ((0, 0), (1, 1), (0, 0), (0, 0)), 'constant') diff = np.diff(padded, axis=1) transposed = diff.transpose(3, 0, 1, 2).reshape(n_tracks, -1) onsets = (transposed > 0).nonzero() offsets = (transposed < 0).nonzero() n_qualified_notes = np.array([np.count_nonzero( offsets[1][(offsets[0] == i)] - onsets[1][(onsets[0] == i)] >= threshold) for i in range(n_tracks)], np.float32) n_onsets = np.array([np.count_nonzero(onsets[1][(onsets[0] == i)]) for i in range(n_tracks)], np.float32) with np.errstate(divide='ignore', invalid='ignore'): return n_qualified_notes / n_onsets
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a piano-roll.
_qualified_note_rate
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def polyphonic_rate(tensor, threshold=2):
    """Return the ratio of the number of time steps where the number of
    pitches being played is larger than `threshold` to the total number of
    time steps"""
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    # Active pitches per time step, then the time steps exceeding the
    # polyphony threshold.
    pitch_counts = tf.count_nonzero(tensor, 3)
    n_poly = tf.count_nonzero(pitch_counts > threshold, 2)
    ratio = n_poly / tensor.get_shape()[2]
    return tf.reduce_mean(ratio, [0, 1])
Return the ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps
polyphonic_rate
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def _drum_pattern_mask(n_timesteps, tolerance=0.1): """Return a drum pattern mask with the given tolerance.""" if n_timesteps not in (96, 48, 24, 72, 36, 64, 32, 16): raise ValueError("Unsupported number of timesteps for the drum in " "pattern metric.") if n_timesteps == 96: drum_pattern_mask = np.tile( [1., tolerance, 0., 0., 0., tolerance], 16) elif n_timesteps == 48: drum_pattern_mask = np.tile([1., tolerance, tolerance], 16) elif n_timesteps == 24: drum_pattern_mask = np.tile([1., tolerance, tolerance], 8) elif n_timesteps == 72: drum_pattern_mask = np.tile( [1., tolerance, 0., 0., 0., tolerance], 12) elif n_timesteps == 36: drum_pattern_mask = np.tile([1., tolerance, tolerance], 12) elif n_timesteps == 64: drum_pattern_mask = np.tile([1., tolerance, 0., tolerance], 16) elif n_timesteps == 32: drum_pattern_mask = np.tile([1., tolerance], 16) elif n_timesteps == 16: drum_pattern_mask = np.tile([1., tolerance], 8) return drum_pattern_mask
Return a drum pattern mask with the given tolerance.
_drum_pattern_mask
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def _scale_mask(key=3): """Return a scale mask for the given key. Default to C major scale.""" a_scale_mask = np.array([[[1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0]]], bool) return np.expand_dims(np.roll(a_scale_mask, -key, 2), -1)
Return a scale mask for the given key. Default to C major scale.
_scale_mask
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def _tonal_matrix(r1=1.0, r2=1.0, r3=0.5): """Compute and return a tonal matrix for computing the tonal distance [1]. Default argument values are set as suggested by the paper. [1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting harmonic change in musical audio. In Proc. ACM MM Workshop on Audio and Music Computing Multimedia, 2006. """ tonal_matrix = np.empty((6, 12)) tonal_matrix[0] = r1 * np.sin(np.arange(12) * (7. / 6.) * np.pi) tonal_matrix[1] = r1 * np.cos(np.arange(12) * (7. / 6.) * np.pi) tonal_matrix[2] = r2 * np.sin(np.arange(12) * (3. / 2.) * np.pi) tonal_matrix[3] = r2 * np.cos(np.arange(12) * (3. / 2.) * np.pi) tonal_matrix[4] = r3 * np.sin(np.arange(12) * (2. / 3.) * np.pi) tonal_matrix[5] = r3 * np.cos(np.arange(12) * (2. / 3.) * np.pi) return tonal_matrix
Compute and return a tonal matrix for computing the tonal distance [1]. Default argument values are set as suggested by the paper. [1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting harmonic change in musical audio. In Proc. ACM MM Workshop on Audio and Music Computing Multimedia, 2006.
_tonal_matrix
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def _to_tonal_space(tensor):
    """Return the tensor in tonal space where chroma features are normalized
    per beat.

    NOTE(review): `beat_resolution` is a free variable resolved at module
    level and is not visible in this block — confirm it is defined before
    this function is called.
    """
    tonal_matrix = tf.constant(_tonal_matrix(), tf.float32)
    # Sum the 12-bin chroma over each beat window of `beat_resolution` steps.
    beat_chroma = tf.reduce_sum(tf.reshape(
        tensor, (-1, beat_resolution, 12, tensor.get_shape()[4])), 1)
    # Normalize each beat's chroma vector (axis 1 is the 12-bin chroma axis
    # after the reduction above).
    beat_chroma = beat_chroma / tf.reduce_sum(beat_chroma, 1, True)
    # Put the chroma axis first and flatten the rest for the projection.
    reshaped = tf.reshape(tf.transpose(beat_chroma, (1, 0, 2)), (12, -1))
    # Project the normalized chroma onto the 6-D tonal centroid space.
    return tf.reshape(
        tf.matmul(tonal_matrix, reshaped), (6, -1, tensor.get_shape()[4]))
Return the tensor in tonal space where chroma features are normalized per beat.
_to_tonal_space
python
salu133445/musegan
src/musegan/metrics.py
https://github.com/salu133445/musegan/blob/master/src/musegan/metrics.py
MIT
def get_train_nodes(self, x, z=None, y=None, c=None, params=None,
                    config=None):
    """Return a dictionary of graph nodes for training.

    Args:
        x: Real data tensor fed to the discriminator.
        z: Optional latent tensor; sampled from a truncated normal when None.
        y: Optional condition input forwarded to generator/discriminator.
        c: Optional accompaniment condition, used when
            `params['is_accompaniment']` is true.
        params: Model hyperparameter dict (reads 'latent_dim',
            'use_binary_neurons', 'is_accompaniment', 'data_shape').
        config: Run configuration dict (reads 'batch_size', 'gan_loss_type',
            optimizer and schedule settings, 'save_summaries_steps').

    Returns:
        dict: Named graph nodes ('fake_x', losses, 'train_ops', etc.).
    """
    LOGGER.info("Building training nodes.")
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
        nodes = {}

        # Get or create global step
        global_step = tf.train.get_or_create_global_step()
        nodes['gen_step'] = tf.get_variable(
            'gen_step', [], tf.int32, tf.constant_initializer(0),
            trainable=False)

        # Set default latent distribution if not given
        if z is None:
            nodes['z'] = tf.truncated_normal((
                config['batch_size'], params['latent_dim']))
        else:
            nodes['z'] = z

        # Get slope tensor (for straight-through estimators)
        nodes['slope'] = tf.get_variable(
            'slope', [], tf.float32, tf.constant_initializer(1.0),
            trainable=False)

        # --- Generator output ---------------------------------------------
        # With binary neurons the generator also returns its preactivations.
        if params['use_binary_neurons']:
            if params.get('is_accompaniment'):
                nodes['fake_x'], nodes['fake_x_preactivated'] = self.gen(
                    nodes['z'], y, c, True, nodes['slope'])
            else:
                nodes['fake_x'], nodes['fake_x_preactivated'] = self.gen(
                    nodes['z'], y, True, nodes['slope'])
        else:
            if params.get('is_accompaniment'):
                nodes['fake_x'] = self.gen(nodes['z'], y, c, True)
            else:
                nodes['fake_x'] = self.gen(nodes['z'], y, True)

        # --- Slope annealing ----------------------------------------------
        # The scheduled assignment is registered as an update op so it runs
        # alongside the generator training op (see control_dependencies).
        if config['use_slope_annealing']:
            slope_schedule = config['slope_schedule']
            scheduled_slope = get_scheduled_variable(
                1.0, slope_schedule['end_value'], slope_schedule['start'],
                slope_schedule['end'])
            tf.add_to_collection(
                tf.GraphKeys.UPDATE_OPS,
                tf.assign(nodes['slope'], scheduled_slope))

        # --- Discriminator output -----------------------------------------
        nodes['dis_real'] = self.dis(x, y, True)
        nodes['dis_fake'] = self.dis(nodes['fake_x'], y, True)

        # ============================= Losses =============================
        LOGGER.info("Building losses.")
        # --- Adversarial losses -------------------------------------------
        nodes['gen_loss'], nodes['dis_loss'] = get_adv_losses(
            nodes['dis_real'], nodes['dis_fake'], config['gan_loss_type'])

        # --- Gradient penalties -------------------------------------------
        if config['use_gradient_penalties']:
            # Random interpolates between real and fake samples.
            eps_x = tf.random_uniform(
                [config['batch_size']] + [1] * len(params['data_shape']))
            inter_x = eps_x * x + (1.0 - eps_x) * nodes['fake_x']
            dis_x_inter_out = self.dis(inter_x, y, True)
            gradient_x = tf.gradients(dis_x_inter_out, inter_x)[0]
            # 1e-8 keeps sqrt differentiable at zero gradient norm.
            slopes_x = tf.sqrt(1e-8 + tf.reduce_sum(
                tf.square(gradient_x),
                np.arange(1, gradient_x.get_shape().ndims)))
            gradient_penalty_x = tf.reduce_mean(tf.square(slopes_x - 1.0))
            nodes['dis_loss'] += 10.0 * gradient_penalty_x

        # Compute total loss (for logging and detecting NAN values only)
        nodes['loss'] = nodes['gen_loss'] + nodes['dis_loss']

        # ========================== Training ops ==========================
        LOGGER.info("Building training ops.")
        # --- Learning rate decay ------------------------------------------
        nodes['learning_rate'] = tf.get_variable(
            'learning_rate', [], tf.float32,
            tf.constant_initializer(config['initial_learning_rate']),
            trainable=False)
        if config['use_learning_rate_decay']:
            scheduled_learning_rate = get_scheduled_variable(
                config['initial_learning_rate'],
                config['learning_rate_schedule']['end_value'],
                config['learning_rate_schedule']['start'],
                config['learning_rate_schedule']['end'])
            tf.add_to_collection(
                tf.GraphKeys.UPDATE_OPS,
                tf.assign(nodes['learning_rate'], scheduled_learning_rate))

        # --- Optimizers ---------------------------------------------------
        gen_opt = tf.train.AdamOptimizer(
            nodes['learning_rate'], config['adam']['beta1'],
            config['adam']['beta2'])
        dis_opt = tf.train.AdamOptimizer(
            nodes['learning_rate'], config['adam']['beta1'],
            config['adam']['beta2'])

        # --- Training ops -------------------------------------------------
        nodes['train_ops'] = {}

        # Training op for the discriminator
        nodes['train_ops']['dis'] = dis_opt.minimize(
            nodes['dis_loss'], global_step,
            tf.trainable_variables(scope.name + '/' + self.dis.name))

        # Training ops for the generator
        # Scheduled-variable updates and the gen_step increment run before
        # each generator step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        gen_step_increment = tf.assign_add(nodes['gen_step'], 1)
        with tf.control_dependencies(update_ops + [gen_step_increment]):
            nodes['train_ops']['gen'] = gen_opt.minimize(
                nodes['gen_loss'], global_step,
                tf.trainable_variables(scope.name + '/' + self.gen.name))

        # =========================== Summaries ============================
        LOGGER.info("Building summaries.")
        if config['save_summaries_steps'] > 0:
            with tf.name_scope('losses'):
                tf.summary.scalar('gen_loss', nodes['gen_loss'])
                tf.summary.scalar('dis_loss', nodes['dis_loss'])
            if config['use_learning_rate_decay']:
                with tf.name_scope('learning_rate_decay'):
                    tf.summary.scalar(
                        'learning_rate', nodes['learning_rate'])
            if config['use_slope_annealing']:
                with tf.name_scope('slope_annealing'):
                    tf.summary.scalar('slope', nodes['slope'])

        return nodes
Return a dictionary of graph nodes for training.
get_train_nodes
python
salu133445/musegan
src/musegan/model.py
https://github.com/salu133445/musegan/blob/master/src/musegan/model.py
MIT
def get_predict_nodes(self, z=None, y=None, c=None, params=None,
                      config=None):
    """Return a dictionary of graph nodes for prediction.

    Builds the generator in inference mode and, depending on `config`,
    attaches tf.py_func-based ops that save the generated results as raw
    arrays, image grids, and piano-rolls under `config['result_dir']`.

    Args:
        z: Latent tensor fed to the generator.
        y: Optional condition input.
        c: Optional accompaniment condition, used when
            `params['is_accompaniment']` is true.
        params: Model hyperparameter dict (reads 'use_binary_neurons',
            'is_accompaniment', 'beat_resolution').
        config: Run configuration dict (reads 'result_dir', 'suffix',
            the collect_save_*_op flags, image and MIDI settings).

    Returns:
        dict: Named graph nodes ('fake_x' and the requested save ops).
    """
    LOGGER.info("Building prediction nodes.")
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        nodes = {'z': z}

        # Get slope tensor (for straight-through estimators)
        nodes['slope'] = tf.get_variable(
            'slope', [], tf.float32, tf.constant_initializer(1.0),
            trainable=False)

        # --- Generator output ---------------------------------------------
        # Inference mode (training flag False); with binary neurons the
        # generator also returns its preactivations.
        if params['use_binary_neurons']:
            if params.get('is_accompaniment'):
                nodes['fake_x'], nodes['fake_x_preactivated'] = self.gen(
                    nodes['z'], y, c, False, nodes['slope'])
            else:
                nodes['fake_x'], nodes['fake_x_preactivated'] = self.gen(
                    nodes['z'], y, False, nodes['slope'])
        else:
            if params.get('is_accompaniment'):
                nodes['fake_x'] = self.gen(nodes['z'], y, c, False)
            else:
                nodes['fake_x'] = self.gen(nodes['z'], y, False)

        # ============================ Save ops ============================
        def _get_filepath(folder_name, name, suffix, ext):
            """Return the filename."""
            # `suffix` arrives as bytes from tf.py_func, hence the decode.
            if suffix:
                return os.path.join(
                    config['result_dir'], folder_name, name,
                    '{}_{}.{}'.format(name, str(suffix, 'utf8'), ext))
            return os.path.join(
                config['result_dir'], folder_name, name,
                '{}.{}'.format(name, ext))

        def _array_to_image(array, colormap=None):
            """Convert an array to an image array and return it."""
            if array.ndim == 2:
                return vector_to_image(array)
            return pianoroll_to_image(array, colormap)

        # --- Save array ops -----------------------------------------------
        if config['collect_save_arrays_op']:
            def _save_array(array, suffix, name):
                """Save the input array."""
                filepath = _get_filepath('arrays', name, suffix, 'npy')
                np.save(filepath, array.astype(np.float16))
                # tf.py_func requires a tensor-compatible return value.
                return np.array([0], np.int32)

            arrays = {'fake_x': nodes['fake_x']}
            if params['use_binary_neurons']:
                arrays['fake_x_preactivated'] = nodes['fake_x_preactivated']

            save_array_ops = []
            for key, value in arrays.items():
                # `k=key` binds the loop variable at definition time.
                save_array_ops.append(tf.py_func(
                    lambda array, suffix, k=key: _save_array(
                        array, suffix, k),
                    [value, config['suffix']], tf.int32))
                make_sure_path_exists(
                    os.path.join(config['result_dir'], 'arrays', key))
            nodes['save_arrays_op'] = tf.group(save_array_ops)

        # --- Save image ops -----------------------------------------------
        if config['collect_save_images_op']:
            def _save_image_grid(array, suffix, name):
                """Write `array` as a PNG image grid."""
                image = image_grid(array, config['image_grid'])
                filepath = _get_filepath('images', name, suffix, 'png')
                imageio.imwrite(filepath, image)
                return np.array([0], np.int32)

            def _save_images(array, suffix, name):
                """Save the input image."""
                # Binarize on demand, selected by the key name.
                if 'hard_thresholding' in name:
                    array = (array > 0).astype(np.float32)
                elif 'bernoulli_sampling' in name:
                    rand_num = np.random.uniform(size=array.shape)
                    array = (.5 * (array + 1.) > rand_num)
                    array = array.astype(np.float32)
                images = _array_to_image(array)
                return _save_image_grid(images, suffix, name)

            def _save_colored_images(array, suffix, name):
                """Save the input image."""
                if 'hard_thresholding' in name:
                    array = (array > 0).astype(np.float32)
                elif 'bernoulli_sampling' in name:
                    rand_num = np.random.uniform(size=array.shape)
                    array = (.5 * (array + 1.) > rand_num)
                    array = array.astype(np.float32)
                images = _array_to_image(array, config['colormap'])
                return _save_image_grid(images, suffix, name)

            # Shift outputs from [-1, 1] to [0, 1] for image rendering.
            images = {'fake_x': .5 * (nodes['fake_x'] + 1.)}
            if params['use_binary_neurons']:
                images['fake_x_preactivated'] = .5 * (
                    nodes['fake_x_preactivated'] + 1.)
            else:
                images['fake_x_hard_thresholding'] = nodes['fake_x']
                images['fake_x_bernoulli_sampling'] = nodes['fake_x']

            save_image_ops = []
            for key, value in images.items():
                save_image_ops.append(tf.py_func(
                    lambda array, suffix, k=key: _save_images(
                        array, suffix, k),
                    [value, config['suffix']], tf.int32))
                save_image_ops.append(tf.py_func(
                    lambda array, suffix, k=key: _save_colored_images(
                        array, suffix, k + '_colored'),
                    [value, config['suffix']], tf.int32))
                make_sure_path_exists(os.path.join(
                    config['result_dir'], 'images', key))
                make_sure_path_exists(os.path.join(
                    config['result_dir'], 'images', key + '_colored'))
            nodes['save_images_op'] = tf.group(save_image_ops)

        # --- Save pianoroll ops -------------------------------------------
        if config['collect_save_pianorolls_op']:
            def _save_pianoroll(array, suffix, name):
                """Binarize (if needed) and save `array` as a piano-roll."""
                filepath = _get_filepath('pianorolls', name, suffix, 'npz')
                if 'hard_thresholding' in name:
                    array = (array > 0)
                elif 'bernoulli_sampling' in name:
                    rand_num = np.random.uniform(size=array.shape)
                    array = (.5 * (array + 1.) > rand_num)
                save_pianoroll(
                    filepath, array, config['midi']['programs'],
                    list(map(bool, config['midi']['is_drums'])),
                    config['midi']['tempo'], params['beat_resolution'],
                    config['midi']['lowest_pitch'])
                return np.array([0], np.int32)

            if params['use_binary_neurons']:
                pianorolls = {'fake_x': nodes['fake_x'] > 0}
            else:
                pianorolls = {
                    'fake_x_hard_thresholding': nodes['fake_x'],
                    'fake_x_bernoulli_sampling': nodes['fake_x']}

            save_pianoroll_ops = []
            for key, value in pianorolls.items():
                save_pianoroll_ops.append(tf.py_func(
                    lambda array, suffix, k=key:
                    _save_pianoroll(array, suffix, k),
                    [value, config['suffix']], tf.int32))
                make_sure_path_exists(
                    os.path.join(config['result_dir'], 'pianorolls', key))
            nodes['save_pianorolls_op'] = tf.group(save_pianoroll_ops)

        return nodes
Return a dictionary of graph nodes for prediction.
get_predict_nodes
python
salu133445/musegan
src/musegan/model.py
https://github.com/salu133445/musegan/blob/master/src/musegan/model.py
MIT
def _array_to_image(array, colormap=None):
    """Convert an array to an image array and return it."""
    # 2-D arrays are rendered as vectors; anything else as a piano-roll.
    if array.ndim != 2:
        return pianoroll_to_image(array, colormap)
    return vector_to_image(array)
Convert an array to an image array and return it.
_array_to_image
python
salu133445/musegan
src/musegan/model.py
https://github.com/salu133445/musegan/blob/master/src/musegan/model.py
MIT
def make_sure_path_exists(path):
    """Create intermediate directories if the path does not exist.

    Args:
        path: Directory path to create, including any missing parents.

    Note: unlike the previous EEXIST-swallowing version, this still raises
    FileExistsError when `path` exists but is not a directory, instead of
    silently masking that error.
    """
    os.makedirs(path, exist_ok=True)
Create intermediate directories if the path does not exist.
make_sure_path_exists
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def update_not_none(dict1, dict2):
    """Copy into `dict1` every entry of `dict2` whose value is not None."""
    filtered = {key: val for key, val in dict2.items() if val is not None}
    dict1.update(filtered)
Update the values of keys in `dict1` with the values of the same key from `dict2` if the values in `dict2` is not None.
update_not_none
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def update_existing(dict1, dict2):
    """Overwrite entries of `dict1` with values from `dict2`, but only for
    keys already present in `dict1` and only when the new value is not None.
    """
    for key in dict1.keys() & dict2.keys():
        replacement = dict2[key]
        if replacement is not None:
            dict1[key] = replacement
Update the values of keys in `dict1` with the values of the same key from `dict2` if the values in `dict2` is not None and the same key is in `dict1`.
update_existing
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def load_component(component, name, class_name):
    """Load and return component network from file."""
    module_path = '.'.join(['musegan.presets', component, name])
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
Load and return component network from file.
load_component
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def add_file_handler(logger, log_filepath, loglevel=FILE_LOGLEVEL,
                     log_format=FILE_LOG_FORMAT):
    """Attach a file handler writing to `log_filepath` to the given logger."""
    handler = logging.FileHandler(log_filepath)
    handler.setLevel(loglevel)
    handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
Add a file handler to the logger.
add_file_handler
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def setup_loggers(log_dir, loglevel=FILE_LOGLEVEL, log_format=FILE_LOG_FORMAT):
    """Attach a per-logger file handler to every registered musegan logger."""
    musegan_loggers = [
        name for name in logging.Logger.manager.loggerDict
        if name.startswith('musegan')]
    for name in musegan_loggers:
        add_file_handler(
            logging.getLogger(name), os.path.join(log_dir, name + '.log'),
            loglevel, log_format)
Setup the loggers with file handlers.
setup_loggers
python
salu133445/musegan
src/musegan/utils.py
https://github.com/salu133445/musegan/blob/master/src/musegan/utils.py
MIT
def binary_round(x):
    """Round a tensor with values in [0, 1] to a tensor with values in
    {0, 1}, letting gradients pass straight through the rounding op.
    """
    graph = tf.get_default_graph()
    with ops.name_scope("BinaryRound") as name:
        # Treat Round as Identity during backprop (straight-through
        # estimator).
        with graph.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1}, using the straight through estimator for the gradient.
binary_round
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def bernoulli_sample(x):
    """Sample a {0, 1} tensor from probabilities `x` in [0, 1], with a
    straight-through (identity) gradient.

    E.g., if x is 0.6, the sample is 1 with probability 0.6 and 0 otherwise.
    """
    graph = tf.get_default_graph()
    with ops.name_scope("BernoulliSample") as name:
        overrides = {"Ceil": "Identity", "Sub": "BernoulliSample_ST"}
        with graph.gradient_override_map(overrides):
            noise = tf.random_uniform(tf.shape(x))
            return tf.ceil(x - noise, name=name)
Uses a tensor whose values are in [0,1] to sample a tensor with values in {0, 1}, using the straight through estimator for the gradient. E.g., if x is 0.6, bernoulliSample(x) will be 1 with probability 0.6, and 0 otherwise, and the gradient will be pass-through (identity).
bernoulli_sample
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def pass_through_sigmoid(x, slope=1):
    """Sigmoid whose gradient is replaced by the identity function.

    The `slope` argument is accepted for interface compatibility but is not
    used by this implementation.
    """
    graph = tf.get_default_graph()
    with ops.name_scope("PassThroughSigmoid") as name:
        with graph.gradient_override_map({"Sigmoid": "Identity"}):
            return tf.sigmoid(x, name=name)
Sigmoid that uses identity function as its gradient
pass_through_sigmoid
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def binary_stochastic_ST(x, slope_tensor=None, pass_through=True,
                         stochastic=True):
    """Binarize logits with a straight-through estimator.

    Applies a sigmoid to `x` and then either samples a Bernoulli variable
    from the result (binary stochastic neuron, the default) or applies a
    binary step function (when `stochastic` is False). Gradients follow the
    straight-through estimator. See https://arxiv.org/abs/1308.3432.

    Arguments:
        * x: the pre-activation / logit tensor
        * slope_tensor: if pass_through==False, adjusts the slope of the
          sigmoid for the Slope Annealing Trick
          (see http://arxiv.org/abs/1609.01704)
        * pass_through: if True (default), the gradient of the whole
          function is 1 or 0; if False, it is scaled by the sigmoid's
          gradient (required for the Slope Annealing Trick)
        * stochastic: Bernoulli sample if True (default), step function
          if False
    """
    if slope_tensor is None:
        slope_tensor = tf.constant(1.0)
    p = (pass_through_sigmoid(x) if pass_through
         else tf.sigmoid(slope_tensor * x))
    binarize = bernoulli_sample if stochastic else binary_round
    return binarize(p), p
Sigmoid followed by either a random sample from a bernoulli distribution according to the result (binary stochastic neuron) (default), or a sigmoid followed by a binary step function (if stochastic == False). Uses the straight through estimator. See https://arxiv.org/abs/1308.3432. Arguments: * x: the pre-activation / logit tensor * slope_tensor: if passThrough==False, slope adjusts the slope of the sigmoid function for purposes of the Slope Annealing Trick (see http://arxiv.org/abs/1609.01704) * pass_through: if True (default), gradient of the entire function is 1 or 0; if False, gradient of 1 is scaled by the gradient of the sigmoid (required if Slope Annealing Trick is used) * stochastic: binary stochastic neuron if True (default), or step function if False
binary_stochastic_ST
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def binary_stochastic_REINFORCE(x, loss_op_name="loss_by_example"):
    """
    Sigmoid followed by a random sample from a bernoulli distribution
    according to the result (binary stochastic neuron). Uses the REINFORCE
    estimator. See https://arxiv.org/abs/1308.3432.

    NOTE: Requires a loss operation with name matching the argument for
    loss_op_name in the graph. This loss operation should be broken out by
    example (i.e., not a single number for the entire batch).
    """
    g = tf.get_default_graph()

    with ops.name_scope("BinaryStochasticREINFORCE"):
        # Route the sigmoid's gradient through the custom REINFORCE
        # estimator and make the ceil op transparent to backprop.
        with g.gradient_override_map({"Sigmoid": "BinaryStochastic_REINFORCE",
                                      "Ceil": "Identity"}):
            p = tf.sigmoid(x)

            # Record, in a shared graph collection, which loss op belongs
            # to this neuron so the gradient override can look it up later.
            # The collection holds a single dict mapping sigmoid op name to
            # loss op name; create it on first use.
            reinforce_collection = g.get_collection("REINFORCE")
            if not reinforce_collection:
                g.add_to_collection("REINFORCE", {})
                reinforce_collection = g.get_collection("REINFORCE")
            reinforce_collection[0][p.op.name] = loss_op_name

    # Bernoulli sample: ceil(p - u) is 1 with probability p for u~U(0,1).
    return tf.ceil(p - tf.random_uniform(tf.shape(x)))
Sigmoid followed by a random sample from a bernoulli distribution according to the result (binary stochastic neuron). Uses the REINFORCE estimator. See https://arxiv.org/abs/1308.3432. NOTE: Requires a loss operation with name matching the argument for loss_op_name in the graph. This loss operation should be broken out by example (i.e., not a single number for the entire batch).
binary_stochastic_REINFORCE
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def _binaryStochastic_REINFORCE(op, _):
    """Unbiased estimator for binary stochastic function based on REINFORCE.

    Registered as the gradient for the overridden Sigmoid op (see
    binary_stochastic_REINFORCE); `op` is the sigmoid operation and the
    incoming gradient is ignored.
    """
    # Look up the per-example loss op registered for this sigmoid in the
    # shared "REINFORCE" graph collection.
    loss_op_name = op.graph.get_collection("REINFORCE")[0][op.name]
    loss_tensor = op.graph.get_operation_by_name(loss_op_name).outputs[0]

    # Walk forward from the sigmoid output to recover the sampled binary
    # outcome: sigmoid -> subtraction -> ceil (as built by the caller).
    sub_tensor = op.outputs[0].consumers()[0].outputs[0] #subtraction tensor
    ceil_tensor = sub_tensor.consumers()[0].outputs[0] #ceiling tensor

    # (outcome - probability): the REINFORCE score direction.
    outcome_diff = (ceil_tensor - op.outputs[0])

    # Provides an early out if we want to avoid variance adjustment for
    # whatever reason (e.g., to show that variance adjustment helps)
    if op.graph.get_collection("REINFORCE")[0].get("no_variance_adj"):
        return outcome_diff * tf.expand_dims(loss_tensor, 1)

    outcome_diff_sq = tf.square(outcome_diff)
    outcome_diff_sq_r = tf.reduce_mean(outcome_diff_sq, reduction_indices=0)
    outcome_diff_sq_loss_r = tf.reduce_mean(
        outcome_diff_sq * tf.expand_dims(loss_tensor, 1), reduction_indices=0)

    # Running (decayed) numerator/denominator for the variance-minimizing
    # per-unit baseline l_bar; kept out of the trainable set.
    l_bar_num = tf.Variable(tf.zeros(outcome_diff_sq_r.get_shape()),
                            trainable=False)
    l_bar_den = tf.Variable(tf.ones(outcome_diff_sq_r.get_shape()),
                            trainable=False)

    # Note: we already get a decent estimate of the average from the minibatch
    decay = 0.95
    train_l_bar_num = tf.assign(l_bar_num, l_bar_num*decay +\
                                outcome_diff_sq_loss_r*(1-decay))
    train_l_bar_den = tf.assign(l_bar_den, l_bar_den*decay +\
                                outcome_diff_sq_r*(1-decay))

    with tf.control_dependencies([train_l_bar_num, train_l_bar_den]):
        # 1e-4 guards against division by a near-zero denominator.
        l_bar = train_l_bar_num/(train_l_bar_den + 1e-4)
        l = tf.tile(tf.expand_dims(loss_tensor, 1),
                    tf.constant([1, l_bar.get_shape().as_list()[0]]))

    # REINFORCE gradient with baseline: (outcome - p) * (loss - baseline).
    return outcome_diff * (l - l_bar)
Unbiased estimator for binary stochastic function based on REINFORCE.
_binaryStochastic_REINFORCE
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def binary_wrapper(pre_activations_tensor, estimator,
                   stochastic_tensor=tf.constant(True), pass_through=True,
                   slope_tensor=tf.constant(1.0)):
    """Turn a layer of pre-activations (logits) into binary stochastic
    neurons.

    Args:
        pre_activations_tensor: The logit tensor to binarize.
        estimator: Either 'straight_through' or 'reinforce'.
        stochastic_tensor: Boolean tensor selecting Bernoulli sampling
            (True, default) or a deterministic step function (e.g., for
            inference).
        pass_through: ST only -- if True (default) the backprop gradient is
            the identity; if False it is scaled by the sigmoid derivative.
        slope_tensor: ST only -- sigmoid slope for the slope annealing
            trick.

    Returns:
        The binarized tensor (and probabilities, as produced by the chosen
        estimator).

    Raises:
        ValueError: If `estimator` is not recognized.
    """
    if estimator == 'straight_through':
        if pass_through:
            sample_fn = lambda: binary_stochastic_ST(pre_activations_tensor)
            step_fn = lambda: binary_stochastic_ST(pre_activations_tensor,
                                                   stochastic=False)
        else:
            sample_fn = lambda: binary_stochastic_ST(
                pre_activations_tensor, slope_tensor, False)
            step_fn = lambda: binary_stochastic_ST(
                pre_activations_tensor, slope_tensor, False, False)
        return tf.cond(stochastic_tensor, sample_fn, step_fn)

    if estimator == 'reinforce':
        # binaryStochastic_REINFORCE was designed to only be stochastic, so
        # using the ST version for the step fn for purposes of using step
        # fn at evaluation / not for training
        return tf.cond(
            stochastic_tensor,
            lambda: binary_stochastic_REINFORCE(pre_activations_tensor),
            lambda: binary_stochastic_ST(pre_activations_tensor,
                                         stochastic=False))

    raise ValueError("Unrecognized estimator.")
Turns a layer of pre-activations (logits) into a layer of binary stochastic neurons Keyword arguments: *estimator: either ST or REINFORCE *stochastic_tensor: a boolean tensor indicating whether to sample from a bernoulli distribution (True, default) or use a step_function (e.g., for inference) *pass_through: for ST only - boolean as to whether to substitute identity derivative on the backprop (True, default), or whether to use the derivative of the sigmoid *slope_tensor: for ST only - tensor specifying the slope for purposes of slope annealing trick
binary_wrapper
python
salu133445/musegan
src/musegan/presets/binary_ops.py
https://github.com/salu133445/musegan/blob/master/src/musegan/presets/binary_ops.py
MIT
def eval(self, batch, output_type=0, quiet=False, save_fig=False,
         fig_dir='./'):
    """
    Evaluate one batch of bars according to eval_map and eval_pair

    Args:
        batch (tensor): The input tensor.
        output_type (int): 0 for scalar (mean of list), 1 for list
        quiet (bool): if true, print the values
        save_fig (bool): if true, plot figures and save them under 'fig_dir'
        fig_dir (str): dir to store images

    Returns:
        score_matrix: result of eval map
        score_pair_matrix: result of eval pair
    """
    batch = np.reshape(batch, (-1, 96, 84, 5))
    num_batch = len(batch)

    # NaN-initialized so that skipped metrics are ignored by np.nanmean.
    score_matrix = np.zeros((self.metrics_num, self.track_num,
                             num_batch)) * np.nan
    score_pair_matrix = np.zeros((self.pair_num, num_batch)) * np.nan

    for idx in range(num_batch):
        # compute eval map
        # NOTE(review): bar_act is only (re)assigned when eval_map[0, t] is
        # set -- presumably metric 0 is always enabled; confirm upstream.
        for t in range(self.track_num):
            if(self.eval_map[0, t]):
                bar_act = self.metric_is_empty_bar(batch[idx, :, :, t])
                score_matrix[0, t, idx] = bar_act
            if(self.eval_map[1, t] and not bar_act):
                score_matrix[1, t, idx] = self.metric_num_pitch_used(batch[idx, :, :, t])
            if(self.eval_map[2, t] and not bar_act):
                score_matrix[2, t, idx]= self.metric_qualified_note_ratio(batch[idx, :, :, t])
            if(self.eval_map[3, t] and not bar_act):
                score_matrix[3, t, idx]= self.metric_polyphonic_ratio(batch[idx, :, :, t])
            if(self.eval_map[4, t] and not bar_act):
                score_matrix[4, t, idx]= self.metric_in_scale(self.to_chroma(batch[idx, :, :, t]))
            if(self.eval_map[5, t] and not bar_act):
                score_matrix[5, t, idx]= self.metric_drum_pattern(batch[idx, :, :, t])
            if(self.eval_map[6, t] and not bar_act):
                score_matrix[6, t, idx]= self.metric_num_pitch_used(self.to_chroma(batch[idx, :, :, t]))

        # compute eval pair
        for p in range(self.pair_num):
            pair = self.inter_pair[p]
            score_pair_matrix[p, idx] = self.metrics_harmonicity(
                self.to_chroma(batch[idx, :, :, pair[0]]),
                self.to_chroma(batch[idx, :, :, pair[1]]))

    score_matrix_mean = np.nanmean(score_matrix, axis=2)
    score_pair_matrix_mean = np.nanmean(score_pair_matrix, axis=1)

    if not quiet:
        print('# Data Size:', batch.shape, ' \n# num of Metrics:', np.sum(self.eval_map))
        self.print_metrics_mat(score_matrix_mean)
        self.print_metrics_pair(score_pair_matrix_mean)

    # save figures and save info as npy files
    if save_fig:
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        print('[*] Plotting Figures...')

        # plot figures for each metric
        for i in range(len(self.metric_names)):
            for j in range(len(self.track_names)):
                if self.eval_map[i, j]:
                    self.plot_histogram(score_matrix[i, j], fig_dir=fig_dir,
                        title='['+self.metric_names[i]+']_'+self.track_names[j])

        # info dict
        info = {'score_matrix_mean': score_matrix_mean,
                'score_pair_matrix_mean': score_pair_matrix_mean,
                'score_matrix': score_matrix,
                'score_pair_matrix': score_pair_matrix}

        np.save(os.path.join(fig_dir, 'info.npy'), info)
        print('[*] Done!! saved in %s' %(fig_dir))

    # return values
    # Bug fix: `output_type is 0` / `is 1` relied on CPython small-int
    # interning (a SyntaxWarning since 3.8); use value equality instead.
    if output_type == 0:  # mean value, scalar
        return score_matrix_mean, score_pair_matrix_mean
    if output_type == 1:  # list of values
        return score_matrix, score_pair_matrix
Evaluate one batch of bars according to eval_map and eval_pair Args: batch (tensor): The input tensor. output_type (int): 0 for scalar (mean of list), 1 for list quiet (bool): if true, print the values save_fig (bool): if true, plot figures and save them under 'fig_dir' fig_dir (str): dir to store images Returns: score_matrix: result of eval map score_pair_matrix: result of eval pair
eval
python
salu133445/musegan
v1/musegan/eval/metrics.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/eval/metrics.py
MIT
def get_placeholder(default_tensor=None, shape=None, name=None):
    """Return `default_tensor` when given; otherwise create and return a new
    float32 placeholder with the given shape and name."""
    if default_tensor is not None:
        return default_tensor
    if shape is None:
        raise ValueError('One of default_tensor and shape must be given')
    return tf.placeholder(tf.float32, shape=shape, name=name)
Return a placeholder_wirh_default if default_tensor given, otherwise a new placeholder is created and return
get_placeholder
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def batch_norm(tensor_in, apply=True):
    """
    Apply a batch normalization layer on the input tensor and return the
    resulting tensor.

    Args:
        tensor_in (tensor): The input tensor.
        apply (bool): True to apply. False to bypass batch normalization.
            Defaults to True.

    Returns:
        tensor: The resulting tensor.
    """
    # Bypass when there is nothing to normalize or normalization is off.
    if tensor_in is None or not apply:
        return tensor_in
    return tf.contrib.layers.batch_norm(tensor_in, decay=0.9, epsilon=1e-5,
                                        updates_collections=None, scale=True)
Apply a batch normalization layer on the input tensor and return the resulting tensor. Args: tensor_in (tensor): The input tensor. apply (bool): True to apply. False to bypass batch normalization. Defaults to True. Returns: tensor: The resulting tensor.
batch_norm
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def lrelu(tensor_in, alpha=0.2):
    """Apply a leaky ReLU layer on the input tensor and return the resulting
    tensor. (alpha defaults to 0.2)"""
    if tensor_in is None:
        return tensor_in
    # max(x, alpha*x) equals x for x >= 0 and alpha*x otherwise.
    return tf.maximum(tensor_in, alpha * tensor_in)
Apply a leaky ReLU layer on the input tensor and return the resulting tensor. (alpha defaults to 0.2)
lrelu
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def relu(tensor_in):
    """Apply a ReLU layer on the input tensor and return the resulting
    tensor."""
    if tensor_in is None:
        return tensor_in
    return tf.nn.relu(tensor_in)
Apply a ReLU layer on the input tensor and return the resulting tensor.
relu
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def concat_cond_conv(x, condition=None):
    """Concatenate conditioning vector on feature map axis."""
    if condition is None:
        return x
    batch_size = tf.shape(x)[0]
    cond_depth = condition.get_shape()[1]
    # Broadcast the (batch, depth) condition over the spatial dimensions of
    # x by reshaping to (batch, 1, 1, depth) and multiplying with ones.
    tiled = tf.reshape(condition,
                       tf.stack([batch_size, 1, 1, cond_depth])) * tf.ones(
        tf.stack([batch_size, x.get_shape()[1], x.get_shape()[2],
                  cond_depth]))
    return tf.concat([x, tiled], 3)
Concatenate conditioning vector on feature map axis.
concat_cond_conv
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def concat_cond_lin(tensor_in, condition):
    """Concatenate a conditioning vector onto a rank-2 tensor along axis 1.

    Args:
        tensor_in (tensor): The input tensor, shaped (batch, features).
        condition (tensor): The conditioning tensor, shaped
            (batch, cond_features), or None to bypass concatenation.

    Returns:
        tensor: The concatenated tensor, or `tensor_in` unchanged when
        `condition` is None.
    """
    if condition is None:
        return tensor_in
    # Bug fix: the axis argument must be outside the value list --
    # tf.concat([tensor_in, condition, 1]) tried to concatenate the
    # scalar 1 itself and would fail at graph-build time.
    return tf.concat([tensor_in, condition], 1)
Concatenate conditioning vector on feature map axis.
concat_cond_lin
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def concat_prev(tensor_in, condition):
    """Concatenate conditioning feature maps on the channel axis; the two
    tensors must share the same spatial (height, width) shape."""
    if condition is None:
        return tensor_in
    if tensor_in.get_shape()[1:3] != condition.get_shape()[1:3]:
        raise ValueError('unmatched shape:', tensor_in.get_shape(), 'and',
                         condition.get_shape())
    # Broadcast `condition` across the batch before concatenating.
    pad_shape = tf.stack([tf.shape(tensor_in)[0],
                          tensor_in.get_shape()[1],
                          tensor_in.get_shape()[2],
                          condition.get_shape()[3]])
    return tf.concat([tensor_in, condition * tf.ones(pad_shape)], 3)
Concatenate conditioning vector on feature map axis.
concat_prev
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def conv2d(tensor_in, out_channels, kernels, strides, stddev=0.02,
           name='conv2d', reuse=None, padding='VALID'):
    """
    Apply a 2D convolution layer on the input tensor and return the
    resulting tensor.

    Args:
        tensor_in (tensor): The input tensor.
        out_channels (int): The number of output channels.
        kernels (list of int): The size of the kernel.
            [kernel_height, kernel_width]
        strides (list of int): The stride of the sliding window.
            [stride_height, stride_width]
        stddev (float): The value passed to the truncated normal initializer
            for weights. Defaults to 0.02.
        name (str): The tensorflow variable scope. Defaults to 'conv2d'.
        reuse (bool): True to reuse weights and biases.
        padding (str): 'SAME' or 'VALID'. The type of padding algorithm to
            use. Defaults to 'VALID'.

    Returns:
        tensor: The resulting tensor, or None when `tensor_in` is None.
    """
    if tensor_in is None:
        return None
    with tf.variable_scope(name, reuse=reuse):
        print('| |---'+tf.get_variable_scope().name,
              tf.get_variable_scope().reuse)
        kernel_shape = kernels + [tensor_in.get_shape()[-1], out_channels]
        weights = tf.get_variable(
            'weights', kernel_shape,
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        biases = tf.get_variable(
            'biases', [out_channels],
            initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(tensor_in, weights, strides=[1]+strides+[1],
                            padding=padding)
        # Rebuild the shape with a dynamic batch dimension.
        out_shape = tf.stack([tf.shape(tensor_in)[0]]
                             + list(conv.get_shape()[1:]))
        return tf.reshape(tf.nn.bias_add(conv, biases), out_shape)
Apply a 2D convolution layer on the input tensor and return the resulting tensor. Args: tensor_in (tensor): The input tensor. out_channels (int): The number of output channels. kernels (list of int): The size of the kernel. [kernel_height, kernel_width] strides (list of int): The stride of the sliding window. [stride_height, stride_width] stddev (float): The value passed to the truncated normal initializer for weights. Defaults to 0.02. name (str): The tenorflow variable scope. Defaults to 'conv2d'. reuse (bool): True to reuse weights and biases. padding (str): 'SAME' or 'VALID'. The type of padding algorithm to use. Defaults to 'VALID'. Returns: tensor: The resulting tensor.
conv2d
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def transconv2d(tensor_in, out_shape, out_channels, kernels, strides,
                stddev=0.02, name='transconv2d', reuse=None,
                padding='VALID'):
    """
    Apply a 2D transposed convolution layer on the input tensor and return
    the resulting tensor.

    Args:
        tensor_in (tensor): The input tensor.
        out_shape (list of int): The output shape. [height, width]
        out_channels (int): The number of output channels.
        kernels (list of int): The size of the kernel.
            [kernel_height, kernel_width]
        strides (list of int): The stride of the sliding window.
            [stride_height, stride_width]
        stddev (float): The value passed to the truncated normal initializer
            for weights. Defaults to 0.02.
        name (str): The tensorflow variable scope. Defaults to 'transconv2d'.
        reuse (bool): True to reuse weights and biases.
        padding (str): 'SAME' or 'VALID'. The type of padding algorithm to
            use. Defaults to 'VALID'.

    Returns:
        tensor: The resulting tensor, or None when `tensor_in` is None.
    """
    if tensor_in is None:
        return None
    with tf.variable_scope(name, reuse=reuse):
        print('| |---'+tf.get_variable_scope().name,
              tf.get_variable_scope().reuse)
        # filter : [height, width, output_channels, in_channels]
        weights = tf.get_variable(
            'weights', kernels + [out_channels, tensor_in.get_shape()[-1]],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        biases = tf.get_variable(
            'biases', [out_channels],
            initializer=tf.constant_initializer(0.0))
        output_shape = tf.stack([tf.shape(tensor_in)[0]] + out_shape
                                + [out_channels])
        try:
            conv_transpose = tf.nn.conv2d_transpose(
                tensor_in, weights, output_shape=output_shape,
                strides=[1]+strides+[1], padding=padding)
        except AttributeError:
            # Support for versions of TensorFlow before 0.7.0
            conv_transpose = tf.nn.deconv2d(
                tensor_in, weights, output_shape=output_shape,
                strides=[1]+strides+[1], padding=padding)
        return tf.reshape(tf.nn.bias_add(conv_transpose, biases),
                          output_shape)
Apply a 2D transposed convolution layer on the input tensor and return the resulting tensor. Args: tensor_in (tensor): The input tensor. out_shape (list of int): The output shape. [height, width] out_channels (int): The number of output channels. kernels (list of int): The size of the kernel.[kernel_height, kernel_width] strides (list of int): The stride of the sliding window. [stride_height, stride_width] stddev (float): The value passed to the truncated normal initializer for weights. Defaults to 0.02. name (str): The tenorflow variable scope. Defaults to 'transconv2d'. reuse (bool): True to reuse weights and biases. padding (str): 'SAME' or 'VALID'. The type of padding algorithm to use. Defaults to 'VALID'. Returns: tensor: The resulting tensor.
transconv2d
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def linear(tensor_in, output_size, stddev=0.02, bias_init=0.0, name='linear',
           reuse=None):
    """
    Apply a linear layer on the input tensor and return the resulting
    tensor.

    Args:
        tensor_in (tensor): The input tensor.
        output_size (int): The output size.
        stddev (float): The value passed to the truncated normal initializer
            for weights. Defaults to 0.02.
        bias_init (float): The value passed to constant initializer for
            biases. Defaults to 0.0.
        name (str): The tensorflow variable scope. Defaults to 'linear'.
        reuse (bool): True to reuse weights and biases.

    Returns:
        tensor: The resulting tensor, or None when `tensor_in` is None.
    """
    if tensor_in is None:
        return None
    with tf.variable_scope(name, reuse=reuse):
        print('| |---'+tf.get_variable_scope().name,
              tf.get_variable_scope().reuse)
        weights = tf.get_variable(
            'weights', [tensor_in.get_shape()[1], output_size], tf.float32,
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        biases = tf.get_variable(
            'biases', [output_size],
            initializer=tf.constant_initializer(bias_init))
        return tf.nn.bias_add(tf.matmul(tensor_in, weights), biases)
Apply a linear layer on the input tensor and return the resulting tensor. Args: tensor_in (tensor): The input tensor. output_size (int): The output size. stddev (float): The value passed to the truncated normal initializer for weights. Defaults to 0.02. bias_init (float): The value passed to constant initializer for weights. Defaults to 0.0. name (str): The tenorflow variable scope. Defaults to 'linear'. reuse (bool): True to reuse weights and biases. padding (str): 'SAME' or 'VALID'. The type of padding algorithm to use. Defaults to 'VALID'. Returns: tensor: The resulting tensor.
linear
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def to_chroma_tf(bar_or_track_bar, is_normalize=True):
    """Return the chroma tensor of the input tensor."""
    # Fold the 84-pitch axis into (12 pitch classes x 7 octaves) and sum
    # over the octave axis to obtain the chroma features.
    folded_shape = tf.stack([tf.shape(bar_or_track_bar)[0],
                             bar_or_track_bar.get_shape()[1], 12, 7,
                             bar_or_track_bar.get_shape()[3]])
    chroma = tf.reduce_sum(
        tf.reshape(tf.cast(bar_or_track_bar, tf.float32), folded_shape),
        axis=3)
    if not is_normalize:
        return chroma
    # Min-max normalize per sample; the epsilon avoids division by zero.
    chroma_max = tf.reduce_max(chroma, axis=(1, 2, 3), keep_dims=True)
    chroma_min = tf.reduce_min(chroma, axis=(1, 2, 3), keep_dims=True)
    return tf.truediv(chroma - chroma_min, (chroma_max - chroma_min + 1e-15))
Return the chroma tensor of the input tensor
to_chroma_tf
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def to_binary_tf(bar_or_track_bar, threshold=0.0, track_mode=False, melody=False):
    """Return the binarize tensor of the input tensor (be careful of the
    channel order!)

    In track mode the whole input is treated as one track (melody or not,
    per the `melody` flag). Otherwise the first channel of the last axis is
    treated as the melody track and the rest as ordinary tracks.
    """
    if track_mode:
        # melody track: keep a pitch only if it is both the per-timestep
        # maximum (monophonic constraint along axis 2) and above threshold
        if melody:
            melody_is_max = tf.equal(bar_or_track_bar, tf.reduce_max(bar_or_track_bar, axis=2, keep_dims=True))
            melody_pass_threshold = (bar_or_track_bar > threshold)
            out_tensor = tf.logical_and(melody_is_max, melody_pass_threshold)
        # non-melody track: simple thresholding
        else:
            out_tensor = (bar_or_track_bar > threshold)
        return out_tensor
    else:
        # Split the melody (channel 0) from the other tracks; supports both
        # 4-D (bar) and 5-D (phrase) inputs.
        if len(bar_or_track_bar.get_shape()) == 4:
            melody_track = tf.slice(bar_or_track_bar, [0, 0, 0, 0], [-1, -1, -1, 1])
            other_tracks = tf.slice(bar_or_track_bar, [0, 0, 0, 1], [-1, -1, -1, -1])
        elif len(bar_or_track_bar.get_shape()) == 5:
            melody_track = tf.slice(bar_or_track_bar, [0, 0, 0, 0, 0], [-1, -1, -1, -1, 1])
            other_tracks = tf.slice(bar_or_track_bar, [0, 0, 0, 0, 1], [-1, -1, -1, -1, -1])
        # melody track: argmax-style monophonic binarization
        melody_is_max = tf.equal(melody_track, tf.reduce_max(melody_track, axis=2, keep_dims=True))
        melody_pass_threshold = (melody_track > threshold)
        out_tensor_melody = tf.logical_and(melody_is_max, melody_pass_threshold)
        # other tracks: simple thresholding
        out_tensor_others = (other_tracks > threshold)
        # Re-concatenate along the track (last) axis.
        if len(bar_or_track_bar.get_shape()) == 4:
            return tf.concat([out_tensor_melody, out_tensor_others], 3)
        elif len(bar_or_track_bar.get_shape()) == 5:
            return tf.concat([out_tensor_melody, out_tensor_others], 4)
Return the binarize tensor of the input tensor (be careful of the channel order!)
to_binary_tf
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def to_image_tf(tensor_in, colormap=None):
    """Reverse the second dimension and swap the second dimension and the
    third dimension."""
    if colormap is None:
        colormap = get_colormap()
    # Map the 5 track channels to RGB via the colormap matrix.
    rgb_shape = tf.stack([-1, tensor_in.get_shape()[1],
                          tensor_in.get_shape()[2], 3])
    recolored = tf.reshape(
        tf.matmul(tf.reshape(tensor_in, [-1, 5]), colormap), rgb_shape)
    # Flip the pitch axis and transpose to image (row, column) order.
    return tf.transpose(tf.reverse_v2(recolored, axis=[2]), [0, 2, 1, 3])
Reverse the second dimension and swap the second dimension and the third dimension
to_image_tf
python
salu133445/musegan
v1/musegan/libs/ops.py
https://github.com/salu133445/musegan/blob/master/v1/musegan/libs/ops.py
MIT
def load_data():
    """Load and return the training data."""
    print('[*] Loading data...')

    # Load data from SharedArray
    if CONFIG['data']['training_data_location'] == 'sa':
        import SharedArray as sa
        x_train = sa.attach(CONFIG['data']['training_data'])

    # Load data from hard disk
    elif CONFIG['data']['training_data_location'] == 'hd':
        if os.path.isabs(CONFIG['data']['training_data']):
            x_train = np.load(CONFIG['data']['training_data'])
        else:
            # Bug fix: resolve relative to this file's directory --
            # the original joined onto the file path itself, producing a
            # path like ".../main.py/training_data/..." that cannot exist.
            filepath = os.path.abspath(os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                'training_data', CONFIG['data']['training_data']))
            x_train = np.load(filepath)

    # Reshape data to (num_sample, num_bar, num_timestep, num_pitch,
    # num_track)
    x_train = x_train.reshape(
        -1, CONFIG['model']['num_bar'], CONFIG['model']['num_timestep'],
        CONFIG['model']['num_pitch'], CONFIG['model']['num_track'])
    print('Training set size:', len(x_train))

    return x_train
Load and return the training data.
load_data
python
salu133445/musegan
v2/main.py
https://github.com/salu133445/musegan/blob/master/v2/main.py
MIT
def get_adversarial_loss(self, discriminator, scope_to_reuse=None):
    """Return the adversarial losses for the generator and the
    discriminator.

    Args:
        discriminator: Callable that builds the discriminator network
            (used to rebuild it on interpolated samples for wgan-gp).
        scope_to_reuse: Variable scope to reuse when rebuilding the
            discriminator (wgan-gp only). Defaults to None.

    Returns:
        tuple: (generator adversarial loss, discriminator adversarial loss).

    Raises:
        ValueError: If the configured GAN type is not supported.
    """
    gan_type = self.config['gan']['type']
    # Fail fast on unknown GAN types; previously an unknown type fell
    # through to `return` with the loss variables unbound.
    if gan_type not in ('gan', 'wgan', 'wgan-gp'):
        raise ValueError("Unrecognized GAN type: {}".format(gan_type))

    if gan_type == 'gan':
        d_loss_real = tf.losses.sigmoid_cross_entropy(
            tf.ones_like(self.D_real.tensor_out), self.D_real.tensor_out)
        d_loss_fake = tf.losses.sigmoid_cross_entropy(
            tf.zeros_like(self.D_fake.tensor_out), self.D_fake.tensor_out)
        adv_loss_d = d_loss_real + d_loss_fake
        adv_loss_g = tf.losses.sigmoid_cross_entropy(
            tf.ones_like(self.D_fake.tensor_out), self.D_fake.tensor_out)
    else:  # 'wgan' or 'wgan-gp'
        adv_loss_d = (tf.reduce_mean(self.D_fake.tensor_out)
                      - tf.reduce_mean(self.D_real.tensor_out))
        adv_loss_g = -tf.reduce_mean(self.D_fake.tensor_out)

        if gan_type == 'wgan-gp':
            # Gradient penalty on random interpolations between real and
            # generated samples.
            eps = tf.random_uniform(
                [tf.shape(self.x_)[0], 1, 1, 1, 1], 0.0, 1.0)
            inter = eps * self.x_ + (1. - eps) * self.G.tensor_out
            if scope_to_reuse is None:
                D_inter = discriminator(inter, self.config, name='D',
                                        reuse=True)
            else:
                with tf.variable_scope(scope_to_reuse, reuse=True):
                    D_inter = discriminator(inter, self.config, name='D',
                                            reuse=True)
            gradient = tf.gradients(D_inter.tensor_out, inter)[0]
            # 1e-8 avoids a zero-gradient sqrt singularity.
            slopes = tf.sqrt(1e-8 + tf.reduce_sum(
                tf.square(gradient),
                tf.range(1, len(gradient.get_shape()))))
            gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))
            adv_loss_d += (self.config['gan']['gp_coefficient']
                           * gradient_penalty)

    return adv_loss_g, adv_loss_d
Return the adversarial losses for the generator and the discriminator.
get_adversarial_loss
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def get_statistics(self):
    """Return model statistics (number of paramaters for each component)."""
    def get_num_parameter(var_list):
        """Given the variable list, return the total number of parameters.
        """
        # Pure-Python product: np.product was removed in NumPy 2.0.
        total = 0
        for var in var_list:
            size = 1
            for dim in var.get_shape():
                size *= dim.value
            total += size
        return int(total)
    num_par = get_num_parameter(tf.trainable_variables(
        self.scope.name))
    num_par_g = get_num_parameter(self.G.vars)
    num_par_d = get_num_parameter(self.D_fake.vars)
    return ("Number of parameters: {}\nNumber of parameters in G: {}\n"
            "Number of parameters in D: {}".format(num_par, num_par_g,
                                                   num_par_d))
Return model statistics (number of paramaters for each component).
get_statistics
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def get_num_parameter(var_list):
    """Given the variable list, return the total number of parameters.

    Each variable is expected to expose `get_shape()` yielding dimension
    objects with a `.value` attribute (TF 1.x style).
    """
    # Pure-Python product: np.product was removed in NumPy 2.0, and no
    # numpy is needed for a simple integer sum of products.
    total = 0
    for var in var_list:
        size = 1
        for dim in var.get_shape():
            size *= dim.value
        total += size
    return int(total)
Given the variable list, return the total number of parameters.
get_num_parameter
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def save_statistics(self, filepath=None):
    """Save model statistics to file. Default to save to the log directory
    given as a global variable."""
    # Only consult the config when no explicit path was given.
    target = (os.path.join(self.config['log_dir'], 'model_statistics.txt')
              if filepath is None else filepath)
    with open(target, 'w') as out_file:
        out_file.write(self.get_statistics())
Save model statistics to file. Default to save to the log directory given as a global variable.
save_statistics
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def save_summary(self, filepath=None):
    """Save model summary to file. Default to save to the log directory
    given as a global variable."""
    # Only consult the config when no explicit path was given.
    target = (os.path.join(self.config['log_dir'], 'model_summary.txt')
              if filepath is None else filepath)
    with open(target, 'w') as out_file:
        out_file.write(self.get_summary())
Save model summary to file. Default to save to the log directory given as a global variable.
save_summary
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def save(self, filepath=None):
    """Save the model to a checkpoint file. Default to save to the log
    directory given as a global variable."""
    # Only consult the config when no explicit path was given.
    target = (os.path.join(self.config['checkpoint_dir'],
                           self.name + '.model')
              if filepath is None else filepath)
    print('[*] Saving checkpoint...')
    self.saver.save(self.sess, target, self.global_step)
Save the model to a checkpoint file. Default to save to the log directory given as a global variable.
save
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def load_latest(self, checkpoint_dir=None):
    """Load the model from the latest checkpoint in a directory."""
    if checkpoint_dir is None:
        checkpoint_dir = self.config['checkpoint_dir']
    print('[*] Loading checkpoint...')
    # The 'checkpoint' index file's first line looks like:
    #   model_checkpoint_path: "model-12345"
    with open(os.path.join(checkpoint_dir, 'checkpoint')) as index_file:
        first_line = index_file.readline()
    checkpoint_name = os.path.basename(first_line.split()[1].strip('"'))
    checkpoint_path = os.path.realpath(
        os.path.join(checkpoint_dir, checkpoint_name))
    if checkpoint_path is None:
        raise ValueError("Checkpoint not found")
    self.saver.restore(self.sess, checkpoint_path)
Load the model from the latest checkpoint in a directory.
load_latest
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def save_samples(self, filename, samples, save_midi=False, shape=None,
                 postfix=None):
    """Save samples to an image file (and a MIDI file)."""
    if shape is None:
        shape = self.config['sample_grid']
    # Slicing past the end is a no-op, so the length check is implicit.
    samples = samples[:self.config['num_sample']]
    if postfix is None:
        image_name = '{}.png'.format(filename)
    else:
        image_name = '{}_{}.png'.format(filename, postfix)
    imagepath = os.path.join(self.config['sample_dir'], image_name)
    image_io.save_image(imagepath, samples, shape)
    if save_midi:
        midipath = os.path.join(self.config['sample_dir'],
                                '{}.mid'.format(filename))
        midi_io.save_midi(midipath, (samples > 0), self.config)
Save samples to an image file (and a MIDI file).
save_samples
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def run_sampler(self, targets, feed_dict, save_midi=False, postfix=None):
    """Run the target operation with feed_dict and save the samples."""
    target_list = targets if isinstance(targets, list) else [targets]
    outputs = self.sess.run(target_list, feed_dict)
    outputs = [out[:self.config['num_sample']] for out in outputs]
    # Interleave the per-target results along the sample axis.
    samples = np.stack(outputs, 1).reshape((-1,) + outputs[0].shape[1:])
    grid = [self.config['sample_grid'][0],
            self.config['sample_grid'][1] * len(outputs)]
    filename = self.get_global_step_str()
    if postfix is not None:
        filename += '_' + postfix
    self.save_samples(filename, samples, save_midi, grid)
Run the target operation with feed_dict and save the samples.
run_sampler
python
salu133445/musegan
v2/musegan/model.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/model.py
MIT
def build(self, config):
    """Build the end-to-end generator.

    Creates a shared network, two per-track private streams (pitch-time
    and time-pitch orderings), merges each pair, and passes the result
    through a per-track refiner.

    Returns
    -------
    tuple
        (concatenated track outputs, OrderedDict of sub-networks,
        concatenated pre-activations of the last layers).
    """
    nets = OrderedDict()
    nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],
                               name='shared')
    # Two parallel private streams per track.
    nets['pitch_time_private'] = [
        NeuralNet(nets['shared'].tensor_out,
                  config['net_g']['pitch_time_private'],
                  name='pt_'+str(idx))
        for idx in range(config['num_track'])
    ]
    nets['time_pitch_private'] = [
        NeuralNet(nets['shared'].tensor_out,
                  config['net_g']['time_pitch_private'],
                  name='tp_'+str(idx))
        for idx in range(config['num_track'])
    ]
    # Merge the two orderings channel-wise per track.
    nets['merged_private'] = [
        NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,
                             nets['time_pitch_private'][idx].tensor_out],
                            -1),
                  config['net_g']['merged_private'],
                  name='merged_'+str(idx))
        for idx in range(config['num_track'])
    ]
    nets['refiner_private'] = [
        NeuralNet(nets['merged_private'][idx].tensor_out,
                  config['net_r']['private'],
                  slope_tensor=self.slope_tensor,
                  name='refiner_private'+str(idx))
        for idx in range(config['num_track'])
    ]
    # NOTE(review): `nets['private']` is never defined in this body, so the
    # return below raises KeyError as written -- it presumably should read
    # `nets['refiner_private']`; confirm against the full class before
    # changing.
    return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,
            tf.concat([nn.layers[-1].preactivated
                       for nn in nets['private']], -1))
Build the end-to-end generator.
build
python
salu133445/musegan
v2/musegan/bmusegan/components.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/bmusegan/components.py
MIT
def get_image_grid(images, shape, grid_width=0, grid_color=0, frame=False): """ Merge the input images and return a merged grid image. Arguments --------- images : np.array, ndim=3 The image array. Shape is (num_image, height, width). shape : list or tuple of int Shape of the image grid. (height, width) grid_width : int Width of the grid lines. Default to 0. grid_color : int Color of the grid lines. Available values are 0 (black) to 255 (white). Default to 0. frame : bool True to add frame. Default to False. Returns ------- merged : np.array, ndim=3 The merged grid image. """ reshaped = images.reshape(shape[0], shape[1], images.shape[1], images.shape[2]) pad_width = ((0, 0), (0, 0), (grid_width, 0), (grid_width, 0)) padded = np.pad(reshaped, pad_width, 'constant', constant_values=grid_color) transposed = padded.transpose(0, 2, 1, 3) merged = transposed.reshape(shape[0] * (images.shape[1] + grid_width), shape[1] * (images.shape[2] + grid_width)) if frame: return np.pad(merged, ((0, grid_width), (0, grid_width)), 'constant', constant_values=grid_color) return merged[grid_width:, grid_width:]
Merge the input images and return a merged grid image. Arguments --------- images : np.array, ndim=3 The image array. Shape is (num_image, height, width). shape : list or tuple of int Shape of the image grid. (height, width) grid_width : int Width of the grid lines. Default to 0. grid_color : int Color of the grid lines. Available values are 0 (black) to 255 (white). Default to 0. frame : bool True to add frame. Default to False. Returns ------- merged : np.array, ndim=3 The merged grid image.
get_image_grid
python
salu133445/musegan
v2/musegan/utils/image_io.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/image_io.py
MIT
def save_image(filepath, phrases, shape, inverted=True, grid_width=3,
               grid_color=0, frame=True):
    """Save a batch of phrases to a single image grid.

    Arguments
    ---------
    filepath : str
        Path to save the image grid.
    phrases : np.array, ndim=5
        The phrase array. Shape is (num_phrase, num_bar, num_time_step,
        num_pitch, num_track).
    shape : list or tuple of int
        Shape of the outer image grid, (height, width), in phrases.
    inverted : bool
        True to invert the colors (notes dark on light). Default to True.
    grid_width : int
        Width of the outer grid lines. Default to 3.
    grid_color : int
        Color of the grid lines, 0 (black) to 255 (white). Default to 0.
    frame : bool
        True to add a frame around the grid. Default to True.
    """
    # Map the piano-roll values to uint8 gray levels (bool and float inputs
    # take different scaling paths).
    if phrases.dtype == np.bool_:
        if inverted:
            phrases = np.logical_not(phrases)
        clipped = (phrases * 255).astype(np.uint8)
    else:
        if inverted:
            phrases = 1. - phrases
        clipped = (phrases * 255.).clip(0, 255).astype(np.uint8)
    # Flip the pitch axis so low pitches end up at the bottom of the image.
    flipped = np.flip(clipped, 3)
    # Reorder to (phrase, track, bar, pitch, time), then flatten the
    # track/bar axes so each phrase becomes a stack of bar images.
    transposed = flipped.transpose(0, 4, 1, 3, 2)
    reshaped = transposed.reshape(-1, phrases.shape[1] * phrases.shape[4],
                                  phrases.shape[3], phrases.shape[2])
    merged_phrases = []
    # Inner grid per phrase: one row per track, one column per bar.
    phrase_shape = (phrases.shape[4], phrases.shape[1])
    for phrase in reshaped:
        merged_phrases.append(get_image_grid(phrase, phrase_shape, 1,
                                             grid_color))
    # Outer grid: lay the merged phrases out according to `shape`.
    merged = get_image_grid(np.stack(merged_phrases), shape, grid_width,
                            grid_color, frame)
    imageio.imwrite(filepath, merged)
Save a batch of phrases to a single image grid. Arguments --------- filepath : str Path to save the image grid. phrases : np.array, ndim=5 The phrase array. Shape is (num_phrase, num_bar, num_time_step, num_pitch, num_track). shape : list or tuple of int Shape of the image grid. (height, width) inverted : bool True to invert the colors. Default to True. grid_width : int Width of the grid lines. Default to 3. grid_color : int Color of the grid lines. Available values are 0 (black) to 255 (white). Default to 0. frame : bool True to add frame. Default to True.
save_image
python
salu133445/musegan
v2/musegan/utils/image_io.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/image_io.py
MIT
def get_tonal_matrix(r1=1.0, r2=1.0, r3=0.5):
    """Compute and return the 6x12 tonal matrix used for the tonal
    distance metric [1]. Default radii follow the paper.

    [1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting
    harmonic change in musical audio. In Proc. ACM MM Workshop on Audio
    and Music Computing Multimedia, 2006.
    """
    pitch_classes = np.arange(12)
    # Each circle contributes a (sin, cos) pair of rows: fifths, minor
    # thirds, and major thirds, with radii r1, r2, r3 respectively.
    circles = ((r1, 7. / 6.), (r2, 3. / 2.), (r3, 2. / 3.))
    tonal_matrix = np.empty((6, 12))
    for row, (radius, interval) in enumerate(circles):
        angles = pitch_classes * interval * np.pi
        tonal_matrix[2 * row] = radius * np.sin(angles)
        tonal_matrix[2 * row + 1] = radius * np.cos(angles)
    return tonal_matrix
Compute and return a tonal matrix for computing the tonal distance [1]. Default argument values are set as suggested by the paper. [1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting harmonic change in musical audio. In Proc. ACM MM Workshop on Audio and Music Computing Multimedia, 2006.
get_tonal_matrix
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def get_qualified_note_rate(pianoroll, threshold=2):
    """Return the ratio of qualified notes (notes no shorter than
    `threshold` time steps) to the total number of notes in a piano-roll.

    Parameters
    ----------
    pianoroll : np.array, ndim=2
        Piano-roll of shape (num_time_step, num_pitch); nonzero entries
        are active.
    threshold : int
        Minimum note length (in time steps) to count as qualified.

    Returns
    -------
    float
        Qualified-note ratio, or NaN when the piano-roll has no notes
        (previously this raised ZeroDivisionError).
    """
    # Pad one empty step on both ends so every note gets a matching onset
    # (+1) and offset (-1) in the first difference.
    padded = np.pad(pianoroll.astype(int), ((1, 1), (0, 0)), 'constant')
    diff = np.diff(padded, axis=0)
    # Scan pitch by pitch (column-major) so onset/offset pairs stay aligned.
    flattened = diff.T.reshape(-1,)
    onsets = (flattened > 0).nonzero()[0]
    offsets = (flattened < 0).nonzero()[0]
    if len(onsets) < 1:
        # Empty piano-roll: the metric is undefined; NaN matches how the
        # other metrics mark skipped entries.
        return np.nan
    num_qualified_note = (offsets - onsets >= threshold).sum()
    return num_qualified_note / len(onsets)
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a piano-roll.
get_qualified_note_rate
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def to_chroma(pianoroll):
    """Return the chroma features (not normalized).

    Parameters
    ----------
    pianoroll : np.array, ndim=2
        Piano-roll of shape (num_time_step, num_pitch).

    Returns
    -------
    np.array
        Chroma of shape (num_time_step, 12); entry [t, c] is the sum over
        all pitches of class c at time t. Pitch classes are taken modulo
        12 relative to the first column of the piano-roll (the lowest
        pitch offset is not accounted for here).
    """
    # Pad the pitch axis up to a multiple of 12 -- but only when needed
    # (the previous code padded 12 extra columns when the width was
    # already a multiple of 12).
    pad = (12 - pianoroll.shape[1] % 12) % 12
    padded = np.pad(pianoroll, ((0, 0), (0, pad)), 'constant')
    # Fold octaves: viewing the pitch axis as (octave, pitch_class) and
    # summing over octaves maps pitch p to class p % 12. The previous
    # reshape ((T, 12, -1), sum over axis 2) instead summed *contiguous*
    # pitch ranges, which is not a chroma.
    return padded.reshape(pianoroll.shape[0], -1, 12).sum(axis=1)
Return the chroma features (not normalized).
to_chroma
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def tonal_dist(chroma1, chroma2, tonal_matrix=None):
    """Return the tonal distance between two chroma features.

    Each chroma vector is normalized to sum to one, projected through the
    tonal matrix, and the Euclidean distance between the projections is
    returned. Falls back to the default tonal matrix (with a warning) when
    none is given.
    """
    if tonal_matrix is None:
        tonal_matrix = get_tonal_matrix()
        warnings.warn("`tonal matrix` not specified. Use default tonal matrix",
                      RuntimeWarning)
    with warnings.catch_warnings():
        # An all-zero chroma makes the normalization divide by zero;
        # silence the resulting RuntimeWarning (NaNs propagate instead).
        warnings.simplefilter("ignore", category=RuntimeWarning)
        normed1 = chroma1 / np.sum(chroma1)
        normed2 = chroma2 / np.sum(chroma2)
        projected1 = np.matmul(tonal_matrix, normed1)
        projected2 = np.matmul(tonal_matrix, normed2)
        return np.linalg.norm(projected1 - projected2)
Return the tonal distance between two chroma features.
tonal_dist
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def plot_histogram(hist, fig_dir=None, title=None, max_hist_num=None):
    """Plot a histogram of the given statistic values.

    Parameters
    ----------
    hist : np.array
        Values to histogram; NaN entries are dropped first.
    fig_dir : str, optional
        Directory to save the figure to (only used when `title` is also
        given, since `title` doubles as the file name).
    title : str, optional
        Plot title and output file name.
    max_hist_num : int, optional
        Upper bound on the number of bins.
    """
    # Imported lazily so the metrics module does not require matplotlib
    # unless plotting is requested.
    import matplotlib.pyplot as plt
    hist = hist[~np.isnan(hist)]
    u_value = np.unique(hist)
    # One bin per distinct value, capped at `max_hist_num`.
    hist_num = len(u_value)
    if max_hist_num is not None:
        if len(u_value) > max_hist_num:
            hist_num = max_hist_num
    fig = plt.figure()
    plt.hist(hist, hist_num)
    if title is not None:
        plt.title(title)
    if fig_dir is not None and title is not None:
        fig.savefig(os.path.join(fig_dir, title))
    plt.close(fig)
Plot the histograms of the statistics
plot_histogram
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def print_metrics_mat(self, metrics_mat):
    """Pretty-print the intratrack metrics as a formatted table.

    `metrics_mat` has shape (num_metric, num_track); NaN entries are
    rendered as blank cells.
    """
    header = ' '.join('{:^14}'.format(name) for name in self.metric_names)
    print(' ' * 12, header)
    for t, track_name in enumerate(self.track_names):
        cells = []
        for m, _ in enumerate(self.metric_names):
            value = metrics_mat[m, t]
            if np.isnan(value):
                cells.append('{:14}'.format(''))
            else:
                cells.append('{:^14}'.format('{:6.4f}'.format(value)))
        print('{:12}'.format(track_name), ' '.join(cells))
Print the intratrack metrics as a nice formatting table
print_metrics_mat
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def print_metrics_pair(self, pair_matrix):
    """Pretty-print the intertrack (tonal distance) metrics, one line per
    configured track pair."""
    for idx, (first, second) in enumerate(self.tonal_distance_pairs):
        print("{:12} {:12} {:12.5f}".format(
            self.track_names[first], self.track_names[second],
            pair_matrix[idx]))
Print the intertrack metrics as a nice formatting table
print_metrics_pair
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def eval(self, bars, verbose=False, mat_path=None, fig_dir=None):
    """Evaluate the input bars with the configured metrics.

    Parameters
    ----------
    bars : np.array
        Batch of bars; the last axis indexes tracks.
    verbose : bool
        Print formatted result tables and progress messages when True.
    mat_path : str, optional
        If given, save the mean score matrices to this `.npy` file.
    fig_dir : str, optional
        If given, save per-metric histograms to this directory.

    Returns
    -------
    tuple
        (score_matrix_mean, score_pair_matrix_mean): per-(metric, track)
        and per-pair means, NaN where a metric was not evaluated.
    """
    # score_matrix[m, t, b]: metric m on track t of bar b (NaN = skipped).
    score_matrix = np.empty((len(self.metric_names), len(self.track_names),
                             bars.shape[0]))
    score_matrix.fill(np.nan)
    score_pair_matrix = np.zeros((len(self.tonal_distance_pairs),
                                  bars.shape[0]))
    score_pair_matrix.fill(np.nan)
    for b in range(bars.shape[0]):
        for t in range(len(self.track_names)):
            is_empty_bar = ~np.any(bars[b, ..., t])
            if self.metric_map[0, t]:
                score_matrix[0, t, b] = is_empty_bar
            # The remaining metrics are undefined on an empty bar.
            if is_empty_bar:
                continue
            if self.metric_map[1, t]:
                score_matrix[1, t, b] = get_num_pitch_used(bars[b, ..., t])
            if self.metric_map[2, t]:
                score_matrix[2, t, b] = get_qualified_note_rate(
                    bars[b, ..., t])
            if self.metric_map[3, t]:
                score_matrix[3, t, b] = get_polyphonic_ratio(
                    bars[b, ..., t])
            if self.metric_map[4, t]:
                score_matrix[4, t, b] = get_in_scale(
                    to_chroma(bars[b, ..., t]), self.scale_mask)
            if self.metric_map[5, t]:
                score_matrix[5, t, b] = get_drum_pattern(bars[b, ..., t],
                                                         self.drum_filter)
            if self.metric_map[6, t]:
                # Distinct pitch classes used (chroma-based).
                score_matrix[6, t, b] = get_num_pitch_used(
                    to_chroma(bars[b, ..., t]))
        for p, pair in enumerate(self.tonal_distance_pairs):
            score_pair_matrix[p, b] = get_harmonicity(
                to_chroma(bars[b, ..., pair[0]]),
                to_chroma(bars[b, ..., pair[1]]), self.beat_resolution,
                self.tonal_matrix)
    # All-NaN slices make nanmean emit RuntimeWarnings; they are expected
    # here, so suppress them.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        score_matrix_mean = np.nanmean(score_matrix, axis=2)
        score_pair_matrix_mean = np.nanmean(score_pair_matrix, axis=1)
    if verbose:
        print("{:=^120}".format(' Evaluation '))
        print('Data Size:', bars.shape)
        print("{:-^120}".format('Intratrack Evaluation'))
        self.print_metrics_mat(score_matrix_mean)
        print("{:-^120}".format('Intertrack Evaluation'))
        self.print_metrics_pair(score_pair_matrix_mean)
    if fig_dir is not None:
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        if verbose:
            print('[*] Plotting...')
        for m, metric_name in enumerate(self.metric_names):
            for t, track_name in enumerate(self.track_names):
                if self.metric_map[m, t]:
                    # Build a file-system-safe title: "metric_track-name".
                    temp = '-'.join(track_name.replace('.', ' ').split())
                    title = '_'.join([metric_name, temp])
                    plot_histogram(score_matrix[m, t], fig_dir=fig_dir,
                                   title=title, max_hist_num=20)
        if verbose:
            print("Successfully saved to", fig_dir)
    if mat_path is not None:
        if not mat_path.endswith(".npy"):
            mat_path = mat_path + '.npy'
        info_dict = {
            'score_matrix_mean': score_matrix_mean,
            'score_pair_matrix_mean': score_pair_matrix_mean}
        if verbose:
            print('[*] Saving score matrices...')
        np.save(mat_path, info_dict)
        if verbose:
            print("Successfully saved to", mat_path)
    return score_matrix_mean, score_pair_matrix_mean
Evaluate the input bars with the metrics
eval
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def eval_dataset(filepath, result_dir, location, config):
    """Run evaluation on a dataset stored in either shared array (if
    `location` is 'sa') or in hard disk (if `location` is 'hd') and save
    the results to the given directory.
    """
    def load_data(filepath, location):
        """Load and return the training data."""
        print('[*] Loading data...')
        # Load data from SharedArray
        if location == 'sa':
            import SharedArray as sa
            data = sa.attach(filepath)
        # Load data from hard disk
        elif location == 'hd':
            if os.path.isabs(filepath):
                data = np.load(filepath)
            else:
                # Relative paths are resolved against the sibling
                # `training_data` directory of this package.
                root = os.path.dirname(os.path.dirname(
                    os.path.realpath(__file__)))
                data = np.load(os.path.abspath(os.path.join(
                    root, 'training_data', filepath)))
        else:
            raise ValueError("Unrecognized value for `location`")
        # Reshape data to (num_bar, timestep, pitch, track)
        data = data.reshape(-1, config['num_timestep'], config['num_pitch'],
                            config['num_track'])
        return data
    # NOTE(review): this message duplicates the one printed inside
    # `load_data` -- likely redundant.
    print('[*] Loading data...')
    data = load_data(filepath, location)
    print('[*] Running evaluation')
    metrics = Metrics(config)
    _ = metrics.eval(data, verbose=True,
                     mat_path=os.path.join(result_dir, 'score_matrices.npy'),
                     fig_dir=result_dir)
Run evaluation on a dataset stored in either shared array (if `location` is 'sa') or in hard disk (if `location` is 'hd') and save the results to the given directory.
eval_dataset
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def load_data(filepath, location):
    """Load and return the training data.

    Parameters
    ----------
    filepath : str
        SharedArray name (when `location` is 'sa') or a path to a `.npy`
        file (when `location` is 'hd'); relative paths are resolved
        against the package's `training_data` directory.
    location : str
        Either 'sa' (SharedArray) or 'hd' (hard disk).

    Note: `config` is read from the enclosing scope.
    """
    print('[*] Loading data...')
    # Load data from SharedArray
    if location == 'sa':
        import SharedArray as sa
        data = sa.attach(filepath)
    # Load data from hard disk
    elif location == 'hd':
        if os.path.isabs(filepath):
            data = np.load(filepath)
        else:
            root = os.path.dirname(os.path.dirname(
                os.path.realpath(__file__)))
            data = np.load(os.path.abspath(os.path.join(
                root, 'training_data', filepath)))
    else:
        raise ValueError("Unrecognized value for `location`")
    # Reshape data to (num_bar, timestep, pitch, track)
    data = data.reshape(-1, config['num_timestep'], config['num_pitch'],
                        config['num_track'])
    return data
Load and return the training data.
load_data
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def print_mat_file(mat_path, config):
    """Print the score matrices stored in a file.

    Parameters
    ----------
    mat_path : str
        Path to a `.npy` file containing a dict (saved with `np.save`)
        with keys 'score_matrix_mean' and 'score_pair_matrix_mean'.
    config : dict
        Configuration passed through to `Metrics`.
    """
    metrics = Metrics(config)
    # `np.save` on a dict stores a 0-d object array, so loading requires
    # `allow_pickle=True` and unwrapping with `.item()`. The previous
    # `with np.load(...)` only works for `.npz` archives -- an ndarray is
    # not a context manager, so it crashed on `.npy` files.
    loaded = np.load(mat_path, allow_pickle=True).item()
    metrics.print_metrics_mat(loaded['score_matrix_mean'])
    metrics.print_metrics_pair(loaded['score_pair_matrix_mean'])
Print the score matrices stored in a file.
print_mat_file
python
salu133445/musegan
v2/musegan/utils/metrics.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/metrics.py
MIT
def write_midi(filepath, pianorolls, program_nums=None, is_drums=None,
               track_names=None, velocity=100, tempo=120.0,
               beat_resolution=24):
    """Write the given piano-roll(s) to a single MIDI file.

    Arguments
    ---------
    filepath : str
        Path to save the MIDI file.
    pianorolls : np.array, ndim=3
        Boolean piano-roll array of shape (num_timestep, num_pitch,
        num_track).
    program_nums : int or list of int, optional
        MIDI program number(s), 0-127, one per track. Defaults to 0.
    is_drums : bool or list of bool, optional
        Drum indicator(s), one per track. Defaults to False.
    track_names : list of str, optional
        Track name(s), one per track.
    velocity : int
        Unused here; kept for interface compatibility.
    tempo : float
        Tempo in BPM.
    beat_resolution : int
        Time steps per beat.

    Raises
    ------
    TypeError
        If `pianorolls` is not boolean-valued.
    ValueError
        If `program_nums` or `is_drums` length does not match the number
        of tracks.
    """
    if not np.issubdtype(pianorolls.dtype, np.bool_):
        raise TypeError("Support only binary-valued piano-rolls")
    num_track = pianorolls.shape[2]
    # Fill in defaults *before* validating lengths (the original checked
    # lengths first, so leaving the defaults as None raised TypeError), and
    # size the defaults by the track axis (the original used
    # `len(pianorolls)`, i.e. the time axis).
    if program_nums is None:
        program_nums = [0] * num_track
    if is_drums is None:
        is_drums = [False] * num_track
    # Promote scalars to one-element lists for single-track input.
    if isinstance(program_nums, int):
        program_nums = [program_nums]
    if isinstance(is_drums, int):
        is_drums = [is_drums]
    if num_track != len(program_nums):
        raise ValueError("`pianorolls` and `program_nums` must have the same "
                         "length")
    if num_track != len(is_drums):
        raise ValueError("`pianorolls` and `is_drums` must have the same "
                         "length")
    multitrack = Multitrack(beat_resolution=beat_resolution, tempo=tempo)
    for idx in range(num_track):
        if track_names is None:
            track = Track(pianorolls[..., idx], program_nums[idx],
                          is_drums[idx])
        else:
            track = Track(pianorolls[..., idx], program_nums[idx],
                          is_drums[idx], track_names[idx])
        multitrack.append_track(track)
    multitrack.write(filepath)
Write the given piano-roll(s) to a single MIDI file. Arguments --------- filepath : str Path to save the MIDI file. pianorolls : np.array, ndim=3 The piano-roll array to be written to the MIDI file. Shape is (num_timestep, num_pitch, num_track). program_nums : int or list of int MIDI program number(s) to be assigned to the MIDI track(s). Available values are 0 to 127. Must have the same length as `pianorolls`. is_drums : list of bool Drum indicator(s) to be assigned to the MIDI track(s). True for drums. False for other instruments. Must have the same length as `pianorolls`. track_names : list of str Track name(s) to be assigned to the MIDI track(s).
write_midi
python
salu133445/musegan
v2/musegan/utils/midi_io.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/midi_io.py
MIT
def save_midi(filepath, phrases, config):
    """Save a batch of phrases to a single MIDI file.

    Arguments
    ---------
    filepath : str
        Path to save the MIDI file.
    phrases : np.array, ndim=5
        Boolean phrase array of shape (num_phrase, num_bar, num_time_step,
        num_pitch, num_track).
    config : dict
        Uses 'pause_between_samples' (silent steps appended after each
        phrase), 'lowest_pitch', 'num_pitch' (to place the piano-roll in
        the 128-pitch MIDI range), 'programs', 'is_drums' and 'tempo'.

    Raises
    ------
    TypeError
        If `phrases` is not boolean-valued.
    """
    if not np.issubdtype(phrases.dtype, np.bool_):
        raise TypeError("Support only binary-valued piano-rolls")
    # Collapse the bar axis: (num_phrase, num_bar * num_time_step, pitch,
    # track).
    reshaped = phrases.reshape(-1, phrases.shape[1] * phrases.shape[2],
                               phrases.shape[3], phrases.shape[4])
    # Append a pause after each phrase and lift the pitch axis to the full
    # 128-pitch MIDI range.
    pad_width = ((0, 0), (0, config['pause_between_samples']),
                 (config['lowest_pitch'],
                  128 - config['lowest_pitch'] - config['num_pitch']),
                 (0, 0))
    padded = np.pad(reshaped, pad_width, 'constant')
    # Chain all phrases into one long piano-roll per track.
    pianorolls = padded.reshape(-1, padded.shape[2], padded.shape[3])
    write_midi(filepath, pianorolls, config['programs'], config['is_drums'],
               tempo=config['tempo'])
Save a batch of phrases to a single MIDI file. Arguments --------- filepath : str Path to save the image grid. phrases : list of np.array Phrase arrays to be saved. All arrays must have the same shape. pause : int Length of pauses (in timestep) to be inserted between phrases. Default to 0.
save_midi
python
salu133445/musegan
v2/musegan/utils/midi_io.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/midi_io.py
MIT
def binary_round(x):
    """Round a tensor with values in [0, 1] to a tensor with values in
    {0, 1}, using the straight-through estimator for the gradient.

    See https://arxiv.org/abs/1308.3432.
    """
    g = tf.get_default_graph()
    with ops.name_scope("BinaryRound") as name:
        # Replace Round's (zero) gradient with Identity so gradients pass
        # straight through the rounding.
        with g.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1}, using the straight through estimator for the gradient.
binary_round
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def bernoulli_sample(x):
    """Sample a {0, 1} tensor from a tensor of probabilities in [0, 1],
    using the straight through estimator for the gradient.

    E.g., if an entry of `x` is 0.6, the sample is 1 with probability 0.6
    and 0 otherwise, and the gradient is pass-through (identity).
    """
    g = tf.get_default_graph()
    with ops.name_scope("BernoulliSample") as name:
        # `ceil(x - u)` with u ~ U(0, 1) equals 1 iff u < x, i.e. a
        # Bernoulli(x) sample. The Sub gradient is redirected to the custom
        # "BernoulliSample_ST" registration (registered elsewhere in this
        # module -- not visible here) to make the estimator pass-through.
        with g.gradient_override_map({"Ceil": "Identity",
                                      "Sub": "BernoulliSample_ST"}):
            return tf.ceil(x - tf.random_uniform(tf.shape(x)), name=name)
Uses a tensor whose values are in [0,1] to sample a tensor with values in {0, 1}, using the straight through estimator for the gradient. E.g., if x is 0.6, bernoulliSample(x) will be 1 with probability 0.6, and 0 otherwise, and the gradient will be pass-through (identity).
bernoulli_sample
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def pass_through_sigmoid(x, slope=1):
    """Sigmoid that uses the identity function as its gradient.

    NOTE(review): the `slope` parameter is accepted but unused; slope
    annealing is only applied on the non-pass-through code path -- confirm
    this is intended.
    """
    g = tf.get_default_graph()
    with ops.name_scope("PassThroughSigmoid") as name:
        # Forward pass is a normal sigmoid; backward pass is identity.
        with g.gradient_override_map({"Sigmoid": "Identity"}):
            return tf.sigmoid(x, name=name)
Sigmoid that uses identity function as its gradient
pass_through_sigmoid
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def binary_stochastic_ST(x, slope_tensor=None, pass_through=True,
                         stochastic=True):
    """Sigmoid followed by either a random sample from a bernoulli
    distribution according to the result (binary stochastic neuron)
    (default), or a sigmoid followed by a binary step function (if
    stochastic == False). Uses the straight through estimator.
    See https://arxiv.org/abs/1308.3432.

    Arguments:
    * x: the pre-activation / logit tensor
    * slope_tensor: if pass_through==False, adjusts the slope of the
      sigmoid for the Slope Annealing Trick
      (see http://arxiv.org/abs/1609.01704)
    * pass_through: if True (default), gradient of the entire function is
      1 or 0; if False, gradient of 1 is scaled by the gradient of the
      sigmoid (required if the Slope Annealing Trick is used)
    * stochastic: binary stochastic neuron if True (default), or step
      function if False

    Returns a (binary output, probability) tensor pair.
    """
    if slope_tensor is None:
        slope_tensor = tf.constant(1.0)
    if pass_through:
        # Identity gradient through the sigmoid (no slope annealing).
        p = pass_through_sigmoid(x)
    else:
        # Real sigmoid gradient, with the slope-annealed logit.
        p = tf.sigmoid(slope_tensor * x)
    if stochastic:
        return bernoulli_sample(p), p
    else:
        return binary_round(p), p
Sigmoid followed by either a random sample from a bernoulli distribution according to the result (binary stochastic neuron) (default), or a sigmoid followed by a binary step function (if stochastic == False). Uses the straight through estimator. See https://arxiv.org/abs/1308.3432. Arguments: * x: the pre-activation / logit tensor * slope_tensor: if passThrough==False, slope adjusts the slope of the sigmoid function for purposes of the Slope Annealing Trick (see http://arxiv.org/abs/1609.01704) * pass_through: if True (default), gradient of the entire function is 1 or 0; if False, gradient of 1 is scaled by the gradient of the sigmoid (required if Slope Annealing Trick is used) * stochastic: binary stochastic neuron if True (default), or step function if False
binary_stochastic_ST
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def binary_stochastic_REINFORCE(x, loss_op_name="loss_by_example"):
    """Sigmoid followed by a random sample from a bernoulli distribution
    according to the result (binary stochastic neuron). Uses the REINFORCE
    estimator. See https://arxiv.org/abs/1308.3432.

    NOTE: Requires a loss operation with name matching the argument for
    loss_op_name in the graph. This loss operation should be broken out by
    example (i.e., not a single number for the entire batch).
    """
    g = tf.get_default_graph()
    with ops.name_scope("BinaryStochasticREINFORCE"):
        # The REINFORCE gradient (registered elsewhere for
        # "BinaryStochastic_REINFORCE") needs to find the per-example loss
        # op; the sigmoid op name -> loss op name mapping is stashed in a
        # graph collection for it to look up.
        with g.gradient_override_map({"Sigmoid": "BinaryStochastic_REINFORCE",
                                      "Ceil": "Identity"}):
            p = tf.sigmoid(x)
            reinforce_collection = g.get_collection("REINFORCE")
            if not reinforce_collection:
                # First use: create the shared mapping dict.
                g.add_to_collection("REINFORCE", {})
                reinforce_collection = g.get_collection("REINFORCE")
            reinforce_collection[0][p.op.name] = loss_op_name
            # Bernoulli sample via ceil(p - u), u ~ U(0, 1).
            return tf.ceil(p - tf.random_uniform(tf.shape(x)))
Sigmoid followed by a random sample from a bernoulli distribution according to the result (binary stochastic neuron). Uses the REINFORCE estimator. See https://arxiv.org/abs/1308.3432. NOTE: Requires a loss operation with name matching the argument for loss_op_name in the graph. This loss operation should be broken out by example (i.e., not a single number for the entire batch).
binary_stochastic_REINFORCE
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def _binaryStochastic_REINFORCE(op, _):
    """Unbiased gradient estimator for the binary stochastic neuron based
    on REINFORCE, with a moving-average variance-reduction baseline.

    Registered as the overridden Sigmoid gradient; `op` is the sigmoid op
    whose name was recorded in the "REINFORCE" graph collection.
    """
    # Look up the per-example loss tensor registered for this neuron.
    loss_op_name = op.graph.get_collection("REINFORCE")[0][op.name]
    loss_tensor = op.graph.get_operation_by_name(loss_op_name).outputs[0]
    # Walk forward through the graph: sigmoid -> sub (p - u) -> ceil
    # (the sampled binary outcome). This relies on the exact op layout
    # produced by `binary_stochastic_REINFORCE`.
    sub_tensor = op.outputs[0].consumers()[0].outputs[0]  # subtraction tensor
    ceil_tensor = sub_tensor.consumers()[0].outputs[0]  # ceiling tensor
    # (sample - probability): the REINFORCE score direction.
    outcome_diff = (ceil_tensor - op.outputs[0])
    # Provides an early out if we want to avoid variance adjustment for
    # whatever reason (e.g., to show that variance adjustment helps)
    if op.graph.get_collection("REINFORCE")[0].get("no_variance_adj"):
        return outcome_diff * tf.expand_dims(loss_tensor, 1)
    # Variance-reduction baseline: an exponential moving average of the
    # loss weighted by squared outcome differences.
    outcome_diff_sq = tf.square(outcome_diff)
    outcome_diff_sq_r = tf.reduce_mean(outcome_diff_sq, reduction_indices=0)
    outcome_diff_sq_loss_r = tf.reduce_mean(
        outcome_diff_sq * tf.expand_dims(loss_tensor, 1), reduction_indices=0)
    l_bar_num = tf.Variable(tf.zeros(outcome_diff_sq_r.get_shape()),
                            trainable=False)
    l_bar_den = tf.Variable(tf.ones(outcome_diff_sq_r.get_shape()),
                            trainable=False)
    # Note: we already get a decent estimate of the average from the
    # minibatch
    decay = 0.95
    train_l_bar_num = tf.assign(l_bar_num, l_bar_num*decay +\
                                outcome_diff_sq_loss_r*(1-decay))
    train_l_bar_den = tf.assign(l_bar_den, l_bar_den*decay +\
                                outcome_diff_sq_r*(1-decay))
    with tf.control_dependencies([train_l_bar_num, train_l_bar_den]):
        # Baseline value (epsilon keeps the division stable).
        l_bar = train_l_bar_num/(train_l_bar_den + 1e-4)
        # Broadcast the per-example loss across the neuron dimension.
        l = tf.tile(tf.expand_dims(loss_tensor, 1),
                    tf.constant([1, l_bar.get_shape().as_list()[0]]))
    return outcome_diff * (l - l_bar)
Unbiased estimator for binary stochastic function based on REINFORCE.
_binaryStochastic_REINFORCE
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def binary_wrapper(pre_activations_tensor, estimator,
                   stochastic_tensor=tf.constant(True), pass_through=True,
                   slope_tensor=tf.constant(1.0)):
    """Turn a layer of pre-activations (logits) into a layer of binary
    stochastic neurons.

    Keyword arguments:
    * estimator: either 'straight_through' or 'reinforce'
    * stochastic_tensor: a boolean tensor indicating whether to sample
      from a bernoulli distribution (True, default) or use a step
      function (e.g., for inference)
    * pass_through: for ST only - whether to substitute an identity
      derivative on the backprop (True, default) or use the sigmoid's
      derivative
    * slope_tensor: for ST only - slope for the slope annealing trick
    """
    if estimator == 'straight_through':
        if pass_through:
            return tf.cond(
                stochastic_tensor,
                lambda: binary_stochastic_ST(pre_activations_tensor),
                lambda: binary_stochastic_ST(pre_activations_tensor,
                                             stochastic=False))
        else:
            return tf.cond(
                stochastic_tensor,
                lambda: binary_stochastic_ST(pre_activations_tensor,
                                             slope_tensor, False),
                lambda: binary_stochastic_ST(pre_activations_tensor,
                                             slope_tensor, False, False))
    elif estimator == 'reinforce':
        # binaryStochastic_REINFORCE was designed to only be stochastic, so
        # using the ST version for the step fn for purposes of using step
        # fn at evaluation / not for training
        return tf.cond(
            stochastic_tensor,
            lambda: binary_stochastic_REINFORCE(pre_activations_tensor),
            lambda: binary_stochastic_ST(pre_activations_tensor,
                                         stochastic=False))
    else:
        raise ValueError("Unrecognized estimator.")
Turns a layer of pre-activations (logits) into a layer of binary stochastic neurons Keyword arguments: *estimator: either ST or REINFORCE *stochastic_tensor: a boolean tensor indicating whether to sample from a bernoulli distribution (True, default) or use a step_function (e.g., for inference) *pass_through: for ST only - boolean as to whether to substitute identity derivative on the backprop (True, default), or whether to use the derivative of the sigmoid *slope_tensor: for ST only - tensor specifying the slope for purposes of slope annealing trick
binary_wrapper
python
salu133445/musegan
v2/musegan/utils/ops.py
https://github.com/salu133445/musegan/blob/master/v2/musegan/utils/ops.py
MIT
def parse_arguments():
    """Parse the command line and return (filepath, name, prefix)."""
    parser = argparse.ArgumentParser()
    # (flag, help text) specs; positional first, then the two options.
    arg_specs = (
        ('filepath', "Path to the data file."),
        ('--name', "File name to save in SharedArray. Default to use the "
                   "original file name."),
        ('--prefix', "Prefix to the file name to save in SharedArray. Only "
                     "effective when `name` is not given."),
    )
    for flag, help_text in arg_specs:
        parser.add_argument(flag, help=help_text)
    parsed = parser.parse_args()
    return parsed.filepath, parsed.name, parsed.prefix
Parse and return the command line arguments.
parse_arguments
python
salu133445/musegan
v2/training_data/store_to_sa.py
https://github.com/salu133445/musegan/blob/master/v2/training_data/store_to_sa.py
MIT
def stream_complete(self, stream_id: int):
    """When a stream is complete, we can send our response.

    The response is a JSON document echoing the request headers and the
    (UTF-8 decoded) request body.
    """
    try:
        request_data = self.stream_data[stream_id]
    except KeyError:
        # Just return, we probably 405'd this already
        return
    headers = request_data.headers
    body = request_data.data.getvalue().decode('utf-8')
    data = json.dumps(
        {"headers": headers, "body": body}, indent=4
    ).encode("utf8")
    response_headers = (
        (':status', '200'),
        ('content-type', 'application/json'),
        ('content-length', str(len(data))),
        ('server', 'asyncio-h2'),
    )
    self.conn.send_headers(stream_id, response_headers)
    # Body transmission must respect flow control, so hand it off to an
    # async task rather than writing it inline.
    asyncio.ensure_future(self.send_data(data, stream_id))
When a stream is complete, we can send our response.
stream_complete
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
def receive_data(self, data: bytes, flow_controlled_length: int,
                 stream_id: int):
    """We've received some data on a stream. If that stream is one we're
    expecting data on, save it off (and account for the received amount of
    data in flow control so that the client can send more data). Otherwise,
    reset the stream.
    """
    try:
        stream_data = self.stream_data[stream_id]
    except KeyError:
        # Unknown stream: refuse the data instead of buffering it.
        self.conn.reset_stream(
            stream_id, error_code=ErrorCodes.PROTOCOL_ERROR
        )
    else:
        stream_data.data.write(data)
        # Reopen the receive window by the flow-controlled size (which may
        # exceed len(data) due to padding).
        self.conn.acknowledge_received_data(flow_controlled_length,
                                            stream_id)
We've received some data on a stream. If that stream is one we're expecting data on, save it off (and account for the received amount of data in flow control so that the client can send more data). Otherwise, reset the stream.
receive_data
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
def stream_reset(self, stream_id):
    """Handle an inbound stream reset: stop sending data on that stream by
    cancelling any pending flow-control future."""
    future = self.flow_control_futures.pop(stream_id, None)
    if future is not None:
        future.cancel()
A stream reset was sent. Stop sending data.
stream_reset
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
async def send_data(self, data, stream_id):
    """
    Push ``data`` out on ``stream_id``, respecting HTTP/2 flow control.

    Suspends (via ``wait_for_flow_control``) whenever the peer's window
    is exhausted, and never exceeds the negotiated maximum frame size.
    The final chunk carries END_STREAM.
    """
    remaining = data
    while remaining:
        # Stall until the peer grants at least one byte of window.
        while self.conn.local_flow_control_window(stream_id) < 1:
            try:
                await self.wait_for_flow_control(stream_id)
            except asyncio.CancelledError:
                # The stream was reset while we were waiting: give up.
                return

        chunk_size = min(
            self.conn.local_flow_control_window(stream_id),
            len(remaining),
            self.conn.max_outbound_frame_size,
        )
        is_last = chunk_size == len(remaining)
        try:
            self.conn.send_data(
                stream_id, remaining[:chunk_size], end_stream=is_last
            )
        except (StreamClosedError, ProtocolError):
            # The stream got closed underneath us; nothing more to do.
            break

        self.transport.write(self.conn.data_to_send())
        remaining = remaining[chunk_size:]
Send data according to the flow control rules.
send_data
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
async def wait_for_flow_control(self, stream_id):
    """
    Park this coroutine until the flow-control window for ``stream_id``
    reopens.  ``window_updated`` resolves the future to wake us;
    ``stream_reset`` cancels it.
    """
    future = asyncio.Future()
    self.flow_control_futures[stream_id] = future
    await future
Waits for a Future that fires when the flow control window is opened.
wait_for_flow_control
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
def window_updated(self, stream_id, delta):
    """
    React to a WINDOW_UPDATE frame by waking blocked senders.

    A stream-specific update wakes only that stream's waiter; a
    connection-level update (stream id 0) wakes every waiter at once.
    """
    if not stream_id:
        # Connection-wide update: release everybody.
        for future in self.flow_control_futures.values():
            future.set_result(delta)
        self.flow_control_futures = {}
    else:
        future = self.flow_control_futures.pop(stream_id, None)
        if future is not None:
            future.set_result(delta)
A window update frame was received. Unblock some number of flow control Futures.
window_updated
python
python-hyper/h2
examples/asyncio/asyncio-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py
MIT
def connection_made(self, transport):
    """
    Called by asyncio once the underlying connection is up.

    Stores the transport, performs the HTTP/2 connection preamble, and
    kicks off the background coroutine that drains the send queue.
    """
    self.transport = transport
    # Emit the HTTP/2 preface plus our SETTINGS frame immediately.
    self.conn.initiate_connection()
    transport.write(self.conn.data_to_send())
    self._send_loop_task = self._loop.create_task(self.sending_loop())
The connection has been made. Here we need to save off our transport, do basic HTTP/2 connection setup, and then start our data writing coroutine.
connection_made
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def window_opened(self, event):
    """
    Requeue data that was parked because the flow-control window closed.

    ``sending_loop`` buffers at most one pending chunk per stream in
    ``_flow_controlled_data``.  When the window reopens we push those
    chunks back onto the send queue so the loop retries them.  A
    connection-level update (stream id 0) releases every stream.
    """
    if not event.stream_id:
        # Connection-scoped update: requeue everything.  We must not
        # yield control while doing this, so only put_nowait is used.
        for buffered in self._flow_controlled_data.values():
            self._stream_data.put_nowait(buffered)
        self._flow_controlled_data.clear()
    elif event.stream_id in self._flow_controlled_data:
        self._stream_data.put_nowait(
            self._flow_controlled_data.pop(event.stream_id)
        )
The flow control window got opened. This is important because it's possible that we were unable to send some WSGI data because the flow control window was too small. If that happens, the sending_loop coroutine starts buffering data. As the window gets opened, we need to unbuffer the data. We do that by placing the data chunks back on the back of the send queue and letting the sending loop take another shot at sending them. This system only works because we require that each stream only have *one* data chunk in the sending queue at any time. The threading events force this invariant to remain true.
window_opened
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
async def sending_loop(self):
    """
    Background coroutine that drains the send queue forever.

    Pulls ``(stream_id, data, event)`` tuples off the asyncio queue and
    transmits as much as flow control allows.  Data that does not fit in
    the current window is parked in ``_flow_controlled_data`` (at most
    one pending chunk per stream) and requeued by ``window_opened``.
    The threading ``event`` is set only once the chunk has been handed
    to the connection in full; that is what unblocks the WSGI thread.

    HTTP/2 priority is deliberately not handled here.  This coroutine
    explicitly *does not end*.
    """
    while True:
        stream_id, data, event = await self._stream_data.get()

        # If this stream got reset, drop the data on the floor.  We
        # still set the event so the WSGI application doesn't lock up,
        # but we must not touch the (now closed) stream again:
        # end_stream()/send_data() on it would raise.  The original
        # code fell through here, which is the bug this fixes.
        if stream_id in self._reset_streams:
            event.set()
            continue

        # END_DATA_SENTINEL marks the end of the response body.  Again,
        # we *must* set the event or the application locks up.
        if data is END_DATA_SENTINEL:
            self.conn.end_stream(stream_id)
            self.transport.write(self.conn.data_to_send())
            event.set()
            continue

        # Send only what fits in the flow-control window; buffer the
        # remainder.
        window_size = self.conn.local_flow_control_window(stream_id)
        chunk_size = min(window_size, len(data))
        data_to_send = data[:chunk_size]
        data_to_buffer = data[chunk_size:]

        if data_to_send:
            # Respect the maximum frame size by slicing the sendable
            # data into max-frame-sized pieces.  In a *real* application
            # you'd want to consider priority here.
            max_size = self.conn.max_outbound_frame_size
            chunks = (
                data_to_send[x:x + max_size]
                for x in range(0, len(data_to_send), max_size)
            )
            for chunk in chunks:
                self.conn.send_data(stream_id, chunk)
            self.transport.write(self.conn.data_to_send())

        if data_to_buffer:
            # Park the remainder and *don't* set the event: the WSGI
            # thread must not produce more data until this is flushed.
            self._flow_controlled_data[stream_id] = (
                stream_id, data_to_buffer, event
            )
        else:
            # We sent everything.  Let the WSGI app progress.
            event.set()
A call that loops forever, attempting to send data. This sending loop contains most of the flow-control smarts of this class: it pulls data off of the asyncio queue and then attempts to send it. The difficulties here are all around flow control. Specifically, a chunk of data may be too large to send. In this case, what will happen is that this coroutine will attempt to send what it can and will then store the unsent data locally. When a flow control event comes in that data will be freed up and placed back onto the asyncio queue, causing it to pop back up into the sending logic of this coroutine. This method explicitly *does not* handle HTTP/2 priority. That adds an extra layer of complexity to what is already a fairly complex method, and we'll look at how to do it another time. This coroutine explicitly *does not end*.
sending_loop
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def request_received(self, event):
    """
    Dispatch a newly-received HTTP/2 request to the WSGI application.

    Creates the per-request ``Stream`` state (so later DATA frames can
    find it), builds the WSGI environ from the request headers, and runs
    the application callable on the default thread pool so it cannot
    block the event loop.
    """
    # Per-request state object; later frames for this stream need it.
    stream = Stream(event.stream_id, self)
    self.streams[event.stream_id] = stream

    environ = _build_environ_dict(event.headers, stream)

    # Hand the blocking WSGI call to a worker thread.
    self._loop.run_in_executor(
        None, stream.run_in_threadpool, APPLICATION, environ,
    )
A HTTP/2 request has been received. We need to invoke the WSGI application in a background thread to handle it.
request_received
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def data_frame_received(self, event):
    """
    Forward a DATA frame to the Stream object that owns it.

    The flow-control window is deliberately *not* reopened here: see
    Stream.__next__ for why that happens lazily when the application
    actually consumes the data.
    """
    self.streams[event.stream_id].receive_data(
        event.data, event.flow_controlled_length
    )
Data has been received by WSGI server and needs to be dispatched to a running application. Note that the flow control window is not modified here. That's deliberate: see Stream.__next__ for a longer discussion of why.
data_frame_received
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def reset_stream(self, event):
    """
    Handle a forcible stream reset.

    WSGI has no notion of cancellation, so the application is left to
    run to completion while we make sure none of its output reaches the
    wire: any buffered chunk for this stream is discarded, the stream is
    marked reset (so the sending loop drops future chunks), and the
    application is signalled that no more request data will arrive.
    """
    self._flow_controlled_data.pop(event.stream_id, None)
    self._reset_streams.add(event.stream_id)
    self.end_stream(event)
A stream got forcefully reset. This is a tricky thing to deal with because WSGI doesn't really have a good notion for it. Essentially, you have to let the application run until completion, but not actually let it send any data. We do that by discarding any data we currently have for it, and then marking the stream as reset to allow us to spot when that stream is trying to send data and drop that data on the floor. We then *also* signal the WSGI application that no more data is incoming, to ensure that it does not attempt to do further reads of the data.
reset_stream
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def data_for_stream(self, stream_id, data):
    """
    Thread-safe entry point used by WSGI worker threads to emit body
    data.

    Schedules ``(stream_id, data, event)`` onto the asyncio send queue
    from whatever thread this is called on, and returns a
    ``threading.Event`` that fires once the data has been sent.
    """
    sent_event = threading.Event()
    item = (stream_id, data, sent_event)
    self._loop.call_soon_threadsafe(self._stream_data.put_nowait, item)
    return sent_event
Thread-safe method called from outside the main asyncio thread in order to send data on behalf of a WSGI application. Places data being written by a stream on an asyncio queue. Returns a threading event that will fire when that data is sent.
data_for_stream
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def open_flow_control_window(self, stream_id, increment):
    """
    Thread-safe, fire-and-forget request to open the flow-control
    window for a stream.

    Called from WSGI worker threads.  Both the stream window and the
    connection window are incremented on the event loop; no completion
    event is returned because nothing needs to block on this.
    """
    def _apply(sid, amount):
        # Both windows must grow, or one becomes the bottleneck.
        self.conn.increment_flow_control_window(amount, sid)
        self.conn.increment_flow_control_window(amount, None)
        self.transport.write(self.conn.data_to_send())

    self._loop.call_soon_threadsafe(_apply, stream_id, increment)
Opens a flow control window for the given stream by the given amount. Called from a WSGI thread. Does not return an event because there's no need to block on this action, it may take place at any time.
open_flow_control_window
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def run_in_threadpool(self, wsgi_application, environ):
    """
    Drive the WSGI application for this request.  Runs on a worker
    thread, never on the event loop; the only safe calls back into the
    loop thread are the thread-safe APIs used by ``write``.

    Invokes the application callable and pushes every chunk of its
    response iterable through ``write`` (which blocks on flow control).
    Whatever happens, END_DATA_SENTINEL is written last so the server
    knows it can clean up the per-stream state.
    """
    chunks = wsgi_application(environ, self.start_response)
    try:
        for chunk in chunks:
            self.write(chunk)
    finally:
        # Always signal end-of-body, even if the application raised.
        self.write(END_DATA_SENTINEL)
This method should be invoked in a threadpool. At the point this method is invoked, the only safe methods to call from the original thread are ``receive_data`` and ``request_complete``: any other method is unsafe. This method handles the WSGI logic. It invokes the application callable in this thread, passing control over to the WSGI application. It then ensures that the data makes it back to the HTTP/2 connection via the thread-safe APIs provided below.
run_in_threadpool
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def read(self, size=None):
    """
    WSGI ``wsgi.input`` read.

    Pulls chunks by iterating over ``self`` (whose ``__next__`` handles
    the ``_complete`` flag and END_DATA_SENTINEL) until ``size`` bytes
    are available, buffering any surplus for the next call.
    ``size=None`` drains the whole remaining body.
    """
    if self._complete:
        # End of the request body was already observed.
        return b''

    if size is None:
        # Drain the remainder of the body in one go.
        return b''.join(self)

    collected = b''
    while len(collected) < size:
        try:
            collected += next(self)
        except StopIteration:
            break

    # We may have read past ``size``; keep the excess for later.
    self._temp_buffer = collected[size:]
    return collected[:size]
Called by the WSGI application to read data. This method is the one of two that explicitly pumps the input data queue, which means it deals with the ``_complete`` flag and the ``END_DATA_SENTINEL``.
read
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def readlines(self, hint=None):
    """
    WSGI ``readlines``: read up to ``hint`` bytes and split the result
    into lines, keeping the line endings.
    """
    return self.read(hint).splitlines(keepends=True)
Called by the WSGI application to read several lines of data.
readlines
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def start_response(self, status, response_headers, exc_info=None):
    """
    PEP-3333 mandated ``start_response`` callable.

    Stashes the status line and headers for later emission and returns
    the ``write`` callable.  If the application reports an error via
    ``exc_info`` after the headers have already been sent, the original
    exception is re-raised, as the spec requires.
    """
    if exc_info is not None and self._headers_emitted:
        raise exc_info[1].with_traceback(exc_info[2])

    # Calling start_response twice is only legal with exc_info set.
    assert not self._response_status or exc_info is not None

    self._response_status = status
    self._response_headers = response_headers
    return self.write
This is the PEP-3333 mandated start_response callable. All it does is store the headers for later sending, and return our ```write`` callable.
start_response
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def write(self, data):
    """
    PEP-3333 ``write`` callable; this app routes all body output here.

    Lazily emits the response headers on first use, then hands ``data``
    to the protocol and *blocks* until HTTP/2 flow control lets it out.
    That back-pressure is what allows the client to pace the response.
    """
    if not self._headers_emitted:
        self._emit_headers()

    sent = self._protocol.data_for_stream(self.stream_id, data)
    sent.wait()
Provides some data to write. This function *blocks* until such time as the data is allowed by HTTP/2 flow control. This allows a client to slow or pause the response as needed. This function is not supposed to be used, according to PEP-3333, but once we have it it becomes quite convenient to use it, so this app actually runs all writes through this function.
write
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def _emit_headers(self): """ Sends the response headers. This is only called from the write callable and should only ever be called once. It does some minor processing (converts the status line into a status code because reason phrases are evil) and then passes the headers on to the server. This call explicitly blocks until the server notifies us that the headers have reached the network. """ assert self._response_status and self._response_headers assert not self._headers_emitted self._headers_emitted = True # We only need the status code status = self._response_status.split(" ", 1)[0] headers = [(":status", status)] headers.extend(self._response_headers) event = self._protocol.send_response(self.stream_id, headers) event.wait() return
Sends the response headers. This is only called from the write callable and should only ever be called once. It does some minor processing (converts the status line into a status code because reason phrases are evil) and then passes the headers on to the server. This call explicitly blocks until the server notifies us that the headers have reached the network.
_emit_headers
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT
def _build_environ_dict(headers, stream): """ Build the WSGI environ dictionary for a given request. To do that, we'll temporarily create a dictionary for the headers. While this isn't actually a valid way to represent headers, we know that the special headers we need can only have one appearance in the block. This code is arguably somewhat incautious: the conversion to dictionary should only happen in a way that allows us to correctly join headers that appear multiple times. That's acceptable in a demo app: in a productised version you'd want to fix it. """ header_dict = dict(headers) path = header_dict.pop(':path') try: path, query = path.split('?', 1) except ValueError: query = "" server_name = header_dict.pop(':authority') try: server_name, port = server_name.split(':', 1) except ValueError: port = "8443" environ = { 'REQUEST_METHOD': header_dict.pop(':method'), 'SCRIPT_NAME': '', 'PATH_INFO': path, 'QUERY_STRING': query, 'SERVER_NAME': server_name, 'SERVER_PORT': port, 'SERVER_PROTOCOL': 'HTTP/2', 'HTTPS': "on", 'SSL_PROTOCOL': 'TLSv1.2', 'wsgi.version': (1, 0), 'wsgi.url_scheme': header_dict.pop(':scheme'), 'wsgi.input': stream, 'wsgi.errors': sys.stderr, 'wsgi.multithread': True, 'wsgi.multiprocess': False, 'wsgi.run_once': False, } if 'content-type' in header_dict: environ['CONTENT_TYPE'] = header_dict.pop('content-type') if 'content-length' in header_dict: environ['CONTENT_LENGTH'] = header_dict.pop('content-length') for name, value in header_dict.items(): environ['HTTP_' + name.upper()] = value return environ
Build the WSGI environ dictionary for a given request. To do that, we'll temporarily create a dictionary for the headers. While this isn't actually a valid way to represent headers, we know that the special headers we need can only have one appearance in the block. This code is arguably somewhat incautious: the conversion to dictionary should only happen in a way that allows us to correctly join headers that appear multiple times. That's acceptable in a demo app: in a productised version you'd want to fix it.
_build_environ_dict
python
python-hyper/h2
examples/asyncio/wsgi-server.py
https://github.com/python-hyper/h2/blob/master/examples/asyncio/wsgi-server.py
MIT