code stringlengths 101 5.91M |
|---|
class DiagonalGaussian(Distribution):
    """Gaussian distribution with diagonal covariance over R^dim.

    ``dist_info`` dicts carry two arrays of shape (..., dim):
    ``'mean'`` and ``'log_std'``.  Plain methods operate on NumPy arrays;
    the ``*_sym`` variants build the equivalent TensorFlow graph ops.
    """

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        # Must be a property, not a plain method: log_likelihood,
        # log_likelihood_sym and dist_info_specs all read it as a value
        # (`self.dim`), which would otherwise bind the method object and
        # raise a TypeError in the arithmetic below.
        return self._dim

    def kl(self, old_dist_info, new_dist_info):
        """Return KL(old || new) per sample, summed over the last axis.

        Uses the identity (s1^2 - s2^2) / (2 s2^2) = s1^2 / (2 s2^2) - 1/2,
        so the usual -1/2 constant of the Gaussian KL is folded into the
        numerator and does not appear explicitly.
        """
        old_means = old_dist_info['mean']
        old_log_stds = old_dist_info['log_std']
        new_means = new_dist_info['mean']
        new_log_stds = new_dist_info['log_std']
        old_std = np.exp(old_log_stds)
        new_std = np.exp(new_log_stds)
        numerator = np.square(old_means - new_means) + np.square(old_std) - np.square(new_std)
        # epsilon keeps the ratio finite when new_std underflows to 0
        denominator = 2 * np.square(new_std) + 1e-08
        return np.sum(numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic (TensorFlow) version of :meth:`kl`; same folded -1/2 trick."""
        old_means = old_dist_info_vars['mean']
        old_log_stds = old_dist_info_vars['log_std']
        new_means = new_dist_info_vars['mean']
        new_log_stds = new_dist_info_vars['log_std']
        old_std = tf.exp(old_log_stds)
        new_std = tf.exp(new_log_stds)
        numerator = tf.square(old_means - new_means) + tf.square(old_std) - tf.square(new_std)
        denominator = 2 * tf.square(new_std) + 1e-08
        return tf.reduce_sum(numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic importance ratio p_new(x) / p_old(x), computed in log space."""
        logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
        logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
        return tf.exp(logli_new - logli_old)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log-density of x_var under the diagonal Gaussian."""
        means = dist_info_vars['mean']
        log_stds = dist_info_vars['log_std']
        # standardized residuals
        zs = (x_var - means) / tf.exp(log_stds)
        return (- tf.reduce_sum(log_stds, axis=-1)
                - 0.5 * tf.reduce_sum(tf.square(zs), axis=-1)
                - 0.5 * self.dim * np.log(2 * np.pi))

    def sample(self, dist_info):
        """Draw one sample per mean via the reparameterization mean + std * eps."""
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        rnd = np.random.normal(size=means.shape)
        return rnd * np.exp(log_stds) + means

    def log_likelihood(self, xs, dist_info):
        """NumPy log-density of xs; mirrors :meth:`log_likelihood_sym`."""
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        zs = (xs - means) / np.exp(log_stds)
        return (- np.sum(log_stds, axis=-1)
                - 0.5 * np.sum(np.square(zs), axis=-1)
                - 0.5 * self.dim * np.log(2 * np.pi))

    def entropy(self, dist_info):
        """Differential entropy: sum over dims of log_std + log(sqrt(2*pi*e))."""
        log_stds = dist_info['log_std']
        return np.sum(log_stds + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    def dist_info_specs(self):
        """Names and per-sample shapes of the arrays a dist_info dict must hold."""
        return [('mean', (self.dim,)), ('log_std', (self.dim,))]
def main(argv):
    """Entry point: dump a RETURNN dataset (or a slice of it) into an HDF file.

    ``argv`` follows sys.argv conventions (argv[0] is the program name).
    """
    parser = argparse.ArgumentParser(description='Dump dataset or subset of dataset into external HDF dataset')
    parser.add_argument('config_file_or_dataset', type=str, help='Config file for RETURNN, or directly the dataset init string')
    parser.add_argument('hdf_filename', type=str, help='File name of the HDF dataset, which will be created')
    parser.add_argument('--start_seq', type=int, default=0, help='Start sequence index of the dataset to dump')
    parser.add_argument('--end_seq', type=int, default=float('inf'), help='End sequence index of the dataset to dump')
    parser.add_argument('--epoch', type=int, default=1, help='Optional start epoch for initialization')
    args = parser.parse_args(argv[1:])
    # The positional argument is either a RETURNN config file or a raw
    # dataset init string; exactly one of the two init arguments is set.
    treat_as_config = _is_crnn_config(args.config_file_or_dataset)
    returnn_config = args.config_file_or_dataset if treat_as_config else None
    dataset_config_str = None if treat_as_config else args.config_file_or_dataset
    dataset = init(config_filename=returnn_config, cmd_line_opts=[], dataset_config_str=dataset_config_str)
    hdf_dataset = hdf_dataset_init(args.hdf_filename)
    hdf_dump_from_dataset(dataset, hdf_dataset, args)
    hdf_close(hdf_dataset)
    rnn.finalize()
def convert_conv2convsamepadding_model(module, process_group=None, channel_last=False):
    """Recursively replace every ``_ConvNd`` in *module* with ``ops.Conv2dSamePadding``.

    Weights (and bias, when present) are copied into the replacement layer;
    non-conv modules are kept and only their children are rewritten.
    Returns the converted module tree.
    """
    converted = module
    if isinstance(module, torch.nn.modules.conv._ConvNd):
        has_bias = isinstance(module.bias, torch.Tensor)
        converted = ops.Conv2dSamePadding(
            module.in_channels, module.out_channels, module.kernel_size,
            module.stride, module.dilation, module.groups, bias=has_bias)
        converted.weight.data = module.weight.data.clone().detach()
        if has_bias:
            converted.bias.data = module.bias.data.clone().detach()
    # Rewrite children in place; add_module overwrites the existing entry.
    for name, child in module.named_children():
        converted.add_module(
            name,
            convert_conv2convsamepadding_model(child, process_group=process_group, channel_last=channel_last))
    del module
    return converted
def duplicate_transition_add_input(old_transition, new_transition):
    """Merge two parallel transitions by adding their single input letters.

    Both transitions must carry an input word that is a list of length 1;
    the sum of the two letters replaces ``old_transition.word_in`` and the
    (mutated) old transition is returned.  Raises TypeError otherwise.
    """
    # Keep the original left-to-right short-circuit order: each length test
    # only runs after the corresponding isinstance check succeeded.
    if (isinstance(old_transition.word_in, Iterable)
            and len(old_transition.word_in) == 1
            and isinstance(new_transition.word_in, Iterable)
            and len(new_transition.word_in) == 1):
        old_transition.word_in = [old_transition.word_in[0] + new_transition.word_in[0]]
    else:
        raise TypeError(
            'Trying to use duplicate_transition_add_input on '
            + '"%s" and "%s", ' % (old_transition, new_transition)
            + 'but input words are assumed to be lists of length 1')
    return old_transition
class DRIT(object):
def __init__(self, sess, args):
    """Store the parsed CLI configuration, resolve dataset paths, and print
    a summary of the run configuration.  ``sess`` is the tf.Session used by
    train()/test(); ``args`` is the parsed argparse namespace."""
    self.model_name = 'DRIT'
    self.sess = sess
    # output locations
    self.checkpoint_dir = args.checkpoint_dir
    self.result_dir = args.result_dir
    self.log_dir = args.log_dir
    self.sample_dir = args.sample_dir
    self.dataset_name = args.dataset
    self.augment_flag = args.augment_flag
    # training schedule
    self.epoch = args.epoch
    self.iteration = args.iteration
    self.decay_flag = args.decay_flag
    self.decay_epoch = args.decay_epoch
    self.gan_type = args.gan_type
    self.batch_size = args.batch_size
    self.print_freq = args.print_freq
    self.save_freq = args.save_freq
    # test-time options
    self.num_attribute = args.num_attribute
    self.guide_img = args.guide_img
    self.direction = args.direction
    # image geometry
    self.img_size = args.img_size
    self.img_ch = args.img_ch
    # learning rates: the content discriminator trains at lr / 2.5
    self.init_lr = args.lr
    self.content_init_lr = (args.lr / 2.5)
    # architecture hyper-parameters
    self.ch = args.ch
    self.concat = args.concat
    # loss weights
    self.content_adv_w = args.content_adv_w
    self.domain_adv_w = args.domain_adv_w
    self.cycle_w = args.cycle_w
    self.recon_w = args.recon_w
    self.latent_w = args.latent_w
    self.kl_w = args.kl_w
    self.n_layer = args.n_layer
    self.n_z = args.n_z
    self.n_dis = args.n_dis
    self.n_scale = args.n_scale
    self.n_d_con = args.n_d_con
    self.multi = (True if (args.n_scale > 1) else False)
    self.sn = args.sn
    # NOTE(review): self.model_dir is read here as an attribute; this only
    # yields a string if model_dir is a @property on this class — verify.
    self.sample_dir = os.path.join(args.sample_dir, self.model_dir)
    check_folder(self.sample_dir)
    # dataset listings for both domains; iteration count per epoch is
    # driven by the larger of the two
    self.trainA_dataset = glob('./dataset/{}/*.*'.format((self.dataset_name + '/trainA')))
    self.trainB_dataset = glob('./dataset/{}/*.*'.format((self.dataset_name + '/trainB')))
    self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))
    # configuration summary printed to the console
    print('##### Information #####')
    print('# gan type : ', self.gan_type)
    print('# dataset : ', self.dataset_name)
    print('# max dataset number : ', self.dataset_num)
    print('# batch_size : ', self.batch_size)
    print('# decay_flag : ', self.decay_flag)
    print('# epoch : ', self.epoch)
    print('# decay_epoch : ', self.decay_epoch)
    print('# iteration per epoch : ', self.iteration)
    print('# attribute in test phase : ', self.num_attribute)
    print()
    print('##### Generator #####')
    print('# layer : ', self.n_layer)
    print('# z dimension : ', self.n_z)
    print('# concat : ', self.concat)
    print()
    print('##### Discriminator #####')
    print('# discriminator layer : ', self.n_dis)
    print('# multi-scale Dis : ', self.n_scale)
    print('# updating iteration of con_dis : ', self.n_d_con)
    print('# spectral_norm : ', self.sn)
    print()
    print('##### Weight #####')
    print('# domain_adv_weight : ', self.domain_adv_w)
    print('# content_adv_weight : ', self.content_adv_w)
    print('# cycle_weight : ', self.cycle_w)
    print('# recon_weight : ', self.recon_w)
    print('# latent_weight : ', self.latent_w)
    print('# kl_weight : ', self.kl_w)
def content_encoder(self, x, is_training=True, reuse=False, scope='content_encoder'):
    """Map an image to its content feature map.

    Two stride-2 conv stages downsample, followed by residual blocks.  The
    final residual block and the Gaussian-noise layer live in a scope shared
    by both domains (tf.AUTO_REUSE), so A and B content features inhabit the
    same space.
    """
    width = self.ch
    with tf.variable_scope(scope, reuse=reuse):
        x = lrelu(conv(x, width, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv'), 0.01)
        for stage in range(2):
            x = conv(x, width * 2, kernel=3, stride=2, pad=1, pad_type='reflect', scope='conv_{}'.format(stage))
            x = relu(instance_norm(x, scope='ins_norm_{}'.format(stage)))
            width *= 2
        for block in range(1, self.n_layer):
            x = resblock(x, width, scope='resblock_{}'.format(block))
    # Shared across domain A and B encoders via AUTO_REUSE.
    with tf.variable_scope('content_encoder_share', reuse=tf.AUTO_REUSE):
        x = resblock(x, width, scope='resblock_share')
        x = gaussian_noise_layer(x, is_training)
    return x
def attribute_encoder(self, x, reuse=False, scope='attribute_encoder'):
    """Encode an image into an n_z-channel attribute code (non-concat variant).

    A stack of stride-2 convolutions followed by global average pooling and
    a 1x1 conv producing the attribute logits.
    """
    width = self.ch
    with tf.variable_scope(scope, reuse=reuse):
        x = relu(conv(x, width, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv'))
        width *= 2
        x = relu(conv(x, width, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_0'))
        width *= 2
        for layer in range(1, self.n_layer):
            x = relu(conv(x, width, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_{}'.format(layer)))
        x = global_avg_pooling(x)
        x = conv(x, channels=self.n_z, kernel=1, stride=1, scope='attribute_logit')
        return x
def attribute_encoder_concat(self, x, reuse=False, scope='attribute_encoder_concat'):
    """VAE-style attribute encoder (concat variant).

    Returns ``(mean, logvar)`` of a Gaussian over the n_z-dim attribute
    code; sampling happens in the caller via z_sample.
    """
    width = self.ch
    with tf.variable_scope(scope, reuse=reuse):
        x = conv(x, width, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv')
        for layer in range(1, self.n_layer):
            # channel count grows by the layer factor at every block
            width = width * (layer + 1)
            x = basic_block(x, width, scope='basic_block_{}'.format(layer))
        x = lrelu(x, 0.2)
        x = global_avg_pooling(x)
        mean = fully_conneted(x, channels=self.n_z, scope='z_mean')
        logvar = fully_conneted(x, channels=self.n_z, scope='z_logvar')
        return mean, logvar
def MLP(self, z, reuse=False, scope='MLP'):
    """Expand an attribute code z into per-resblock features for generator()."""
    width = self.ch * self.n_layer
    with tf.variable_scope(scope, reuse=reuse):
        for layer in range(2):
            z = relu(fully_conneted(z, width, scope='fully_{}'.format(layer)))
        # one slice of size `width` per mis_resblock in the generator
        return fully_conneted(z, width * self.n_layer, scope='fully_logit')
def generator(self, x, z, reuse=False, scope='generator'):
    """Decode a content map x conditioned on attribute code z (non-concat).

    z is expanded by the MLP, split into n_layer chunks, and injected into
    the mis_resblocks; two stride-2 deconvs then upsample back to image size.
    """
    width = self.ch * self.n_layer
    with tf.variable_scope(scope, reuse=reuse):
        z = self.MLP(z, reuse=reuse)
        z_chunks = tf.split(z, num_or_size_splits=self.n_layer, axis=-1)
        for block in range(self.n_layer):
            x = mis_resblock(x, z_chunks[block], width, scope='mis_resblock_{}'.format(block))
        for stage in range(2):
            x = deconv(x, width // 2, kernel=3, stride=2, scope='deconv_{}'.format(stage))
            x = relu(layer_norm(x, scope='layer_norm_{}'.format(stage)))
            width //= 2
        x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit')
        return tanh(x)
def generator_concat(self, x, z, reuse=False, scope='generator_concat'):
    """Decode content x with attribute z concatenated at every stage (concat).

    The first resblock is shared between both domains (tf.AUTO_REUSE); z is
    spatially broadcast and concatenated before each subsequent stage.
    """
    width = self.ch * self.n_layer
    # Shared across domain A and B generators via AUTO_REUSE.
    with tf.variable_scope('generator_concat_share', reuse=tf.AUTO_REUSE):
        x = resblock(x, width, scope='resblock')
    with tf.variable_scope(scope, reuse=reuse):
        width += self.n_z
        x = expand_concat(x, z)
        for block in range(1, self.n_layer):
            x = resblock(x, width, scope='resblock_{}'.format(block))
        for stage in range(2):
            width += self.n_z
            x = expand_concat(x, z)
            x = deconv(x, width // 2, kernel=3, stride=2, scope='deconv_{}'.format(stage))
            x = relu(layer_norm(x, scope='layer_norm_{}'.format(stage)))
            width //= 2
        x = expand_concat(x, z)
        x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit')
        return tanh(x)
def content_discriminator(self, x, reuse=False, scope='content_discriminator'):
    """Discriminate which domain a content feature map came from.

    Returns a one-element list of logit tensors, matching the list shape
    produced by the image discriminators.
    """
    logits = []
    with tf.variable_scope(scope, reuse=reuse):
        width = self.ch * self.n_layer
        for stage in range(3):
            x = conv(x, width, kernel=7, stride=2, pad=1, pad_type='reflect', scope='conv_{}'.format(stage))
            x = lrelu(instance_norm(x, scope='ins_norm_{}'.format(stage)), 0.01)
        x = lrelu(conv(x, width, kernel=4, stride=1, scope='conv_3'), 0.01)
        x = conv(x, channels=1, kernel=1, stride=1, scope='D_content_logit')
        logits.append(x)
        return logits
def multi_discriminator(self, x_init, reuse=False, scope='multi_discriminator'):
    """Multi-scale PatchGAN discriminator.

    Runs an independent discriminator at n_scale resolutions, downsampling
    the input between scales; returns one logit tensor per scale.
    """
    logits = []
    with tf.variable_scope(scope, reuse=reuse):
        for scale in range(self.n_scale):
            prefix = 'ms_' + str(scale)
            width = self.ch
            x = lrelu(conv(x_init, width, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope=prefix + 'conv_0'), 0.01)
            for layer in range(1, self.n_dis):
                x = lrelu(conv(x, width * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope=prefix + 'conv_' + str(layer)), 0.01)
                width *= 2
            logits.append(conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope=prefix + 'D_logit'))
            # halve the resolution for the next scale
            x_init = down_sample(x_init)
        return logits
def discriminator(self, x, reuse=False, scope='discriminator'):
    """Single-scale PatchGAN discriminator; returns a one-element logit list."""
    logits = []
    with tf.variable_scope(scope, reuse=reuse):
        width = self.ch
        x = lrelu(conv(x, width, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv'), 0.01)
        for layer in range(1, self.n_dis):
            x = lrelu(conv(x, width * 2, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_{}'.format(layer)), 0.01)
            width *= 2
        logits.append(conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='D_logit'))
        return logits
def Encoder_A(self, x_A, is_training=True, random_fake=False, reuse=False):
    """Encode a domain-A image into (content, attribute, mean, logvar).

    In concat mode the attribute is drawn from a learned Gaussian (or its
    mean when random_fake is set); otherwise it is a deterministic code and
    mean/logvar stay None.
    """
    mean = None
    logvar = None
    content_A = self.content_encoder(x_A, is_training=is_training, reuse=reuse, scope='content_encoder_A')
    if not self.concat:
        attribute_A = self.attribute_encoder(x_A, reuse=reuse, scope='attribute_encoder_A')
    else:
        mean, logvar = self.attribute_encoder_concat(x_A, reuse=reuse, scope='attribute_encoder_concat_A')
        attribute_A = mean if random_fake else z_sample(mean, logvar)
    return content_A, attribute_A, mean, logvar
def Encoder_B(self, x_B, is_training=True, random_fake=False, reuse=False):
    """Encode a domain-B image into (content, attribute, mean, logvar).

    Mirror of Encoder_A with domain-B variable scopes.
    """
    mean = None
    logvar = None
    content_B = self.content_encoder(x_B, is_training=is_training, reuse=reuse, scope='content_encoder_B')
    if not self.concat:
        attribute_B = self.attribute_encoder(x_B, reuse=reuse, scope='attribute_encoder_B')
    else:
        mean, logvar = self.attribute_encoder_concat(x_B, reuse=reuse, scope='attribute_encoder_concat_B')
        attribute_B = mean if random_fake else z_sample(mean, logvar)
    return content_B, attribute_B, mean, logvar
def Decoder_A(self, content_B, attribute_A, reuse=False):
    """Generate a domain-A image from B-content plus an A-attribute code."""
    if self.concat:
        return self.generator_concat(x=content_B, z=attribute_A, reuse=reuse, scope='generator_concat_A')
    return self.generator(x=content_B, z=attribute_A, reuse=reuse, scope='generator_A')
def Decoder_B(self, content_A, attribute_B, reuse=False):
    """Generate a domain-B image from A-content plus a B-attribute code."""
    if self.concat:
        return self.generator_concat(x=content_A, z=attribute_B, reuse=reuse, scope='generator_concat_B')
    return self.generator(x=content_A, z=attribute_B, reuse=reuse, scope='generator_B')
def discriminate_real(self, x_A, x_B):
    """Score real images from both domains with the domain discriminators."""
    if self.multi:
        real_A_logit = self.multi_discriminator(x_A, scope='multi_discriminator_A')
        real_B_logit = self.multi_discriminator(x_B, scope='multi_discriminator_B')
        return real_A_logit, real_B_logit
    real_A_logit = self.discriminator(x_A, scope='discriminator_A')
    real_B_logit = self.discriminator(x_B, scope='discriminator_B')
    return real_A_logit, real_B_logit
def discriminate_fake(self, x_ba, x_ab):
    """Score translated images, reusing the variables built by discriminate_real."""
    if self.multi:
        fake_A_logit = self.multi_discriminator(x_ba, reuse=True, scope='multi_discriminator_A')
        fake_B_logit = self.multi_discriminator(x_ab, reuse=True, scope='multi_discriminator_B')
        return fake_A_logit, fake_B_logit
    fake_A_logit = self.discriminator(x_ba, reuse=True, scope='discriminator_A')
    fake_B_logit = self.discriminator(x_ab, reuse=True, scope='discriminator_B')
    return fake_A_logit, fake_B_logit
def discriminate_content(self, content_A, content_B, reuse=False):
    """Score both content maps with the single shared content discriminator.

    The second call always reuses the variables created by the first.
    """
    logit_A = self.content_discriminator(content_A, reuse=reuse, scope='content_discriminator')
    logit_B = self.content_discriminator(content_B, reuse=True, scope='content_discriminator')
    return logit_A, logit_B
def build_model(self):
    """Construct the complete training graph: input pipelines, the two
    encoder/decoder pairs, adversarial / cycle / reconstruction / latent /
    KL losses, the three optimizers, summaries, and the test & guide
    inference endpoints."""
    # learning rates are fed per step so train() can decay them
    self.lr = tf.placeholder(tf.float32, name='lr')
    self.content_lr = tf.placeholder(tf.float32, name='content_lr')
    # input pipelines for both domains (shuffle, map, batch, prefetch)
    Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag)
    trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
    trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)
    gpu_device = '/gpu:0'
    trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
    trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
    trainA_iterator = trainA.make_one_shot_iterator()
    trainB_iterator = trainB.make_one_shot_iterator()
    self.domain_A = trainA_iterator.get_next()
    self.domain_B = trainB_iterator.get_next()
    # random attribute code used for the latent-regression branch
    random_z = tf.random_normal(shape=[self.batch_size, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32)
    # encode both domains into (content, attribute) pairs
    (content_a, attribute_a, mean_a, logvar_a) = self.Encoder_A(self.domain_A)
    (content_b, attribute_b, mean_b, logvar_b) = self.Encoder_B(self.domain_B)
    # cross-domain translation and within-domain reconstruction
    fake_a = self.Decoder_A(content_B=content_b, attribute_A=attribute_a)
    fake_b = self.Decoder_B(content_A=content_a, attribute_B=attribute_b)
    recon_a = self.Decoder_A(content_B=content_a, attribute_A=attribute_a, reuse=True)
    recon_b = self.Decoder_B(content_A=content_b, attribute_B=attribute_b, reuse=True)
    # translations driven by a random attribute (latent regression path)
    random_fake_a = self.Decoder_A(content_B=content_b, attribute_A=random_z, reuse=True)
    random_fake_b = self.Decoder_B(content_A=content_a, attribute_B=random_z, reuse=True)
    # re-encode the translations to close the cycle
    (content_fake_a, attribute_fake_a, _, _) = self.Encoder_A(fake_a, reuse=True)
    (content_fake_b, attribute_fake_b, _, _) = self.Encoder_B(fake_b, reuse=True)
    cycle_a = self.Decoder_A(content_B=content_fake_b, attribute_A=attribute_fake_a, reuse=True)
    cycle_b = self.Decoder_B(content_A=content_fake_a, attribute_B=attribute_fake_b, reuse=True)
    # recover the random z from the random translations (L1 latent loss)
    (_, attribute_fake_random_a, _, _) = self.Encoder_A(random_fake_a, random_fake=True, reuse=True)
    (_, attribute_fake_random_b, _, _) = self.Encoder_B(random_fake_b, random_fake=True, reuse=True)
    # discriminator scores
    (real_A_logit, real_B_logit) = self.discriminate_real(self.domain_A, self.domain_B)
    (fake_A_logit, fake_B_logit) = self.discriminate_fake(fake_a, fake_b)
    (random_fake_A_logit, random_fake_B_logit) = self.discriminate_fake(random_fake_a, random_fake_b)
    (content_A_logit, content_B_logit) = self.discriminate_content(content_a, content_b)
    # generator-side losses
    g_adv_loss_a = (generator_loss(self.gan_type, fake_A_logit) + generator_loss(self.gan_type, random_fake_A_logit))
    g_adv_loss_b = (generator_loss(self.gan_type, fake_B_logit) + generator_loss(self.gan_type, random_fake_B_logit))
    g_con_loss_a = generator_loss(self.gan_type, content_A_logit, content=True)
    g_con_loss_b = generator_loss(self.gan_type, content_B_logit, content=True)
    g_cyc_loss_a = L1_loss(cycle_a, self.domain_A)
    g_cyc_loss_b = L1_loss(cycle_b, self.domain_B)
    g_rec_loss_a = L1_loss(recon_a, self.domain_A)
    g_rec_loss_b = L1_loss(recon_b, self.domain_B)
    g_latent_loss_a = L1_loss(attribute_fake_random_a, random_z)
    g_latent_loss_b = L1_loss(attribute_fake_random_b, random_z)
    if self.concat:
        # VAE attribute encoder: KL on (mean, logvar) + content L2
        g_kl_loss_a = (kl_loss(mean_a, logvar_a) + l2_regularize(content_a))
        g_kl_loss_b = (kl_loss(mean_b, logvar_b) + l2_regularize(content_b))
    else:
        # deterministic attribute encoder: plain L2 on both codes
        g_kl_loss_a = (l2_regularize(attribute_a) + l2_regularize(content_a))
        g_kl_loss_b = (l2_regularize(attribute_b) + l2_regularize(content_b))
    # discriminator-side losses
    d_adv_loss_a = discriminator_loss(self.gan_type, real_A_logit, fake_A_logit, random_fake_A_logit)
    d_adv_loss_b = discriminator_loss(self.gan_type, real_B_logit, fake_B_logit, random_fake_B_logit)
    d_con_loss = discriminator_loss(self.gan_type, content_A_logit, content_B_logit, content=True)
    # weighted totals — note cycle losses are swapped across domains:
    # generator A is judged on how well domain-B images survive the cycle
    Generator_A_domain_loss = (self.domain_adv_w * g_adv_loss_a)
    Generator_A_content_loss = (self.content_adv_w * g_con_loss_a)
    Generator_A_cycle_loss = (self.cycle_w * g_cyc_loss_b)
    Generator_A_recon_loss = (self.recon_w * g_rec_loss_a)
    Generator_A_latent_loss = (self.latent_w * g_latent_loss_a)
    Generator_A_kl_loss = (self.kl_w * g_kl_loss_a)
    Generator_A_loss = (((((Generator_A_domain_loss + Generator_A_content_loss) + Generator_A_cycle_loss) + Generator_A_recon_loss) + Generator_A_latent_loss) + Generator_A_kl_loss)
    Generator_B_domain_loss = (self.domain_adv_w * g_adv_loss_b)
    Generator_B_content_loss = (self.content_adv_w * g_con_loss_b)
    Generator_B_cycle_loss = (self.cycle_w * g_cyc_loss_a)
    Generator_B_recon_loss = (self.recon_w * g_rec_loss_b)
    Generator_B_latent_loss = (self.latent_w * g_latent_loss_b)
    Generator_B_kl_loss = (self.kl_w * g_kl_loss_b)
    Generator_B_loss = (((((Generator_B_domain_loss + Generator_B_content_loss) + Generator_B_cycle_loss) + Generator_B_recon_loss) + Generator_B_latent_loss) + Generator_B_kl_loss)
    Discriminator_A_loss = (self.domain_adv_w * d_adv_loss_a)
    Discriminator_B_loss = (self.domain_adv_w * d_adv_loss_b)
    Discriminator_content_loss = (self.content_adv_w * d_con_loss)
    self.Generator_loss = (Generator_A_loss + Generator_B_loss)
    self.Discriminator_loss = (Discriminator_A_loss + Discriminator_B_loss)
    self.Discriminator_content_loss = Discriminator_content_loss
    # variable partitions for the three optimizers
    t_vars = tf.trainable_variables()
    G_vars = [var for var in t_vars if (('encoder' in var.name) or ('generator' in var.name))]
    D_vars = [var for var in t_vars if (('discriminator' in var.name) and ('content' not in var.name))]
    D_content_vars = [var for var in t_vars if ('content_discriminator' in var.name)]
    # only the content discriminator gets gradient clipping
    (grads, _) = tf.clip_by_global_norm(tf.gradients(self.Discriminator_content_loss, D_content_vars), clip_norm=5)
    self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
    self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)
    self.D_content_optim = tf.train.AdamOptimizer(self.content_lr, beta1=0.5, beta2=0.999).apply_gradients(zip(grads, D_content_vars))
    # scalar summaries
    self.lr_write = tf.summary.scalar('learning_rate', self.lr)
    self.all_G_loss = tf.summary.scalar('Generator_loss', self.Generator_loss)
    self.all_D_loss = tf.summary.scalar('Discriminator_loss', self.Discriminator_loss)
    self.G_A_loss = tf.summary.scalar('G_A_loss', Generator_A_loss)
    self.G_A_domain_loss = tf.summary.scalar('G_A_domain_loss', Generator_A_domain_loss)
    self.G_A_content_loss = tf.summary.scalar('G_A_content_loss', Generator_A_content_loss)
    self.G_A_cycle_loss = tf.summary.scalar('G_A_cycle_loss', Generator_A_cycle_loss)
    self.G_A_recon_loss = tf.summary.scalar('G_A_recon_loss', Generator_A_recon_loss)
    self.G_A_latent_loss = tf.summary.scalar('G_A_latent_loss', Generator_A_latent_loss)
    self.G_A_kl_loss = tf.summary.scalar('G_A_kl_loss', Generator_A_kl_loss)
    self.G_B_loss = tf.summary.scalar('G_B_loss', Generator_B_loss)
    self.G_B_domain_loss = tf.summary.scalar('G_B_domain_loss', Generator_B_domain_loss)
    self.G_B_content_loss = tf.summary.scalar('G_B_content_loss', Generator_B_content_loss)
    self.G_B_cycle_loss = tf.summary.scalar('G_B_cycle_loss', Generator_B_cycle_loss)
    self.G_B_recon_loss = tf.summary.scalar('G_B_recon_loss', Generator_B_recon_loss)
    self.G_B_latent_loss = tf.summary.scalar('G_B_latent_loss', Generator_B_latent_loss)
    self.G_B_kl_loss = tf.summary.scalar('G_B_kl_loss', Generator_B_kl_loss)
    self.D_A_loss = tf.summary.scalar('D_A_loss', Discriminator_A_loss)
    self.D_B_loss = tf.summary.scalar('D_B_loss', Discriminator_B_loss)
    self.G_loss = tf.summary.merge([self.G_A_loss, self.G_A_domain_loss, self.G_A_content_loss, self.G_A_cycle_loss, self.G_A_recon_loss, self.G_A_latent_loss, self.G_A_kl_loss, self.G_B_loss, self.G_B_domain_loss, self.G_B_content_loss, self.G_B_cycle_loss, self.G_B_recon_loss, self.G_B_latent_loss, self.G_B_kl_loss, self.all_G_loss])
    self.D_loss = tf.summary.merge([self.D_A_loss, self.D_B_loss, self.all_D_loss])
    self.D_content_loss = tf.summary.scalar('Discriminator_content_loss', self.Discriminator_content_loss)
    # tensors exposed to train() for sampling
    self.fake_A = random_fake_a
    self.fake_B = random_fake_b
    self.real_A = self.domain_A
    self.real_B = self.domain_B
    # test-time endpoints: translate a single image with a random attribute
    self.test_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_image')
    self.test_random_z = tf.random_normal(shape=[1, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32)
    (test_content_a, _, _, _) = self.Encoder_A(self.test_image, is_training=False, reuse=True)
    (test_content_b, _, _, _) = self.Encoder_B(self.test_image, is_training=False, reuse=True)
    self.test_fake_A = self.Decoder_A(content_B=test_content_b, attribute_A=self.test_random_z, reuse=True)
    self.test_fake_B = self.Decoder_B(content_A=test_content_a, attribute_B=self.test_random_z, reuse=True)
    # guided translation: content from one image, attribute from another
    self.content_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='content_image')
    self.attribute_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='guide_attribute_image')
    if (self.direction == 'a2b'):
        (guide_content_A, _, _, _) = self.Encoder_A(self.content_image, is_training=False, reuse=True)
        (_, guide_attribute_B, _, _) = self.Encoder_B(self.attribute_image, is_training=False, reuse=True)
        self.guide_fake_B = self.Decoder_B(content_A=guide_content_A, attribute_B=guide_attribute_B, reuse=True)
    else:
        (guide_content_B, _, _, _) = self.Encoder_B(self.content_image, is_training=False, reuse=True)
        (_, guide_attribute_A, _, _) = self.Encoder_A(self.attribute_image, is_training=False, reuse=True)
        self.guide_fake_A = self.Decoder_A(content_B=guide_content_B, attribute_A=guide_attribute_A, reuse=True)
def train(self):
    """Run the training loop: restore from checkpoint if possible, then for
    each epoch alternate content-discriminator updates every step with
    domain-discriminator + generator updates every n_d_con steps."""
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()
    self.writer = tf.summary.FileWriter(((self.log_dir + '/') + self.model_dir), self.sess.graph)
    # resume from checkpoint if one exists; counter is the global step
    (could_load, checkpoint_counter) = self.load(self.checkpoint_dir)
    if could_load:
        start_epoch = int((checkpoint_counter / self.iteration))
        start_batch_id = (checkpoint_counter - (start_epoch * self.iteration))
        counter = checkpoint_counter
        print(' [*] Load SUCCESS')
    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print(' [!] Load failed...')
    start_time = time.time()
    lr = self.init_lr
    content_lr = self.content_init_lr
    for epoch in range(start_epoch, self.epoch):
        if self.decay_flag:
            # linear decay to zero between decay_epoch and the final epoch
            lr = (self.init_lr if (epoch < self.decay_epoch) else ((self.init_lr * (self.epoch - epoch)) / (self.epoch - self.decay_epoch)))
            content_lr = (self.content_init_lr if (epoch < self.decay_epoch) else ((self.content_init_lr * (self.epoch - epoch)) / (self.epoch - self.decay_epoch)))
        for idx in range(start_batch_id, self.iteration):
            train_feed_dict = {self.lr: lr, self.content_lr: content_lr}
            summary_str = self.sess.run(self.lr_write, feed_dict=train_feed_dict)
            self.writer.add_summary(summary_str, counter)
            # content discriminator is updated on every iteration
            (_, d_con_loss, summary_str) = self.sess.run([self.D_content_optim, self.Discriminator_content_loss, self.D_content_loss], feed_dict=train_feed_dict)
            self.writer.add_summary(summary_str, counter)
            # domain discriminators and generators update every n_d_con steps;
            # counter starts at 1, so this branch always runs on the first step
            if (((counter - 1) % self.n_d_con) == 0):
                (_, d_loss, summary_str) = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss], feed_dict=train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                (batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str) = self.sess.run([self.real_A, self.real_B, self.fake_A, self.fake_B, self.G_optim, self.Generator_loss, self.G_loss], feed_dict=train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                print(('Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f, d_loss: %.8f, g_loss: %.8f' % (epoch, idx, self.iteration, (time.time() - start_time), d_con_loss, d_loss, g_loss)))
            else:
                print(('Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f' % (epoch, idx, self.iteration, (time.time() - start_time), d_con_loss)))
            # NOTE(review): batch_A_images / fake_B here may be stale — they
            # come from the most recent generator step, not this iteration.
            if (np.mod((idx + 1), self.print_freq) == 0):
                save_images(batch_A_images, [self.batch_size, 1], './{}/real_A_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, (idx + 1)))
                save_images(fake_B, [self.batch_size, 1], './{}/fake_B_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, (idx + 1)))
            counter += 1
            if (np.mod((idx + 1), self.save_freq) == 0):
                self.save(self.checkpoint_dir, counter)
        # only the resumed epoch starts mid-way; later epochs start at 0
        start_batch_id = 0
        # checkpoint at the end of every epoch
        self.save(self.checkpoint_dir, counter)
def model_dir(self):
if self.concat:
concat = '_concat'
else:
concat = ''
if self.sn:
sn = '_sn'
else:
sn = ''
return '{}{}_{}_{}_{}layer_{}dis_{}scale_{}con{}'.format(self.model_name, concat, self.dataset_name, self.gan_type, self.n_layer, self.n_dis, self.n_scale, self.n_d_con, sn)
def save(self, checkpoint_dir, step):
    """Persist the current session under checkpoint_dir/<model_dir>."""
    target_dir = os.path.join(checkpoint_dir, self.model_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    checkpoint_path = os.path.join(target_dir, self.model_name + '.model')
    self.saver.save(self.sess, checkpoint_path, global_step=step)
def load(self, checkpoint_dir):
    """Restore the latest checkpoint from checkpoint_dir/<model_dir>.

    Returns ``(True, counter)`` on success — counter is parsed from the
    checkpoint filename suffix — or ``(False, 0)`` when none is found.
    """
    print(' [*] Reading checkpoints...')
    ckpt_dir = os.path.join(checkpoint_dir, self.model_dir)
    state = tf.train.get_checkpoint_state(ckpt_dir)
    if not (state and state.model_checkpoint_path):
        print(' [*] Failed to find a checkpoint')
        return False, 0
    ckpt_name = os.path.basename(state.model_checkpoint_path)
    self.saver.restore(self.sess, os.path.join(ckpt_dir, ckpt_name))
    # global step is encoded after the last '-' in the checkpoint name
    counter = int(ckpt_name.split('-')[-1])
    print(' [*] Success to read {}'.format(ckpt_name))
    return True, counter
def test(self):
    """Translate every test image of both domains num_attribute times with
    random attribute codes, saving the outputs and an index.html gallery
    under result_dir/<model_dir>."""
    tf.global_variables_initializer().run()
    test_A_files = glob('./dataset/{}/*.*'.format((self.dataset_name + '/testA')))
    test_B_files = glob('./dataset/{}/*.*'.format((self.dataset_name + '/testB')))
    self.saver = tf.train.Saver()
    (could_load, checkpoint_counter) = self.load(self.checkpoint_dir)
    self.result_dir = os.path.join(self.result_dir, self.model_dir)
    check_folder(self.result_dir)
    if could_load:
        print(' [*] Load SUCCESS')
    else:
        # NOTE(review): continues with freshly-initialized weights on failure
        print(' [!] Load failed...')
    # NOTE(review): `index` is only closed at the end — consider `with open(...)`.
    index_path = os.path.join(self.result_dir, 'index.html')
    index = open(index_path, 'w')
    index.write('<html><body><table><tr>')
    index.write('<th>name</th><th>input</th><th>output</th></tr>')
    # A -> B: each test-A image is translated num_attribute times, each with
    # a fresh random z drawn inside the graph (self.test_random_z)
    for sample_file in test_A_files:
        print(('Processing A image: ' + sample_file))
        sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
        file_name = os.path.basename(sample_file).split('.')[0]
        file_extension = os.path.basename(sample_file).split('.')[1]
        for i in range(self.num_attribute):
            image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension))
            fake_img = self.sess.run(self.test_fake_B, feed_dict={self.test_image: sample_image})
            save_images(fake_img, [1, 1], image_path)
            index.write(('<td>%s</td>' % os.path.basename(image_path)))
            index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((sample_file if os.path.isabs(sample_file) else (('../..' + os.path.sep) + sample_file)), self.img_size, self.img_size)))
            index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((image_path if os.path.isabs(image_path) else (('../..' + os.path.sep) + image_path)), self.img_size, self.img_size)))
            index.write('</tr>')
    # B -> A: mirror of the loop above using test_fake_A
    for sample_file in test_B_files:
        print(('Processing B image: ' + sample_file))
        sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
        file_name = os.path.basename(sample_file).split('.')[0]
        file_extension = os.path.basename(sample_file).split('.')[1]
        for i in range(self.num_attribute):
            image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension))
            fake_img = self.sess.run(self.test_fake_A, feed_dict={self.test_image: sample_image})
            save_images(fake_img, [1, 1], image_path)
            index.write(('<td>%s</td>' % os.path.basename(image_path)))
            index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((sample_file if os.path.isabs(sample_file) else (('../..' + os.path.sep) + sample_file)), self.img_size, self.img_size)))
            index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((image_path if os.path.isabs(image_path) else (('../..' + os.path.sep) + image_path)), self.img_size, self.img_size)))
            index.write('</tr>')
    index.close()
    def guide_test(self):
        """Run guided (attribute/style-guided) translation over the test split.

        Uses `self.guide_img` as the attribute/style source and translates every
        image in testA (direction 'a2b') or testB (otherwise), saving outputs
        under `<result_dir>/<model_dir>/guide` and an index.html gallery.
        """
        tf.global_variables_initializer().run()
        test_A_files = glob('./dataset/{}/*.*'.format((self.dataset_name + '/testA')))
        test_B_files = glob('./dataset/{}/*.*'.format((self.dataset_name + '/testB')))
        # The single guide image supplying the attribute/style code.
        attribute_file = np.asarray(load_test_data(self.guide_img, size=self.img_size))
        self.saver = tf.train.Saver()
        (could_load, checkpoint_counter) = self.load(self.checkpoint_dir)
        self.result_dir = os.path.join(self.result_dir, self.model_dir, 'guide')
        check_folder(self.result_dir)
        # NOTE(review): generation proceeds even when the checkpoint failed to
        # load (only a message is printed) — confirm this is intentional.
        if could_load:
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed...')
        index_path = os.path.join(self.result_dir, 'index.html')
        index = open(index_path, 'w')
        index.write('<html><body><table><tr>')
        index.write('<th>name</th><th>input</th><th>output</th></tr>')
        if (self.direction == 'a2b'):
            for sample_file in test_A_files:
                print(('Processing A image: ' + sample_file))
                sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
                image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
                fake_img = self.sess.run(self.guide_fake_B, feed_dict={self.content_image: sample_image, self.attribute_image: attribute_file})
                save_images(fake_img, [1, 1], image_path)
                # Relative '../../..' prefix makes the <img> links resolve from
                # the nested result directory back to the repository root.
                index.write(('<td>%s</td>' % os.path.basename(image_path)))
                index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((sample_file if os.path.isabs(sample_file) else (('../../..' + os.path.sep) + sample_file)), self.img_size, self.img_size)))
                index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((image_path if os.path.isabs(image_path) else (('../../..' + os.path.sep) + image_path)), self.img_size, self.img_size)))
                index.write('</tr>')
        else:
            for sample_file in test_B_files:
                print(('Processing B image: ' + sample_file))
                sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
                image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
                fake_img = self.sess.run(self.guide_fake_A, feed_dict={self.content_image: sample_image, self.attribute_image: attribute_file})
                save_images(fake_img, [1, 1], image_path)
                index.write(('<td>%s</td>' % os.path.basename(image_path)))
                index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((sample_file if os.path.isabs(sample_file) else (('../../..' + os.path.sep) + sample_file)), self.img_size, self.img_size)))
                index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((image_path if os.path.isabs(image_path) else (('../../..' + os.path.sep) + image_path)), self.img_size, self.img_size)))
                index.write('</tr>')
        index.close()
class HuffmanMMapIndex():
    """Memory-mapped index for a Huffman-coded binary dataset.

    File layout (little-endian): 9-byte magic, u64 format version,
    u64 data_len, u64 entry count, int32 sizes array, int64 pointers array.

    Fixes vs. previous revision: `writer` restored as a classmethod,
    `data_len`/`sizes` restored as properties, the stray mangled
    `_cache(maxsize=8)` decorator line (a NameError at class creation)
    removed, and the malformed version-mismatch f-string repaired.
    """

    _HDR_MAGIC = b'HUFFIDX\x00\x00'
    _VERSION = 1

    @classmethod
    def writer(cls, path: str, data_len: int):
        """Return a context manager that writes an index file to *path*."""

        class _Writer():
            def __enter__(self):
                self._file = open(path, 'wb')
                # Header: magic, version, total data length.
                self._file.write(cls._HDR_MAGIC)
                self._file.write(struct.pack('<Q', cls._VERSION))
                self._file.write(struct.pack('<Q', data_len))
                return self

            def write(self, sizes, pointers):
                """Append the entry count followed by the two parallel arrays."""
                self._file.write(struct.pack('<Q', len(sizes)))
                sizes = np.array(sizes, dtype=np.int32)
                self._file.write(sizes.tobytes(order='C'))
                del sizes
                pointers = np.array(pointers, dtype=np.int64)
                self._file.write(pointers.tobytes(order='C'))
                del pointers

            def __exit__(self, exc_type, exc_val, exc_tb):
                self._file.close()

        return _Writer()

    def __init__(self, path):
        """Validate the header of *path* and memory-map the index arrays."""
        with open(path, 'rb') as stream:
            magic_test = stream.read(9)
            assert (self._HDR_MAGIC == magic_test), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
            (version,) = struct.unpack('<Q', stream.read(8))
            # Fix: the 'f' prefixes were inside the quotes, so the message
            # printed the literal text 'f{version}' instead of the values.
            assert (self._VERSION == version), f'Unexpected file version {version} != code version {self._VERSION}'
            (self._data_len,) = struct.unpack('<Q', stream.read(8))
            (self._len,) = struct.unpack('<Q', stream.read(8))
            offset = stream.tell()
        indexed_dataset._warmup_mmap_file(path)
        self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
        self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
        self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len, offset=(offset + self._sizes.nbytes))

    def __del__(self):
        # Explicitly release the mmap so the file handle is not held open.
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap

    def __iter__(self):
        for i in range(self._len):
            (yield self[i])

    @property
    def data_len(self):
        """Total length of the underlying data stream recorded in the header."""
        return self._data_len

    @property
    def sizes(self):
        """int32 array of per-entry sizes."""
        return self._sizes

    def __getitem__(self, i):
        # NOTE(review): the original carried a mangled cache decorator here
        # (likely lru_cache(maxsize=8)); lookups are plain numpy indexing, so
        # the cache is omitted rather than reintroduced — confirm if needed.
        return (self._pointers[i], self._sizes[i])

    def __len__(self):
        return self._len
def _compute_aspect_ratios_voc_dataset(dataset, indices=None):
    """Return width/height aspect ratios for the selected VOC dataset images.

    Args:
        dataset: VOC-style dataset exposing an `images` list of file paths.
        indices: optional iterable of dataset indices; defaults to all.
    """
    selected = range(len(dataset)) if indices is None else indices
    ratios = []
    for idx in selected:
        # Image.open is lazy: .size reads only the header, not the pixels.
        width, height = Image.open(dataset.images[idx]).size
        ratios.append(float(width) / float(height))
    return ratios
def layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
    """Configuration dict for a stateless, tied-LM-head T5-3B partitioning run."""
    explicit_overrides = {
        'return_dict': False,
        'use_cache': False,
        'output_only': True,
        'output_attentions': False,
        'precompute_masks': False,
        'output_hidden_states': False,
    }
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': True,
        'explicitly_set_dict': explicit_overrides,
        'do_resize_token_embedding': True,
    }
def load_from_ckpt(dynamics: Dynamics, optimizer: torch.optim.Optimizer, cfg: DictConfig) -> tuple[(torch.nn.Module, torch.optim.Optimizer, dict)]:
    """Restore model and optimizer state from the newest checkpoint under cfg.outdir.

    Raises:
        FileNotFoundError: when no `*.tar` checkpoint exists under
        `<outdir>/train/checkpoints`.
    """
    outdir = Path(cfg.get('outdir', os.getcwd()))
    ckpts = list(outdir.joinpath('train', 'checkpoints').rglob('*.tar'))
    if not ckpts:
        raise FileNotFoundError(f'No checkpoints found in {outdir}')
    # Most recently created checkpoint wins.
    latest = max(ckpts, key=lambda p: p.stat().st_ctime)
    if not latest.is_file():
        raise FileNotFoundError(f'No checkpoints found in {outdir}')
    log.info(f'Loading from checkpoint: {latest}')
    ckpt = torch.load(latest)
    dynamics.load_state_dict(ckpt['model_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    # Restore the trained step sizes alongside the weights.
    dynamics.assign_eps({'xeps': ckpt['xeps'], 'veps': ckpt['veps']})
    return (dynamics, optimizer, ckpt)
def test_sag_multiclass_classes():
    """After fitting on 3-class data, SAGClassifier must expose all class labels."""
    features, labels = make_classification(n_samples=10, random_state=0, n_classes=3, n_informative=4)
    clf = SAGClassifier()
    clf.fit(features, labels)
    assert list(clf.classes_) == [0, 1, 2]
def add_stats(model):
    """Create TensorBoard summaries (output/target histograms, losses, learning
    rate, gradient norms) for the model and return the merged summary op."""
    with tf.variable_scope('stats') as scope:
        # Histogram every output/target tensor under its own tag.
        for tag in ('linear_outputs', 'linear_targets', 'mel_outputs', 'mel_targets'):
            tf.summary.histogram(tag, getattr(model, tag))
        tf.summary.scalar('loss_mel', model.mel_loss)
        tf.summary.scalar('loss_linear', model.linear_loss)
        tf.summary.scalar('learning_rate', model.learning_rate)
        tf.summary.scalar('loss', model.loss)
        norms = [tf.norm(grad) for grad in model.gradients]
        tf.summary.histogram('gradient_norm', norms)
        tf.summary.scalar('max_gradient_norm', tf.reduce_max(norms))
        return tf.summary.merge_all()
def load_archive(archive_file: str, device=None, weights_file: str=None) -> Archive:
    """Load a serialized model archive (directory or .tar.gz) into an Archive.

    Args:
        archive_file: path or URL of the archive.
        device: device passed through to `Model.load`.
        weights_file: optional override for the weights file inside the archive.

    Returns:
        Archive wrapping the loaded model and its config.
    """
    resolved_archive_file = cached_path(archive_file)
    if resolved_archive_file == archive_file:
        logger.info(f'loading archive file {archive_file}')
    else:
        logger.info(f'loading archive file {archive_file} from cache at {resolved_archive_file}')
    tempdir = None
    try:
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            tempdir = tempfile.mkdtemp()
            logger.info(f'extracting archive file {resolved_archive_file} to temp dir {tempdir}')
            # SECURITY: extractall trusts member paths inside the tarball;
            # only load archives from trusted sources.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME))
        config.loading_from_archive = True
        weights_path = weights_file if weights_file else os.path.join(serialization_dir, _WEIGHTS_NAME)
        model = Model.load(config, weights_file=weights_path, serialization_dir=serialization_dir, device=device)
    finally:
        # Fix: previously the temp dir leaked whenever loading raised;
        # cleanup now always runs.
        if tempdir:
            shutil.rmtree(tempdir, ignore_errors=True)
    return Archive(model=model, config=config)
def cluster_layout(G, pos_nodes, pos_clusters):
    """Combine per-node and per-cluster positions into one layout dict.

    Returns a mapping node -> pos_nodes[node] + pos_clusters[node].
    """
    return {node: pos_nodes[node] + pos_clusters[node] for node in G.nodes()}
def test_evaluate_coverage(tmpdir):
    """Smoke-test EvaluatePrequential with time/size metrics and per-point output."""
    from skmultiflow.data import SEAGenerator
    from skmultiflow.bayes import NaiveBayes
    stream = SEAGenerator(random_state=1)
    model = NaiveBayes()
    summary_path = os.path.join(str(tmpdir), 'prequential_summary.csv')
    evaluator = EvaluatePrequential(
        max_samples=1000,
        metrics=['running_time', 'model_size'],
        data_points_for_classification=True,
        output_file=summary_path,
    )
    evaluator.evaluate(stream=stream, model=model, model_names=['NB'])
class LRSchedulerFactory(abc.ABC):
    """Abstract factory that builds a learning-rate scheduler for an optimizer."""

    # Fix: the method had no body (a syntax error) and had lost its
    # abstractmethod decorator in a previous re-rendering of the file.
    @abc.abstractmethod
    def create(self, optimizer: torch.optim.Optimizer) -> torch.optim.lr_scheduler._LRScheduler:
        """Build and return the LR scheduler bound to *optimizer*."""
        ...
# NOTE(review): this line was mangled to a bare `_module()` call (NameError at
# import). Reconstructed as the mmdet neck-registry decorator — confirm the
# registry symbol used elsewhere in this project.
@NECKS.register_module()
class RFP(FPN):
    """Recursive Feature Pyramid (DetectoRS) neck.

    Runs the FPN, then for each recursive step feeds ASPP-transformed pyramid
    features back through an extra backbone (`rfp_forward`) and fuses the new
    pyramid with the old one via a learned per-location sigmoid weight.
    """

    def __init__(self, rfp_steps, rfp_backbone, aspp_out_channels, aspp_dilations=(1, 3, 6, 1), init_cfg=None, **kwargs):
        """
        Args:
            rfp_steps: total number of unrolled steps (step 0 is the plain FPN).
            rfp_backbone: config dict for the backbone used in recursive steps.
            aspp_out_channels: channel count of the ASPP module output.
            aspp_dilations: dilation rates for the ASPP branches.
            init_cfg: must be None; weights are initialized in init_weights().
        """
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg=init_cfg, **kwargs)
        self.rfp_steps = rfp_steps
        self.rfp_modules = ModuleList()
        # One extra backbone per recursive step (steps 1..rfp_steps-1).
        for rfp_idx in range(1, rfp_steps):
            rfp_module = build_backbone(rfp_backbone)
            self.rfp_modules.append(rfp_module)
        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, aspp_dilations)
        # 1x1 conv producing the per-location fusion weight.
        self.rfp_weight = nn.Conv2d(self.out_channels, 1, kernel_size=1, stride=1, padding=0, bias=True)

    def init_weights(self):
        """Xavier-init lateral/fpn convs, init recursive backbones, zero the fusion weight."""
        for convs in [self.lateral_convs, self.fpn_convs]:
            for m in convs.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m, distribution='uniform')
        for rfp_idx in range((self.rfp_steps - 1)):
            self.rfp_modules[rfp_idx].init_weights()
        # Zero init => fusion starts as identity on the previous pyramid.
        constant_init(self.rfp_weight, 0)

    def forward(self, inputs):
        """inputs = (img, *backbone_feats); returns the fused pyramid after all steps."""
        inputs = list(inputs)
        assert (len(inputs) == (len(self.in_channels) + 1))
        img = inputs.pop(0)
        x = super().forward(tuple(inputs))
        for rfp_idx in range((self.rfp_steps - 1)):
            # Level 0 is passed through unchanged; higher levels go through ASPP.
            rfp_feats = ([x[0]] + list((self.rfp_aspp(x[i]) for i in range(1, len(x)))))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            x_idx = super().forward(x_idx)
            x_new = []
            for ft_idx in range(len(x_idx)):
                # Sigmoid gate blends the new pyramid with the previous one.
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(((add_weight * x_idx[ft_idx]) + ((1 - add_weight) * x[ft_idx])))
            x = x_new
        return x
def train_step(ds_one, ds_two, f, h, optimizer):
    """One SimSiam-style step: encode both views, predict, apply symmetrized loss.

    Returns the scalar loss value for logging.
    """
    with tf.GradientTape() as tape:
        z1, z2 = f(ds_one), f(ds_two)
        p1, p2 = h(z1), h(z2)
        # Symmetrized objective: each direction contributes half.
        loss = loss_func(p1, z2) / 2 + loss_func(p2, z1) / 2
    params = f.trainable_variables + h.trainable_variables
    grads = tape.gradient(loss, params)
    optimizer.apply_gradients(zip(grads, params))
    return loss
def test_sub():
    """Subtracting one optplan Parameter from another yields a Sum node."""
    first = optplan.Parameter()
    second = optplan.Parameter()
    assert isinstance(second - first, optplan.Sum)
def mk_vs_proj_dep_groups(f, name, components):
    """Write a Visual Studio <ItemGroup> compiling every .cpp file of the
    transitive dependencies of component *name* into project file *f*."""
    f.write(' <ItemGroup>\n')
    for dep_name in find_all_deps(name, components):
        dep = get_component(dep_name)
        sources = [fn for fn in os.listdir(dep.src_dir) if fn.endswith('.cpp')]
        for cpp in sources:
            f.write((' <ClCompile Include="%s" />\n' % os.path.join(dep.to_src_dir, cpp)))
    f.write(' </ItemGroup>\n')
def test(encoder, classifier, test_loader, imagenet_loader, args, epoch, tb_logger):
    """Evaluate encoder+classifier on WebVision and ImageNet validation loaders,
    all-reduce the accuracies across ranks, and log them on rank 0."""

    def _eval_loader(loader, top1, top5):
        # Accumulate top-1/top-5 accuracy over one dataloader.
        for img, target in loader:
            img = img.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            outputs = classifier(encoder(img))
            acc1, acc5 = accuracy(outputs, target, topk=(1, 5))
            top1.update(acc1[0])
            top5.update(acc5[0])

    with torch.no_grad():
        encoder.eval()
        classifier.eval()
        top1_web = AverageMeter('', ':4.2f')
        top5_web = AverageMeter('', ':4.2f')
        top1_img = AverageMeter('', ':4.2f')
        top5_img = AverageMeter('', ':4.2f')
        print('==> Evaluation...')
        _eval_loader(test_loader, top1_web, top5_web)
        _eval_loader(imagenet_loader, top1_img, top5_img)
        # Average the four metrics across all distributed workers.
        acc_tensors = torch.Tensor([top1_web.avg, top5_web.avg, top1_img.avg, top5_img.avg]).cuda(args.gpu)
        dist.all_reduce(acc_tensors)
        acc_tensors /= args.world_size
        print(('Webvision Accuracy is %.2f%% (%.2f%%)' % (acc_tensors[0], acc_tensors[1])))
        print(('ImageNet Accuracy is %.2f%% (%.2f%%)' % (acc_tensors[2], acc_tensors[3])))
        if args.gpu == 0:
            tb_logger.log_value('WebVision top1 Acc', acc_tensors[0], epoch)
            tb_logger.log_value('WebVision top5 Acc', acc_tensors[1], epoch)
            tb_logger.log_value('ImageNet top1 Acc', acc_tensors[2], epoch)
            tb_logger.log_value('ImageNet top5 Acc', acc_tensors[3], epoch)
    return
def parse_args():
    """Parse CLI arguments for the reverse-mimicry cuckoo-oracle checker."""
    parser = argparse.ArgumentParser(description='Check cuckoo oracle for the PDFs generated by reverse mimicry.')
    parser.add_argument('--var_dir', type=str, required=True, help='Variant files directory.')
    return parser.parse_args()
class EpochBatchIterator(EpochBatchIterating):
    """Multi-epoch, shardable, resumable iterator over pre-computed batches.

    Wraps a torch Dataset plus a fixed batch sampler; each epoch yields the
    (optionally shuffled) batches belonging to this shard via a DataLoader
    wrapped in a CountingIterator so progress can be checkpointed/resumed.
    """

    def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        # Materialize the sampler once so every epoch shuffles the same batches.
        self.frozen_batches = tuple(batch_sampler)
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id
        self.num_workers = num_workers
        self.epoch = epoch
        self._cur_epoch_itr = None
        # Set by load_state_dict() when resuming mid-epoch.
        self._next_epoch_itr = None
        self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)

    def __len__(self):
        return len(self.frozen_batches)

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Return the iterator for the next epoch (or the resumed one, if any)."""
        if (self._next_epoch_itr is not None):
            # A mid-epoch resume was prepared by load_state_dict(); use it.
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self.epoch += 1
            self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus)
        self.dataset.set_epoch(self.epoch)
        return self._cur_epoch_itr

    def end_of_epoch(self) -> bool:
        return (not self._cur_epoch_itr.has_next())

    def iterations_in_epoch(self):
        # Number of batches consumed so far in the current (or pending) epoch.
        if (self._cur_epoch_itr is not None):
            return self._cur_epoch_itr.count
        elif (self._next_epoch_itr is not None):
            return self._next_epoch_itr.count
        return 0

    def state_dict(self):
        return {'epoch': self.epoch, 'iterations_in_epoch': self.iterations_in_epoch}

    def load_state_dict(self, state_dict):
        """Restore epoch position; pre-builds an offset iterator if mid-epoch."""
        self.epoch = state_dict['epoch']
        itr_pos = state_dict.get('iterations_in_epoch', 0)
        if (itr_pos > 0):
            self._next_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle=state_dict.get('shuffle', True), offset=itr_pos)

    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):

        def shuffle_batches(batches, seed):
            # Deterministic per-epoch shuffle.
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches

        if self._supports_prefetch:
            batches = self.frozen_batches
            if (shuffle and (not fix_batches_to_gpus)):
                batches = shuffle_batches(list(batches), (self.seed + epoch))
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
            self.dataset.prefetch([i for s in batches for i in s])
            if (shuffle and fix_batches_to_gpus):
                # Shard-specific seed keeps each GPU's batch set stable across epochs.
                batches = shuffle_batches(batches, ((self.seed + epoch) + self.shard_id))
        else:
            if shuffle:
                batches = shuffle_batches(list(self.frozen_batches), (self.seed + epoch))
            else:
                batches = self.frozen_batches
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
        if ((offset > 0) and (offset >= len(batches))):
            # Resume position is past the end of this epoch: nothing to iterate.
            return None
        if (self.num_workers > 0):
            # Silence the noisy semaphore_tracker warning from worker processes.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        return CountingIterator(torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers), start=offset)
def noncat_slot_value_match(str_ref_list, str_hyp, use_fuzzy_match):
    """Score how well *str_hyp* matches any reference string.

    Exact mode scores 1.0/0.0 per reference; fuzzy mode uses
    fuzzy_string_match. Returns the best score over all references
    (0.0 for an empty reference list).
    """
    best = 0.0
    for ref in str_ref_list:
        if use_fuzzy_match:
            current = fuzzy_string_match(ref, str_hyp)
        else:
            current = float(ref == str_hyp)
        if current > best:
            best = current
    return best
class StructOrUnionScope(Scope):
    """Symbol-table scope holding the members of a C struct or union."""

    def __init__(self, name='?'):
        Scope.__init__(self, name, None, None)

    def declare_var(self, name, type, pos, cname=None, visibility='private', api=0, in_pxd=0, is_cdef=0, allow_pyobject=False, allow_memoryview=False):
        """Declare a data member and report member kinds C cannot hold.

        Returns the created entry even when errors were reported, so
        compilation can continue collecting further diagnostics.
        """
        if (not cname):
            cname = name
            if (visibility == 'private'):
                # Mangle names that would collide with C keywords/identifiers.
                cname = c_safe_identifier(cname)
        if type.is_cfunction:
            # Function members are stored as function pointers.
            type = PyrexTypes.CPtrType(type)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        self.var_entries.append(entry)
        if (type.is_pyobject and (not allow_pyobject)):
            error(pos, 'C struct/union member cannot be a Python object')
        elif (type.is_memoryviewslice and (not allow_memoryview)):
            error(pos, 'C struct/union member cannot be a memory view')
        if (visibility != 'private'):
            error(pos, ('C struct/union member cannot be declared %s' % visibility))
        return entry

    def declare_cfunction(self, name, type, pos, cname=None, visibility='private', api=0, in_pxd=0, defining=0, modifiers=(), overridable=False):
        """Declare a function member; rejects 'cpdef' and delegates to declare_var."""
        if overridable:
            error(pos, "C struct/union member cannot be declared 'cpdef'")
        return self.declare_var(name, type, pos, cname=cname, visibility=visibility)
def require_pandas(test_case):
    """Decorator: skip *test_case* unless pandas is installed."""
    skip_unless_pandas = unittest.skipUnless(is_pandas_available(), 'test requires pandas')
    return skip_unless_pandas(test_case)
class BatchNorm(nn.Module):
    """3-D batch normalization that forwards an auxiliary tensor untouched.

    The input is a pair (features, aux); only the features pass through
    BatchNorm3d, the aux value is returned as-is.
    """

    def __init__(self, out_channels):
        super(BatchNorm, self).__init__()
        self.batch_norm = nn.BatchNorm3d(num_features=out_channels)

    def forward(self, input):
        features, aux = input
        return (self.batch_norm(features), aux)
def test_init():
    """init_trace() must discard previously recorded execution data."""
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    tracer.register_code_object(MagicMock(CodeObjectMetaData))
    tracer.executed_code_object(0)
    captured = tracer.get_trace()
    tracer.init_trace()
    assert tracer.get_trace() != captured
def test_gimvi_model_library_size():
    """GIMVI with per-modality library sizes should expose both log-mean buffers
    and still train / produce latents and imputed values."""
    adata_seq = synthetic_iid()
    adata_spatial = synthetic_iid()
    for adata in (adata_seq, adata_spatial):
        GIMVI.setup_anndata(adata, batch_key='batch', labels_key='labels')
    model = GIMVI(adata_seq, adata_spatial, model_library_size=[True, True], n_latent=10)
    has_both = hasattr(model.module, 'library_log_means_0') and hasattr(model.module, 'library_log_means_1')
    assert has_both
    model.train(1, check_val_every_n_epoch=1, train_size=0.5)
    model.get_latent_representation()
    model.get_imputed_values()
def load_bin(path, fill=0.0):
    """Read a binary feature file: a 4-int32 header (first int = float count)
    followed by that many float32 values. Returns a float32 vector of length
    feature_dim + feature_ext, padded with *fill* past the loaded values."""
    with open(path, 'rb') as fh:
        header = struct.unpack('4i', fh.read(4 * 4))
        count = header[0]
        values = struct.unpack('%df' % count, fh.read(count * 4))
    feature = np.full((feature_dim + feature_ext,), fill, dtype=np.float32)
    # Assumes count == feature_dim (module-level constant) — the slice
    # assignment requires matching lengths.
    feature[0:feature_dim] = values
    return feature
def assert_similar(ref, real):
    """Assert *ref* and *real* have equal length and element-wise close values
    (relative tolerance 1e-3)."""
    np.testing.assert_equal(len(ref), len(real))
    for expected, actual in zip(ref, real):
        np.testing.assert_allclose(expected, actual, rtol=0.001)
# NOTE(review): the decorator was mangled to a bare `.parametrize(...)` and the
# CLI-argument string literal was corrupted into a syntax error
# (`'--base-url= f'--workers={workers}'`). Reconstructed as two separate
# arguments — confirm the intended --base-url value against upstream.
@pytest.mark.parametrize('workers', (1, 2))
def test_connection_error(cli, schema_url, workers, snapshot_cli):
    """CLI run against an unreachable base URL should match the snapshot."""
    assert cli.run(schema_url, '--base-url=', f'--workers={workers}') == snapshot_cli
class InfoSet(object):
    """Per-player observation ('information set') for card-play.

    Only `player_position` is set at construction; every other field starts
    as None and is filled in by the game engine as the episode progresses.
    """

    # State fields initialised to None at construction.
    _STATE_FIELDS = (
        'player_hand_cards', 'num_cards_left_dict', 'three_landlord_cards',
        'card_play_action_seq', 'other_hand_cards', 'legal_actions',
        'last_move', 'last_two_moves', 'last_move_dict', 'played_cards',
        'all_handcards', 'last_pid', 'bomb_num', 'init_card',
    )

    def __init__(self, player_position):
        self.player_position = player_position
        for field in self._STATE_FIELDS:
            setattr(self, field, None)
def set_blob_potential(implementation):
    """Return the blob-potential implementation selected by name.

    Args:
        implementation: one of 'None', 'python', 'C++', 'pycuda'.

    Returns:
        A callable; 'None' yields a stub that always returns 0. Unrecognized
        names fall through and return None (unchanged legacy behavior).
    """
    if (implementation == 'None'):
        def default_zero_r_vectors(*args, **kwargs):
            return 0
        # Fix: previously returned the undefined name `default_zero`,
        # raising NameError whenever implementation == 'None'.
        return default_zero_r_vectors
    elif (implementation == 'python'):
        return calc_blob_potential_python
    elif (implementation == 'C++'):
        return calc_blob_potential_boost
    elif (implementation == 'pycuda'):
        return pycuda.calc_blob_potential_pycuda
def pevaluate(q):
    """Worker loop: evaluate argument tuples pulled from queue *q*.

    Stops when it pulls the None sentinel; every item (including the
    sentinel) is acknowledged with task_done().
    """
    while (task := q.get()) is not None:
        evaluate(*task)
        q.task_done()
    # Acknowledge the sentinel so q.join() can return.
    q.task_done()
class ConvLSTMCell(rnn_cell_impl.RNNCell):
    """Convolutional LSTM cell: LSTM gating with convolutions over spatial dims.

    Fix: `output_size` and `state_size` are restored as properties — the
    RNNCell API reads them as attributes, so plain methods would hand the RNN
    machinery a bound method instead of a shape.
    """

    def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, dilation=1, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, name='conv_lstm_cell'):
        """
        Args:
            conv_ndims: number of spatial dimensions (input_shape has one extra
                trailing channel dimension).
            input_shape: per-timestep input shape, excluding batch.
            output_channels: number of output feature channels.
            kernel_shape: spatial shape of the convolution kernel.
            dilation: stored but not referenced elsewhere in this block.
            use_bias: add a bias term to the gate convolution.
            skip_connection: concatenate the input onto the output channels.
            forget_bias: constant added to the forget gate pre-activation.
            initializers: unused here; kept for interface compatibility.
            name: variable-scope name for the cell.
        """
        super(ConvLSTMCell, self).__init__(name=name)
        if (conv_ndims != (len(input_shape) - 1)):
            raise ValueError('Invalid input_shape {} for conv_ndims={}.'.format(input_shape, conv_ndims))
        self._dilation = dilation
        self._conv_ndims = conv_ndims
        self._input_shape = input_shape
        self._output_channels = output_channels
        self._kernel_shape = list(kernel_shape)
        self._use_bias = use_bias
        self._forget_bias = forget_bias
        self._skip_connection = skip_connection
        self._total_output_channels = output_channels
        if self._skip_connection:
            # Skip connection appends the raw input channels to the output.
            self._total_output_channels += self._input_shape[(- 1)]
        state_size = tensor_shape.TensorShape((self._input_shape[:(- 1)] + [self._output_channels]))
        self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
        self._output_size = tensor_shape.TensorShape((self._input_shape[:(- 1)] + [self._total_output_channels]))

    @property
    def output_size(self):
        return self._output_size

    @property
    def state_size(self):
        return self._state_size

    def call(self, inputs, state, scope=None):
        """One ConvLSTM step; returns (output, new LSTMStateTuple)."""
        (cell, hidden) = state
        # Single convolution produces all four gates at once.
        new_hidden = _conv([inputs, hidden], self._kernel_shape, (4 * self._output_channels), self._use_bias, dilations=[1, 1, 1, 1], name='kernel')
        gates = array_ops.split(value=new_hidden, num_or_size_splits=4, axis=(self._conv_ndims + 1))
        (input_gate, new_input, forget_gate, output_gate) = gates
        new_cell = (math_ops.sigmoid((forget_gate + self._forget_bias)) * cell)
        new_cell += (math_ops.sigmoid(input_gate) * math_ops.tanh(new_input))
        output = (math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate))
        if self._skip_connection:
            output = array_ops.concat([output, inputs], axis=(- 1))
        new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
        return (output, new_state)
class BitBackbone(metaclass=DummyObject):
    """Import-time placeholder for BitBackbone when torch is not installed."""
    # Backends that must be available before this class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error if torch is missing.
        requires_backends(self, ['torch'])
def clear_all_gradients(gradient_type=SNodeGradType.ADJOINT):
    """Zero out every gradient field of the given type across all finalized
    field trees.

    Args:
        gradient_type: which gradient kind to clear (defaults to adjoint).
    """
    # Ensure kernels/fields are compiled and materialized before traversal.
    impl.get_runtime().materialize()

    def visit(node):
        # Depth-first walk of the SNode tree, collecting matching grad places.
        places = []
        for _i in range(node.ptr.get_num_ch()):
            ch = node.ptr.get_ch(_i)
            if (not ch.is_place()):
                visit(SNode(ch))
            elif (ch.get_snode_grad_type() == gradient_type):
                places.append(ch.get_expr())
        places = tuple(places)
        if places:
            # Imported lazily to avoid a circular import at module load time.
            from taichi._kernels import clear_gradients
            clear_gradients(places)

    for root_fb in _snode.FieldsBuilder._finalized_roots():
        visit(root_fb)
def list_dir_single(directory):
    """Return the immediate subdirectory names of *directory*.

    Returns None when the walk yields nothing (e.g. the path does not exist),
    matching the original early-return-from-first-iteration behavior.
    """
    walker = os.walk(directory)
    try:
        _, subdirs, _ = next(walker)
    except StopIteration:
        return None
    return subdirs
def _convert_when(when):
    """Convert a payment-timing spec to numeric form.

    ndarrays pass through unchanged; a single key is looked up in
    _when_to_num; otherwise *when* is treated as an iterable of keys and
    converted element-wise (TypeError covers unhashable inputs).
    """
    if isinstance(when, np.ndarray):
        return when
    try:
        return _when_to_num[when]
    except (KeyError, TypeError):
        return [_when_to_num[x] for x in when]
def register_Ns3NetDevice_methods(root_module, cls):
    """Register PyBindGen wrappers for the (pure-virtual) ns3::NetDevice API.

    Generated-style binding code: declares constructors, accessors, link/ARP
    predicates, send paths and callback setters on the wrapped class.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetMulticast is overloaded for IPv4 and IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
class TextDataset(Dataset):
    """Dataset of sentences read from a (possibly glob) text file path.

    Fix: `to_torch` restored as a staticmethod — it takes only `batch`, so as
    a plain instance method `inst.to_torch(x)` would have passed the instance
    as `batch` and failed.
    """

    def __init__(self, fname, vocab, bos=False):
        """
        Args:
            fname: file path; the basename may be a glob pattern.
            vocab: vocabulary used to map tokens to ids.
            bos: prepend a beginning-of-sentence token when True.
        """
        self.path = Path(fname)
        self.vocab = vocab
        self.bos = bos
        # Expand a glob pattern in the basename; sorted for determinism.
        self.fnames = sorted(self.path.parent.glob(self.path.name))
        if (len(self.fnames) == 0):
            raise RuntimeError('{} does not exist.'.format(self.path))
        elif (len(self.fnames) > 1):
            logger.info('Multiple files found, using first: {}'.format(self.fnames[0]))
        (self.data, self.lengths) = read_sentences(self.fnames[0], self.vocab, bos=self.bos)
        self.size = len(self.data)

    @staticmethod
    def to_torch(batch):
        """Collate a batch of sentences into padded tensors."""
        return pad_data(batch)

    def __getitem__(self, idx):
        return self.data[idx]

    def __len__(self):
        return self.size

    def __repr__(self):
        s = "{} '{}' ({} sentences)\n".format(self.__class__.__name__, self.fnames[0].name, self.__len__())
        return s
def register_Ns3FqCoDelQueueDisc_methods(root_module, cls):
    """Register PyBindGen wrappers for ns3::FqCoDelQueueDisc.

    Generated-style binding code: public quantum accessors, drop-reason
    constants, and the private virtual queue-disc hooks.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetQuantum', 'void', [param('uint32_t', 'quantum')])
    cls.add_method('GetQuantum', 'uint32_t', [], is_const=True)
    # Drop-reason string constants exposed on the class.
    cls.add_static_attribute('UNCLASSIFIED_DROP', 'char const * const', is_const=True)
    cls.add_static_attribute('OVERLIMIT_DROP', 'char const * const', is_const=True)
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], visibility='private', is_virtual=True)
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [], visibility='private', is_virtual=True)
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::QueueDiscItem const >', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('CheckConfig', 'bool', [], visibility='private', is_virtual=True)
    cls.add_method('InitializeParams', 'void', [], visibility='private', is_virtual=True)
    return
def test_block_reduce_sum():
    """block_reduce should sum each non-overlapping block (default func=np.sum),
    cropping/padding rows that do not divide evenly per its contract."""
    img = np.arange(24).reshape(4, 6)
    assert_equal(np.array([[24, 42], [96, 114]]), block_reduce(img, (2, 3)))
    img = np.arange(40).reshape(5, 8)
    assert_equal(np.array([[81, 108, 87], [174, 192, 138]]), block_reduce(img, (3, 3)))
class NoBNSecondMomentTest(BaseSecondMomentTest):
    """Second-moment correction test on a Conv2D model without batch norm."""

    def __init__(self, unit_test):
        self.i = 0
        super().__init__(unit_test, linear_layer=layers.Conv2D)

    def create_networks(self):
        """Single 1x1 Conv2D (ones kernel, zero bias) followed by ReLU."""
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        out = self.linear_layer(1, 1, padding='same', kernel_initializer='ones', bias_initializer='zeros')(inputs)
        out = layers.Activation('relu')(out)
        return tf.keras.models.Model(inputs=inputs, outputs=out)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        """Quantized kernel and bias must stay close to the float model's values."""
        attr = DEFAULT_KERAS_INFO.get_kernel_op_attributes(self.linear_layer)[0]
        qlayer = get_layers_from_model_by_type(quantized_model, self.linear_layer)[0]
        q_kernel = qlayer.get_quantized_weights()[attr]
        q_bias = qlayer.weights[2]
        f_kernel = float_model.layers[1].weights[0]
        f_bias = float_model.layers[1].weights[1]
        self.unit_test.assertTrue(np.isclose(q_kernel, f_kernel, atol=0.1))
        self.unit_test.assertTrue(np.isclose(q_bias, f_bias, atol=0.1))
class TrainedTernaryConv(nn.Module):
    """2-D convolution with Trained Ternary Quantization of its weights.

    Weights are ternarized on every forward pass via TrainedTernarize, with
    learned positive/negative scale factors wp and wn.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(TrainedTernaryConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        self.bias = None
        self.kwargs = {'stride': (stride, stride), 'padding': (padding, padding)}
        # Reuse _ConvNd's initializer on this module's weight/bias attributes.
        nn.modules.conv._ConvNd.reset_parameters(self)
        # NOTE(review): indexing a 0-dim tensor with [0] only works on very old
        # PyTorch; modern versions require .item() — confirm target version.
        w_initial = self.weight.abs().mean().data[0]
        # Learned scales for the positive and negative ternary levels.
        self.wp = nn.Parameter(torch.Tensor(1).fill_(w_initial))
        self.wn = nn.Parameter(torch.Tensor(1).fill_(w_initial))

    def __repr__(self):
        s = 'Trained Ternary Convolution({}, {}, kernel_size={}, stride={}, padding={})'.format(self.in_channels, self.out_channels, self.kernel_size, self.kwargs['stride'], self.kwargs['padding'])
        return s

    def forward(self, x):
        # Ternarize weights (straight-through autograd Function), then conv.
        ternary_weight = TrainedTernarize.apply(self.weight, self.wp, self.wn)
        output = F.conv2d(x, ternary_weight, **self.kwargs)
        return output
class DownBlock(nn.Module):
    """Encoder block: two same-padded convs (with dropout), then 2x2 max-pool
    + BatchNorm + ReLU. forward() returns (pre-pool, post-pool) feature maps.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, drop=0):
        super(DownBlock, self).__init__()
        # 'same' padding for odd kernel sizes.
        pad = (kernel_size - 1) // 2
        self.block_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=pad),
            nn.Dropout2d(drop),
            nn.Conv2d(out_channels, out_channels, kernel_size, stride=1, padding=pad),
            nn.Dropout2d(drop),
        )
        self.block_mp = nn.Sequential(
            nn.MaxPool2d((2, 2)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x_in):
        conv_out = self.block_conv(x_in)
        return (conv_out, self.block_mp(conv_out))
def build_adadelta(model, base_learning_rate, parameters=None, max_gradient_norm=None, allow_lr_injection=False, **kwargs):
    """Attach an Adadelta optimizer to *model* via the shared _build helper.

    `parameters` is accepted for signature compatibility but not used here.
    """
    optimizer = AdadeltaOptimizer(alpha=base_learning_rate, **kwargs)
    return _build(model, optimizer, max_gradient_norm=max_gradient_norm, allow_lr_injection=allow_lr_injection)
class RecordArray(Content):
    """Array of records: parallel field arrays plus an optional name lookup.

    When `contents` is empty the length must be stored explicitly; otherwise
    length is derived from the shortest field. `recordlookup` is either None
    (tuple-like records) or a list of field names matching `contents`.
    """

    def __init__(self, contents, recordlookup, length):
        assert isinstance(contents, list)
        if (len(contents) == 0):
            # Empty record: length cannot be inferred, so it is required.
            assert isinstance(length, int)
            assert (length >= 0)
        else:
            # Non-empty record: length comes from the fields, not the argument.
            assert (length is None)
            for x in contents:
                assert isinstance(x, Content)
        assert ((recordlookup is None) or isinstance(recordlookup, list))
        if isinstance(recordlookup, list):
            assert (len(recordlookup) == len(contents))
            for x in recordlookup:
                assert isinstance(x, str)
        self.contents = contents
        self.recordlookup = recordlookup
        self.length = length
def random(minlen=0, choices=None):
length = random_length(minlen)
contents = []
for i in range(random.randint(0, 2)):
contents.append(Content.random(length, choices))
if (len(contents) != 0):
length = None
if (random.randint(0, 1) == 0):
recordlookup = None
else:
recordlookup = [('x' + str(i)) for i in range(len(contents))]
return RecordArray(contents, recordlookup, length)
def __len__(self):
if (len(self.contents) == 0):
return self.length
else:
return min((len(x) for x in self.contents))
def __getitem__(self, where):
if isinstance(where, int):
assert (0 <= where < len(self))
record = [x[where] for x in self.contents]
if (self.recordlookup is None):
return tuple(record)
else:
return dict(zip(self.recordlookup, record))
elif (isinstance(where, slice) and (where.step is None)):
if (len(self.contents) == 0):
start = min(max(where.start, 0), self.length)
stop = min(max(where.stop, 0), self.length)
if (stop < start):
stop = start
return RecordArray([], self.recordlookup, (stop - start))
else:
return RecordArray([x[where] for x in self.contents], self.recordlookup, self.length)
elif isinstance(where, str):
if (self.recordlookup is None):
try:
i = int(where)
except ValueError:
pass
else:
if (i < len(self.contents)):
return self.contents[i]
else:
try:
i = self.recordlookup.index(where)
except ValueError:
pass
else:
return self.contents[i]
raise ValueError((('field ' + repr(where)) + ' not found'))
else:
raise AssertionError(where)
def carry(self, index):
assert all(((0 <= i < len(self)) for i in index))
if (len(self.contents) == 0):
return RecordArray([], self.recordlookup, len(index))
else:
return RecordArray([x.carry(index) for x in self.contents], self.recordlookup, self.length)
def purelist_depth(self):
return 1
def minmax_depth(self):
(min, max) = (None, None)
for content in self.contents:
(thismin, thismax) = content.minmax_depth()
if ((min is None) or (thismin < min)):
min = thismin
if ((max is None) or (thismax > max)):
max = thismax
return (min, max)
def branch_depth(self):
if (len(self.contents) == 0):
return (False, 1)
else:
anybranch = False
mindepth = (- 1)
for content in self.contents:
(branch, depth) = content.branch_depth()
if (mindepth == (- 1)):
mindepth = depth
if (branch or (mindepth != depth)):
anybranch = True
if (mindepth > depth):
mindepth = depth
return (anybranch, mindepth)
def __repr__(self):
return (((((('RecordArray([' + ', '.join((repr(x) for x in self.contents))) + '], ') + repr(self.recordlookup)) + ', ') + repr(self.length)) + ')')
def toxml(self, indent='', pre='', post=''):
out = ((indent + pre) + '<RecordArray>\n')
if (len(self.contents) == 0):
out += (((indent + ' <istuple>') + str((self.recordlookup is None))) + '</istuple>\n')
out += (((indent + ' <length>') + str(self.length)) + '</length>\n')
if (self.recordlookup is None):
for (i, content) in enumerate(self.contents):
out += content.toxml((indent + ' '), (('<content i="' + str(i)) + '">'), '</content>\n')
else:
for (i, (key, content)) in enumerate(zip(self.recordlookup, self.contents)):
out += content.toxml((indent + ' '), (((('<content i="' + str(i)) + '" key="') + repr(key)) + '">'), '</content>\n')
out += ((indent + '</RecordArray>') + post)
return out |
class ConvNet2D(nn.Module):
    """Pairwise-interaction conv head.

    Builds an (L x L) grid of pairwise features (|zi - zj| and zi * zj) from a
    sequence of embeddings and maps it to per-pair logits with two convs.
    conv2 is kept symmetric across the pair axes via clip().
    """

    def __init__(self, embed_dim, num=50, width=7):
        super(ConvNet2D, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        # pairwise features concatenate |zi - zj| and zi * zj -> 2*embed_dim channels
        self.conv1 = nn.Conv2d(2 * embed_dim, num, 1)
        self.conv2 = nn.Conv2d(num, 1, width, padding=width // 2)
        self.clip()

    def forward(self, z):
        # z: (batch, seq, embed) — presumably; channels-first after transpose.
        z = z.transpose(1, 2)
        row = z.unsqueeze(2)
        col = z.unsqueeze(3)
        pairwise = torch.cat([torch.abs(row - col), row * col], 1)
        hidden = self.relu(self.conv1(pairwise))
        return self.conv2(hidden).squeeze(1)

    def load_weights(self, pretrained_model):
        """Load matching-shape weights from a checkpoint, stripping 'module.' prefixes."""
        state_dict = self.state_dict()
        checkpoint = torch.load(pretrained_model, map_location=torch.device('cpu'))
        for key, value in checkpoint.items():
            if key.startswith('module'):
                key = key[7:]
            if key in state_dict and value.shape == state_dict[key].shape:
                state_dict[key] = value
        self.load_state_dict(state_dict)

    def clip(self):
        """Symmetrize conv2's kernel over the two pair axes."""
        w = self.conv2.weight
        self.conv2.weight.data[:] = 0.5 * (w + w.transpose(2, 3))
def main():
    """Generate the release author list for a git revision range (rev1..rev2).

    Collects commit authors plus people credited in commit bodies
    ("Thanks to ...", "Co-authored-by: ...") and prints an RST author list,
    marking first-time contributors with "+". With -n, prints only the new
    contributors.
    """
    p = argparse.ArgumentParser(__doc__.strip())
    p.add_argument('range', help=argparse.SUPPRESS)
    p.add_argument('-d', '--debug', action='store_true', help='print debug output')
    p.add_argument('-n', '--new', action='store_true', help='print debug output')
    options = p.parse_args()
    try:
        (rev1, rev2) = options.range.split('..')
    except ValueError:
        p.error('argument is not a revision range')
    NAME_MAP = load_name_map(MAILMAP_FILE)
    all_authors = set()
    authors = collections.Counter()

    def analyze_line(line, names, disp=False):
        """Extract author / credited names from one commit-log line into `names`."""
        line = line.strip().decode('utf-8')
        # BUG FIX: the previous pattern '^([^]*)' was an invalid regex
        # (unterminated character set) — the '@@@' delimiters emitted by the
        # --pretty format below had been lost. Restored to capture the author
        # name between the '@@@' markers.
        m = re.match('^@@@([^@]*)@@@', line)
        if m:
            name = m.group(1)
            line = line[m.end():]
            name = NAME_MAP.get(name, name)
            if disp:
                if (name not in names):
                    stdout_b.write((' - Author: %s\n' % name).encode('utf-8'))
            names.update((name,))
        # Credit lines inside the commit body; the last alternative matches
        # GitHub handles ('@user' — '@' restored; see strip('@') below).
        m = re.search('([Tt]hanks to|[Cc]ourtesy of|Co-authored-by:) ([A-Z][A-Za-z]*? [A-Z][A-Za-z]*? [A-Z][A-Za-z]*|[A-Z][A-Za-z]*? [A-Z]\\. [A-Z][A-Za-z]*|[A-Z][A-Za-z ]*? [A-Z][A-Za-z]*|@[a-z0-9]+)($|\\.| )', line)
        if m:
            name = m.group(2)
            if (name not in ('this',)):
                if disp:
                    # BUG FIX: encode the fully-formatted str; the old code
                    # interpolated bytes into the format string and then wrote
                    # the resulting str to the binary stream (TypeError).
                    stdout_b.write((' - Log : %s\n' % line.strip()).encode('utf-8'))
                name = NAME_MAP.get(name, name)
                names.update((name,))
            line = line[m.end():].strip()
            # Rewrite "and X"/", X" continuations so chained credits recurse.
            line = re.sub('^(and|, and|, ) ', 'Thanks to ', line)
            analyze_line(line.encode('utf-8'), names)

    # '@@@%an@@@' restored so analyze_line can find the author name; '%b'
    # appends the commit body for the credit-line scan.
    for line in git.pipe('log', '--pretty=@@@%an@@@%n%b', f'{rev1}'):
        analyze_line(line, all_authors)
    for line in git.pipe('log', '--pretty=@@@%an@@@%n%b', f'{rev1}..{rev2}'):
        analyze_line(line, authors, disp=options.debug)

    def name_key(fullname):
        """Sort key (surname, forename), ignoring common surname prefixes."""
        m = re.search(' [a-z ]*[A-Za-z-]+$', fullname)
        if m:
            forename = fullname[:m.start()].strip()
            surname = fullname[m.start():].strip()
        else:
            forename = ''
            surname = fullname.strip()
        if surname.startswith('van der '):
            surname = surname[8:]
        if surname.startswith('de '):
            surname = surname[3:]
        if surname.startswith('von '):
            surname = surname[4:]
        return (surname.lower(), forename.lower())

    if vars(options)['new']:
        # -n: only list contributors not seen before rev1.
        new_authors = set(authors.keys()).difference(all_authors)
        n_authors = list(new_authors)
        n_authors.sort(key=name_key)
        stdout_b.write(b'\n\n')
        for author in n_authors:
            stdout_b.write(('- %s\n' % author).encode('utf-8'))
        return
    try:
        # Drop the GitHub web-UI committer pseudo-author.
        authors.pop('GitHub')
    except KeyError:
        pass
    authors = sorted(authors.items(), key=(lambda i: name_key(i[0])))
    stdout_b.write(b'\nAuthors\n=======\n\n')
    for (author, count) in authors:
        # BUG FIX: strip('') was a no-op; strip the '@' marker from bare
        # GitHub handles so they print cleanly.
        author_clean = author.strip('@')
        if (author in all_authors):
            stdout_b.write(f'* {author_clean} ({count})\n'.encode())
        else:
            stdout_b.write(f'* {author_clean} ({count}) +\n'.encode())
    stdout_b.write(('\nA total of %(count)d people contributed to this release.\nPeople with a "+" by their names contributed a patch for the first time.\nThis list of names is automatically generated, and may not be fully complete.\n\n' % dict(count=len(authors))).encode('utf-8'))
    stdout_b.write(b'\nNOTE: Check this list manually! It is automatically generated and some names\n may be missing.\n')
def rand_unit_3d():
    """Random unit vector in 3D: a random 2D unit direction scaled to a
    random height z in [-1, 1) on the sphere."""
    planar = rand_unit_2d()
    z = (ti.random() * 2) - 1
    radius = ti.sqrt(1 - (z ** 2))  # radius of the horizontal circle at height z
    return ti.Vector([radius * planar[0], radius * planar[1], z])
def fit_model_cv(config_data, model, train_iterator, valid_iterator):
    """Train `model` with 5-fold cross-validation over the training iterator.

    The pristine weights are snapshotted once and restored before every fold;
    after each fold the per-fold best checkpoint is reloaded and evaluated.
    Returns (last fold's history, stored_model flag, list of fold suffixes).
    NOTE(review): `valid_iterator` is unused here; folds come from X_train.
    """
    assert train_iterator.type == 'loader'
    nb_epochs = config_data['nb_epochs']
    batch_size = config_data['batch_size']
    X_train = train_iterator.input_data
    y_train = train_iterator.output_data
    kf = KFold(n_folds=5, shuffle=True, n=len(X_train[0]))
    basename = config_data['output_basename']
    base_path = join(config_data['output_path'], basename)
    initial_weights = join(base_path, 'base_model.h5')
    model.save_weights(initial_weights)
    appendices = []
    for fold_idx, (train_idx, valid_idx) in enumerate(kf):
        # Reset to the pristine weights so folds are independent.
        model.load_weights(initial_weights)
        fold_inputs = [X[train_idx] for X in X_train]
        fold_outputs = [y[train_idx] for y in y_train]
        holdout_inputs = [X[valid_idx] for X in X_train]
        holdout_outputs = [y[valid_idx] for y in y_train]
        appendix = '_{}'.format(fold_idx)
        callbacks = run_utils.get_callbacks(config_data, appendix=appendix)
        stored_model = True
        hist = model.fit(x=fold_inputs, y=fold_outputs, batch_size=batch_size,
                         validation_data=(holdout_inputs, holdout_outputs),
                         nb_epoch=nb_epochs, verbose=1, callbacks=callbacks,
                         class_weight=run_utils.get_classweight(config_data))
        appendices.append(appendix)
        # Evaluate the fold's best checkpoint (written by the callbacks).
        model.load_weights(join(base_path, 'best_model{}.h5'.format(appendix)))
        oline_test = run_utils.get_evaluation(config_data, model, train_iterator, basename, '')
        print(oline_test)
    return (hist, stored_model, appendices)
def add_ml_lib(name, deps=None, path=None, lib_name=None):
    """Create an MLComponent for a library and register it under `name`.

    BUG FIX: the previous default `deps=[]` was a mutable default argument —
    one shared list across every call, so any mutation by MLComponent or a
    caller would leak dependencies between registrations. Callers that omit
    `deps` still get an (independent) empty list.
    """
    c = MLComponent(name, lib_name, path, [] if deps is None else deps)
    reg_component(name, c)
class TensorInfo():
    """Bookkeeping record for one tensor in the compiled graph (layout/placement)."""

    def __init__(self):
        self.tensor_id = (- 1)  # unique id; -1 until assigned
        self.shape = None  # tensor shape; None until known
        self.dtype = DataType.UNKNOWN  # element data type
        self.is_const = False  # True for constants/weights
        self.gaddr = (- 1)  # global-memory address; -1 = unassigned
        self.gsize = 0  # global-memory size (bytes, presumably — confirm units)
        self.loffset = (- 1)  # local-memory offset; -1 = unassigned
        self.nslice = 0  # slice count along N — presumably batch dim; confirm
        self.hslice = 0  # slice count along H — presumably height dim; confirm
        self.l2addr = 0  # L2 buffer address
        self.in_layer = None  # producing layer
        self.out_layers = []  # consuming layers
def append_path_after_vad(data_folder, id, list):
    """Write utterance `id` into its processed bucket folder and record the path.

    The processed wav is only written if it does not exist yet; on success its
    path is appended to `list`.
    NOTE(review): parameters `id` and `list` shadow builtins — kept for
    interface compatibility. The bucket name `id[:5] + id[-4]` uses the single
    character at index -4; confirm `id[-4:]` (last four chars) wasn't intended.
    """
    file = get_path(data_folder, id)
    destin_folder = os.path.join(data_folder, 'processed', (id[:5] + id[(- 4)]))
    if (not os.path.exists(destin_folder)):
        os.makedirs(destin_folder)
    # Hoisted: the same target path was rebuilt three times.
    target = os.path.join(destin_folder, f'{id}.wav')
    if (not os.path.exists(target)):
        write_audio(file, target)
    if os.path.exists(target):
        list.append(target)
def prepare_const_divs(ctx: LeanGenContext, expr: Expression, to_field: bool) -> List[str]:
    """Emit Lean `have` hypotheses rewriting divisions by constants in `expr`.

    Returns the hypothesis names to be used by later `rw` steps.
    NOTE(review): the emitted `have {hyp_name} : x : F, ...` snippets look like
    they lost a universal quantifier (the following proof scripts start with
    `intro x`, which expects a binder) — confirm against the original
    generator before relying on this output.
    """
    hyp_basename = (('h_' + ctx.div_var_basename) + 'c')
    const_div_rw = []
    for (index, (const_expr, div_const, is_full_expr)) in enumerate(rec_get_const_div_inv(expr, ctx.desc_ctx)):
        hyp_name = f'{hyp_basename}{index}'
        if is_full_expr:
            # The whole expression is a constant division: equate it directly.
            ctx.add_main(f'have {hyp_name} : ({const_expr} : F) = ({div_const} : Z),')
            ctx.add_main('{ apply PRIME.div_eq_const,')
            ctx.indent()
            ctx.add_main(('{ apply PRIME.cast_ne_zero, norm_num1, rw [PRIME], ' + 'try { simp_int_casts }, norm_num1 },'))
            simp_consts = ['PRIME']
            ctx.add_main(f"rw [{', '.join(simp_consts)}], try {{ simp_int_casts }}, norm_num1 }},")
            ctx.outdent()
        else:
            # Rewrite x / c into x * c⁻¹, with an optional field-cast helper.
            f_to_z_name = (hyp_name + '_fz')
            if to_field:
                const_div_rw.append(f_to_z_name)
            ctx.add_main(f'have {hyp_name} : x : F, x / ({const_expr} : Z) = x * ({div_const} : Z),')
            ctx.add_main((("{ intro x, apply div_eq_mul_inv', " + 'apply PRIME.int_cast_mul_eq_one, rw [PRIME], ') + 'try { simp_int_casts }, norm_num1 },'))
            if to_field:
                ctx.add_main((f'have {f_to_z_name} : x : F, x / {const_expr} = x / ({const_expr} : Z), ' + '{ intro x, norm_cast }, '))
            const_div_rw.append(hyp_name)
    return const_div_rw
# NOTE(review): this bare tuple looks like the residue of a stripped skip
# decorator, e.g. `@unittest.skipIf(not tf, 'no TF')` — confirm upstream.
((not tf), 'no TF')
def test_demo_sprint_interface():
    """Run the sprint-interface demo script, wrapped in Travis fold markers."""
    import subprocess
    subprocess.check_call(['echo', 'travis_fold:start:test_demo_sprint_interface'])
    # cwd='/' ensures the demo does not depend on the repo working directory.
    subprocess.check_call([py, os.path.abspath('demos/demo-sprint-interface.py')], cwd='/')
    subprocess.check_call(['echo', 'travis_fold:end:test_demo_sprint_interface'])
class TrainerCallbackForSaving(TrainerCallback):
    """HF Trainer callback that forces a checkpoint save at the end of every epoch."""

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Setting should_save makes the Trainer write a checkpoint after this epoch.
        control.should_save = True
def setup_for_distributed(is_master):
    """Monkey-patch builtins.print so only the master process prints.

    Non-master processes stay silent unless a call passes force=True.
    Every emitted line is prefixed with the current wall-clock time.
    """
    original_print = builtins.print

    def timestamped_print(*args, **kwargs):
        # 'force' is consumed here even when the call is suppressed.
        force = kwargs.pop('force', False)
        if not (is_master or force):
            return
        timestamp = datetime.datetime.now().time()
        original_print('[{}] '.format(timestamp), end='')
        original_print(*args, **kwargs)

    builtins.print = timestamped_print
def group_norm(out_channels, affine=True, divisor=1):
    """GroupNorm factory driven by cfg.MODEL.GROUP_NORM.

    `divisor` proportionally scales down channels, dim-per-group, and the
    group count (used for reduced-width heads).
    """
    channels = out_channels // divisor
    groups = get_group_gn(
        channels,
        cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor,
        cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor,
    )
    return torch.nn.GroupNorm(groups, channels, cfg.MODEL.GROUP_NORM.EPSILON, affine)
def get_args():
    """CLI options for CNN question-answering data preprocessing
    (data/GloVe paths, vector size, and length thresholds)."""
    parser = argparse.ArgumentParser()
    home = os.path.expanduser('~')
    parser.add_argument('--source_dir', default=os.path.join(home, 'data', 'cnn', 'questions'))
    parser.add_argument('--target_dir', default='data/cnn')
    parser.add_argument('--glove_dir', default=os.path.join(home, 'data', 'glove'))
    parser.add_argument('--glove_corpus', default='6B')
    parser.add_argument('--glove_vec_size', default=100, type=int)
    # NOTE(review): bool_ is a project-level parser (argparse's plain bool is a pitfall).
    parser.add_argument('--debug', default=False, type=bool_)
    parser.add_argument('--num_sents_th', default=200, type=int)
    parser.add_argument('--ques_size_th', default=30, type=int)
    parser.add_argument('--width', default=5, type=int)
    return parser.parse_args()
def create_stats_table(data_stats=None):
    """Build a Dash DataTable of (Stats, Value) rows.

    Falls back to a single blank row when no stats are given.
    NOTE(review): stats are read from data_stats[''] — presumably the
    empty-string key is the "overall/current dataset" slot; confirm with callers.
    """
    if ((data_stats is None) or (len(data_stats) == 0)):
        data = [{'Stats': '', 'Value': ''}]
    else:
        data = [{'Stats': key, 'Value': value} for (key, value) in data_stats[''].items()]
    table = dash_table.DataTable(id='data-stats', data=data, columns=[{'id': 'Stats', 'name': 'Stats'}, {'id': 'Value', 'name': 'Value'}], editable=False, style_header_conditional=[{'textAlign': 'center', 'font-family': 'Salesforce Sans'}], style_cell_conditional=[{'textAlign': 'center', 'font-family': 'Salesforce Sans'}], style_header=dict(backgroundColor=TABLE_HEADER_COLOR, color='white'), style_data=dict(backgroundColor=TABLE_DATA_COLOR))
    return table
def flags2names(flags):
    """Map a flags bitmask to the list of matching array-flag names.

    Names missing from `wrap` contribute a 0 mask and never match.
    """
    candidate_names = ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', 'CARRAY', 'FARRAY']
    mask = abs(flags)
    return [name for name in candidate_names if mask & getattr(wrap, name, 0)]
class Hypothesis(object):
    """A partial decoding hypothesis for beam search.

    Tracks the decoded tokens, their log probabilities, the decoder state,
    per-step attention distributions, generation probabilities, and the
    coverage vector.
    """

    def __init__(self, tokens, log_probs, state, attn_dists, p_gens, coverage):
        self.tokens = tokens  # list of token ids decoded so far
        self.log_probs = log_probs  # per-token log probabilities
        self.state = state  # decoder state after the last token
        self.attn_dists = attn_dists  # attention distribution per step
        self.p_gens = p_gens  # generation probability per step
        self.coverage = coverage  # coverage vector

    def extend(self, token, log_prob, state, attn_dist, p_gen, coverage):
        """Return a new Hypothesis extended by one decoding step."""
        return Hypothesis(tokens=(self.tokens + [token]), log_probs=(self.log_probs + [log_prob]), state=state, attn_dists=(self.attn_dists + [attn_dist]), p_gens=(self.p_gens + [p_gen]), coverage=coverage)

    @property
    def latest_token(self):
        """Most recently decoded token id."""
        return self.tokens[(- 1)]

    @property
    def log_prob(self):
        """Total log probability of the hypothesis."""
        return sum(self.log_probs)

    @property
    def avg_log_prob(self):
        """Length-normalized log probability (used for ranking beams).

        BUG FIX: this divided the *bound method* self.log_prob by an int,
        raising TypeError at runtime. The three accessors were clearly meant
        to be properties (this body reads `self.log_prob` without calling
        it); the stripped @property decorators are restored.
        """
        return (self.log_prob / len(self.tokens))
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
    """Compute the composed log moment: log(alpha) * steps.

    With verify=True, cross-checks the fast computation against the
    high-precision mpmath implementations (and the lower bound).
    """
    moment = compute_a(sigma, q, lmbd, verbose=verbose)
    if verify:
        mp.dps = 50  # high working precision for the reference values
        moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
        moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
        np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
        if not np.isinf(moment_a_mp):
            np.testing.assert_array_less(moment_b_mp, moment_a_mp)
    return np.inf if np.isinf(moment) else np.log(moment) * steps
def test_sanity_checks():
    """include() with no patterns must raise the empty-filter usage error."""
    with pytest.raises(UsageError, match=filters.ERROR_EMPTY_FILTER):
        filters.FilterSet().include()
def span_overlap(s1, s2):
    """Return the intersection (start, stop) of two half-open spans.

    Returns None when the spans do not overlap (touching spans count as
    non-overlapping).
    """
    lo = max(s1[0], s2[0])
    hi = min(s1[1], s2[1])
    return (lo, hi) if hi > lo else None
class DecisionTransformerPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is unavailable."""
    # Backends required for the real class; checked on instantiation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises a helpful ImportError unless the 'torch' backend is installed.
        requires_backends(self, ['torch'])
def launch_docker():
    """Assemble and run the SuperNNova `docker run` command.

    Mounts the repo and dump directories into the container, forwards the
    host UID/GID, optionally mounts a raw-data dir, and enables GPUs for
    gpu images. Prints troubleshooting hints if the subprocess fails.
    """
    # Removed dead calls: os.getcwd() and os.getuid() results were discarded
    # (getuid is re-evaluated inline where actually needed below).
    snn_dir = os.path.abspath(Path(os.path.dirname(os.path.realpath(__file__))).parent)
    dump_dir = os.path.abspath(Path(snn_dir).parent)
    parser = argparse.ArgumentParser()
    # NOTE(review): --image has no default; omitting it makes args.image None
    # and the 'gpu' in args.image test below raises TypeError — consider
    # required=True upstream.
    parser.add_argument('--image', type=str, choices=['cpu', 'gpu', 'gpu10'], help='Use which image gpu or cpu')
    parser.add_argument('--dump_dir', default=dump_dir, help='Dir to dump results')
    parser.add_argument('--raw_dir', help='Optional dir to point towards data')
    args = parser.parse_args()
    cmd = 'docker run -it --rm '
    cmd += (' --gpus all ' if ('gpu' in args.image) else '')
    cmd += f' -v {snn_dir}:/u/home/SuperNNova -v {args.dump_dir}:/u/home/snndump'
    if args.raw_dir:
        cmd += f' -v {args.raw_dir}:/u/home/raw'
    cmd += f' -e HOST_USER_ID={os.getuid()} -e HOST_USER_GID={os.getgid()} rnn-{args.image}:latest'
    try:
        subprocess.check_call(shlex.split(cmd))
    except Exception as err:
        print(err)
        print('Possible errors:')
        print('You may not have a GPU.')
        print(f'You may not have built the images ==> call make {args.image}')
class ChatGPT_SP(AbstractChatGPT):
    """ChatGPT-based semantic parser: turns (table schema, question) into a SQL command."""

    def __init__(self, api_key: str, api_org: (str | None), model_name='gpt-3.5-turbo-0613', *args, **kwargs):
        super().__init__(api_key, api_org, model_name, *args, **kwargs)

    def prompt(self):
        """Few-shot chat prompt: task instructions plus two worked examples."""
        return [{'role': 'user', 'content': 'I want you to act as a text to SQL model for tabular data.\n I will pass you the schema of the table and one question.\n I want you to parse the question into the SQL command.\n The SQL command must be executable with the schema of the table.\n Do not write explanations. Do not type commands. \n REPLY ONLY WITH THE SQL COMMAND.\n This is an Example:\n Table name: "body-builder", \n Schema: [Name, Surname], \n Questions: "Show all information about each body builder"\n I want you to output:\n "SELECT * FROM "body-builder""\n '}, {'role': 'user', 'content': 'Table name: "student",Schema: [StudentID, Grade, PhoneNumbers]Question: "what are all the phone numbers?"'}, {'role': 'assistant', 'content': 'SELECT "PhoneNumbers" FROM student'}, {'role': 'user', 'content': 'Table name: "student"Schema: [StudentID, Grade, PhoneNumbers]Question: "what is the average grade?"'}, {'role': 'assistant', 'content': 'SELECT AVG(Grade) FROM student'}]

    def process_input(self, table: pd.DataFrame, query: str, tbl_name: str) -> (Any | None):
        """Format one (table, question) pair as the next user chat message."""
        if (tbl_name is None):
            raise ValueError('For Semantic Parsing, it is need the table name for the chatgpt input prompt')
        schema = table.columns.tolist()
        prompt = f'''Table Name: "{tbl_name}",
Schema: {schema},
Question: "{query}"'''
        return {'role': 'user', 'content': prompt}

    def _normalize_api_output(self, api_output):
        # First choice's message content is the predicted SQL string.
        prediction: str = api_output.choices[0].message.content
        return prediction
def imagenet_preprocess_input(images, labels):
    """Apply MobileNetV2 ImageNet preprocessing to images; labels pass through unchanged."""
    return (tf.keras.applications.mobilenet_v2.preprocess_input(images), labels)
def generate_meta_info(save_dir, max_node, divide=40):
    """Enumerate all NAS-Bench-201 cells, shuffle with fixed seeds, and save the meta file.

    NOTE(review): `divide` is accepted but unused here — presumably kept for
    interface parity; confirm with callers.
    """
    aa_nas_bench_ss = get_search_spaces('cell', 'nas-bench-201')
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2))))
    # Fixed seed so the shuffled order (and the spot-check asserts below) is reproducible.
    random.seed(88)
    random.shuffle(archs)
    assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0])
    assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9])
    assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123])
    total_arch = len(archs)
    # Build a 25k/25k train/valid split over the first 50k indices, with its own seed.
    num = 50000
    indexes_5W = list(range(num))
    random.seed(1021)
    random.shuffle(indexes_5W)
    train_split = sorted(list(set(indexes_5W[:(num // 2)])))
    valid_split = sorted(list(set(indexes_5W[(num // 2):])))
    assert ((len(train_split) + len(valid_split)) == num)
    assert ((train_split[0] == 0) and (train_split[10] == 26) and (train_split[111] == 203) and (valid_split[0] == 1) and (valid_split[10] == 18) and (valid_split[111] == 242)), '{:} {:} {:} - {:} {:} {:}'.format(train_split[0], train_split[10], train_split[111], valid_split[0], valid_split[10], valid_split[111])
    splits = {num: {'train': train_split, 'valid': valid_split}}
    info = {'archs': [x.tostr() for x in archs], 'total': total_arch, 'max_node': max_node, 'splits': splits}
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    save_name = (save_dir / 'meta-node-{:}.pth'.format(max_node))
    # Refuse to clobber an existing meta file.
    assert (not save_name.exists()), '{:} already exist'.format(save_name)
    torch.save(info, save_name)
    print('save the meta file into {:}'.format(save_name))
class Config():
    """Simulation parameters (class-level annotated defaults; not a dataclass)."""
    random_seed: int = 1  # RNG seed for reproducibility
    min_age: int = 15  # minimum age modeled — presumably of individuals; confirm
    start_iter: int = 100  # first iteration of the measured window
    end_iter: int = 200  # last iteration of the measured window
    percent: float = 0.01
    harsh_sentence: bool = False  # use the harsh sentencing policy
    random_sentence_type: bool = False  # pick sentence type at random
    random_sentence_bias: float = 0.5  # bias when sentence type is random
    consistent_sentence_length: bool = True
    mean_sentence_harsh: float = 17.0  # mean sentence length, harsh policy
    mean_sentence_lenient: float = 14.0  # mean sentence length, lenient policy
def perform_auto_vertical_scaling(setup_trainer_and_train, config, num_iters=2):
    """Probe, via short trial runs in subprocesses, the largest num_envs and
    per-env batch size that fit, and bake them into `config`.

    `config` is mutated in place during the search; num_episodes and
    use_wandb are restored before returning.
    """
    def launch_process(func, kwargs):
        # Run one trial in a separate process so OOM/crashes don't kill us;
        # re-raise any captured exception so best_param_search sees failure.
        p = ProcessWrapper(target=func, kwargs=kwargs)
        p.start()
        p.join()
        if p.exception:
            raise p.exception

    def set_num_envs_and_train(num_envs, run_config=config):
        # NOTE: run_config defaults to (and aliases) `config`, so these edits
        # mutate the same dict passed as run_configuration below.
        run_config['trainer']['num_envs'] = num_envs
        run_config['trainer']['train_batch_size'] = num_envs
        run_config['trainer']['num_episodes'] = ((num_iters * run_config['trainer']['train_batch_size']) / run_config['env']['episode_length'])
        launch_process(setup_trainer_and_train, kwargs={'run_configuration': config, 'verbose': False})

    def set_batch_size_per_env_and_train(train_batch_size_per_env, run_config=config):
        run_config['trainer']['train_batch_size'] = (train_batch_size_per_env * config['trainer']['num_envs'])
        run_config['trainer']['num_episodes'] = ((num_iters * run_config['trainer']['train_batch_size']) / run_config['env']['episode_length'])
        launch_process(setup_trainer_and_train, kwargs={'run_configuration': config, 'verbose': False})

    # Save settings that the search clobbers.
    num_episodes = config['trainer']['num_episodes']
    use_wandb = config['saving'].get('use_wandb', False)
    config['saving']['use_wandb'] = False
    print(('=' * 80))
    print('Determining the maximum number of environment replicas to run in parallel.')
    print(('=' * 80))
    num_envs = config['trainer']['num_envs']
    max_envs = best_param_search(low=num_envs, func=set_num_envs_and_train)
    config['trainer']['num_envs'] = max_envs
    print(('=' * 80))
    print('Determining the maximum training batch size.')
    print(('=' * 80))
    max_batch_size_per_env = best_param_search(func=set_batch_size_per_env_and_train)
    config['trainer']['train_batch_size'] = (max_batch_size_per_env * config['trainer']['num_envs'])
    # Restore the settings the search overwrote.
    config['trainer']['num_episodes'] = num_episodes
    config['saving']['use_wandb'] = use_wandb
    return config
class Transformation(schema_utils.Model):
    """Schema model describing one optplan transformation step."""
    name = types.StringType()  # transformation identifier
    parametrization = ReferenceType(Parametrization)  # parametrization the transformation acts on
    parameter_list = types.ListType(types.ModelType(SetParam))  # parameters to set before running
    transformation = OptplanPolyModelType(optplan.NodeMetaType.TRANSFORMATION)  # the transformation node itself
# NOTE(review): this bare tuple looks like the residue of a stripped Dash
# decorator, e.g. `@app.callback(Output(...), Input(...))` — confirm upstream.
(Output('cytoscape-hover-output', 'children'), Input('cytoscape', 'mouseoverNodeData'))
def hover_graph_node(data):
    """Dash callback body: show the hovered node's id; no update when nothing is hovered."""
    if (data is None):
        return no_update
    return f"Node ID: {data['id']}"
# NOTE(review): bare call looks like the residue of a stripped decorator
# (`@_decorator(0)`) — confirm upstream.
_decorator(0)
def get_fans(html):
    """Parse a fan/follower count out of a profile page; 0 on any failure."""
    cont = public.get_left(html)
    if (cont == ''):
        return 0
    soup = BeautifulSoup(cont, 'lxml')
    try:
        # The second <strong> presumably holds the fan count — verify against page layout.
        return int(soup.find_all('strong')[1].get_text())
    except Exception:
        return 0
def filter_clashed_by_priority(chunks: List[tuple], allow_level: int=NESTED):
    """Greedy filter: keep each chunk only if it does not clash with any
    already-kept chunk (earlier chunks therefore have priority)."""
    kept = []
    for candidate in chunks:
        clashes = any(_is_clashed(candidate, kept_ck, allow_level=allow_level) for kept_ck in kept)
        if not clashes:
            kept.append(candidate)
    return kept
def unzip(zip_path):
    """Extract every .zip directly under zip_path; return the extraction dirs.

    Multi-part archives ('foo_part1.zip', ...) all extract into the shared
    'foo' folder, and nested <type>/<name>/ layouts are flattened up a level.
    """
    zip_files = mmcv.scandir(zip_path, suffix='zip', recursive=False)
    import shutil
    import zipfile
    unzip_folders = []
    for zip_file in zip_files:
        zip_file = osp.join(zip_path, zip_file)
        # 'name_partN.zip' -> destination folder 'name'
        unzip_folder = zip_file.replace('.zip', '').split('_part')[0]
        print(f'Unzip {zip_file} to {unzip_folder}')
        with zipfile.ZipFile(zip_file, 'r') as zip_ref:
            zip_ref.extractall(unzip_folder)
        data_name = osp.basename(unzip_folder)
        data_type = data_name.split('_')[0]
        if osp.isdir(osp.join(unzip_folder, data_type, data_name)):
            # Archive contained <type>/<name>/...: hoist its contents up.
            data_folder = osp.join(unzip_folder, data_type, data_name)
            for i in os.listdir(data_folder):
                shutil.move(osp.join(data_folder, i), unzip_folder)
            shutil.rmtree(osp.join(unzip_folder, data_type))
        unzip_folders.append(unzip_folder)
    return unzip_folders
class KleberTreeNode(Element):
    """Node in a Kleber tree: a dominant weight plus the root on the edge to its parent.

    NOTE(review): the bare `_attribute` / `_method` statements below look like
    residue of stripped decorators (plausibly `@lazy_attribute` on depth and
    `@cached_method` on multiplicity, since `self.depth` is read without being
    called elsewhere in this class) — confirm against the original source.
    """

    def __init__(self, parent_obj, node_weight, dominant_root, parent_node=None):
        self.parent_node = parent_node
        self.children = []
        self.weight = node_weight
        self.up_root = dominant_root  # root subtracted on the upward edge
        Element.__init__(self, parent_obj)

    _attribute

    def depth(self):
        """Number of edges from this node up to the tree root."""
        depth = (- 1)
        cur = self
        while (cur is not None):
            depth += 1
            cur = cur.parent_node
        return depth

    _method

    def multiplicity(self):
        """Multiplicity of this node as a product of binomial coefficients."""
        if (self.parent_node is None):
            return Integer(1)
        mult = Integer(1)
        for (a, m) in self.up_root:
            p = self.weight[a]
            for (r, s) in self.parent().B:
                if ((r == a) and (s > self.depth)):
                    p -= (s - self.depth)
            mult *= binomial((m + p), m)
        # Walk up the tree accumulating the analogous factor for each ancestor.
        prev_up_root = self.up_root
        cur = self.parent_node
        while (cur.parent_node is not None):
            root_diff = (cur.up_root - prev_up_root)
            for (a, m) in root_diff:
                p = cur.weight[a]
                for (r, s) in self.parent().B:
                    if ((r == a) and (s > cur.depth)):
                        p -= (s - cur.depth)
                mult *= binomial((m + p), m)
            prev_up_root = cur.up_root
            cur = cur.parent_node
        return mult

    def __hash__(self):
        return (hash(self.depth) ^ hash(self.weight))

    def _richcmp_(self, rhs, op):
        """Compare by depth, then parent node, then weight."""
        lx = self.depth
        rx = rhs.depth
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        lx = self.parent_node
        rx = rhs.parent_node
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        return richcmp(self.weight, rhs.weight, op)

    def _repr_(self):
        return ('Kleber tree node with weight %s and upwards edge root %s' % (list(self.weight.to_vector()), list(self.up_root.to_vector())))

    def _latex_(self):
        """LaTeX as (multiplicity)V_{sum of fundamental weights}; bracketed in special cases."""
        ret_str = 'V_{'
        if (self.multiplicity() != 1):
            ret_str = (repr(self.multiplicity()) + ret_str)
        for pair in self.weight:
            if (pair[1] > 1):
                ret_str += (((repr(pair[1]) + '\\omega_{') + repr(pair[0])) + '}+')
            elif (pair[1] == 1):
                ret_str += (('\\omega_{' + repr(pair[0])) + '}+')
        if (ret_str[(- 1)] == '{'):
            # Zero weight: nothing was appended after 'V_{'.
            ret_str += '0}'
        else:
            # Drop the trailing '+'.
            ret_str = (ret_str[:(- 1)] + '}')
        ct = self.parent()._cartan_type
        if ((ct.type() == 'BC') or (ct.dual().type() == 'BC')):
            return (('[' + ret_str) + ']')
        elif (not ct.is_simply_laced()):
            s_factors = self.parent()._folded_ct.scaling_factors()
            gamma = max(s_factors)
            if (gamma > 1):
                L = [self.parent()._folded_ct.folding_orbit()[a][0] for a in range(1, len(s_factors)) if (s_factors[a] == gamma)]
            else:
                L = []
            if (((self.depth % gamma) == 0) or all(((self.up_root[a] == 0) for a in L))):
                return (('[' + ret_str) + ']')
        return ret_str
def test():
    """Smoke test: forward a random CIFAR-sized batch through MobileNetV2 and print the output size."""
    net = MobileNetV2()
    # NOTE: torch.autograd.Variable is legacy API; plain tensors suffice on modern torch.
    x = Variable(torch.randn(2, 3, 32, 32))
    y = net(x)
    print(y.size())
def run(portfolio, executable, sas_file, plan_manager, time, memory):
    """Execute a planner portfolio within the given time/memory budget.

    Loads the portfolio's attributes, validates the time limit, then
    dispatches to the optimal or satisficing runner and folds the per-config
    exit codes into a single portfolio exit code.
    """
    attributes = get_portfolio_attributes(portfolio)
    configs = attributes['CONFIGS']
    optimal = attributes['OPTIMAL']
    final_config = attributes.get('FINAL_CONFIG')
    final_config_builder = attributes.get('FINAL_CONFIG_BUILDER')
    if 'TIMEOUT' in attributes:
        returncodes.exit_with_driver_input_error('The TIMEOUT attribute in portfolios has been removed. Please pass a time limit to fast-downward.py.')
    if time is None:
        if sys.platform == 'win32':
            returncodes.exit_with_driver_unsupported_error(limits.CANNOT_LIMIT_TIME_MSG)
        else:
            returncodes.exit_with_driver_input_error('Portfolios need a time limit. Please pass --search-time-limit or --overall-time-limit to fast-downward.py.')
    # Absolute deadline measured from overall elapsed driver time.
    deadline = util.get_elapsed_time() + time
    if optimal:
        exitcodes = run_opt(configs, executable, sas_file, plan_manager, deadline, memory)
    else:
        exitcodes = run_sat(configs, executable, sas_file, plan_manager, final_config, final_config_builder, deadline, memory)
    return returncodes.generate_portfolio_exitcode(list(exitcodes))
class BucketSampler(Sampler):
    """Sampler that batches together samples of similar sequence length.

    Samples are sorted by length, split into `num_buckets` contiguous
    buckets, shuffled within each bucket, and cut into batches; the returned
    value is a flat index list (batch boundaries implicit every batch_size).
    """

    def __init__(self, num_buckets=10, batch_size=None, seq_len_field_name='seq_len'):
        self.num_buckets = num_buckets
        self.batch_size = batch_size
        self.seq_len_field_name = seq_len_field_name

    def set_batch_size(self, batch_size):
        """Set the batch size after construction (required before __call__ if not given)."""
        self.batch_size = batch_size

    def __call__(self, data_set):
        """Return a flat, bucket-shuffled ordering of all sample indices."""
        if (self.batch_size is None):
            raise RuntimeError('batch_size is None.')
        seq_lens = data_set.get_all_fields()[self.seq_len_field_name].content
        total_sample_num = len(seq_lens)
        bucket_indexes = []
        assert (total_sample_num >= self.num_buckets), 'The number of samples is smaller than the number of buckets.'
        num_sample_per_bucket = (total_sample_num // self.num_buckets)
        for i in range(self.num_buckets):
            bucket_indexes.append([(num_sample_per_bucket * i), (num_sample_per_bucket * (i + 1))])
        # The last bucket absorbs the remainder.
        bucket_indexes[(- 1)][1] = total_sample_num
        sorted_seq_lens = list(sorted([(idx, seq_len) for (idx, seq_len) in zip(range(total_sample_num), seq_lens)], key=(lambda x: x[1])))
        batchs = []
        left_init_indexes = []
        for b_idx in range(self.num_buckets):
            start_idx = bucket_indexes[b_idx][0]
            end_idx = bucket_indexes[b_idx][1]
            sorted_bucket_seq_lens = sorted_seq_lens[start_idx:end_idx]
            # Leftovers from the previous bucket spill into this one.
            left_init_indexes.extend([tup[0] for tup in sorted_bucket_seq_lens])
            num_batch_per_bucket = (len(left_init_indexes) // self.batch_size)
            np.random.shuffle(left_init_indexes)
            for i in range(num_batch_per_bucket):
                batchs.append(left_init_indexes[(i * self.batch_size):((i + 1) * self.batch_size)])
            left_init_indexes = left_init_indexes[(num_batch_per_bucket * self.batch_size):]
        # BUG FIX: the original `left_init_indexes != 0` compared a list with an
        # int and was therefore always True, appending an empty batch when
        # nothing was left over. Test list truthiness instead.
        if left_init_indexes:
            batchs.append(left_init_indexes)
        np.random.shuffle(batchs)
        return list(chain(*batchs))
def test_union_float64_datetime64_parameters():
    """Round-trip: a parameterized union type survives str -> parse -> str."""
    t = UnionType([NumpyType('float64'), NumpyType('datetime64')], {'__array__': 'Something'})
    assert (str(parser.parse(str(t))) == str(t))
class Func_chebyshev_T(ChebyshevFunction):
    """Chebyshev polynomial of the first kind, T_n(x), as a symbolic function."""

    def __init__(self):
        # Register names for conversion to external systems.
        ChebyshevFunction.__init__(self, 'chebyshev_T', nargs=2, conversions=dict(maxima='chebyshev_t', mathematica='ChebyshevT', sympy='chebyshevt', giac='tchebyshev1'))

    def _latex_(self):
        return 'T_n'

    def _print_latex_(self, n, z):
        return 'T_{{{}}}\\left({}\\right)'.format(latex(n), latex(z))

    def _eval_special_values_(self, n, x):
        """Closed forms at x in {1, -1, 0}; raise when no special value applies."""
        if (x == 1):
            return x
        if (x == (- 1)):
            return (x ** n)
        if (x == 0):
            # 0 for odd n, (-1)^(n/2) for even n.
            return (((1 + ((- 1) ** n)) * ((- 1) ** (n / 2))) / 2)
        raise ValueError('no special value found')

    def _evalf_(self, n, x, **kwds):
        """Numeric evaluation via mpmath, coercing x into RR/CC when needed."""
        try:
            real_parent = kwds['parent']
        except KeyError:
            real_parent = parent(x)
        if (not isinstance(real_parent, (sage.rings.abc.RealField, sage.rings.abc.ComplexField))):
            # Try to coerce into a real or complex field before evaluating.
            if (x in RR):
                x = RR(x)
                real_parent = RR
            elif (x in CC):
                x = CC(x)
                real_parent = CC
        if (not isinstance(real_parent, (sage.rings.abc.RealField, sage.rings.abc.ComplexField))):
            raise TypeError('cannot evaluate chebyshev_T with parent {}'.format(real_parent))
        return _mpmath_utils_call(_mpmath_chebyt, n, x, parent=real_parent)

    def eval_formula(self, n, x):
        """Explicit sum formula for integer n (uses T_{-n} = T_n)."""
        if (n < 0):
            return self.eval_formula((- n), x)
        elif (n == 0):
            return parent(x).one()
        res = parent(x).zero()
        for j in range(((n // 2) + 1)):
            f = ((factorial(((n - 1) - j)) / factorial(j)) / factorial((n - (2 * j))))
            res += ((((- 1) ** j) * ((2 * x) ** (n - (2 * j)))) * f)
        res *= (n / 2)
        return res

    def eval_algebraic(self, n, x):
        """Evaluate via the index-halving recursion (suitable for algebraic x)."""
        if (n == 0):
            return parent(x).one()
        if (n < 0):
            # T_{-n} = T_n
            return self._eval_recursive_((- n), x)[0]
        return self._eval_recursive_(n, x)[0]

    def _eval_recursive_(self, n, x, both=False):
        """Return (T_n(x), T_{n-1}(x) if both else False) by index halving."""
        if (n == 1):
            return (x, parent(x).one())
        assert (n >= 2)
        (a, b) = self._eval_recursive_(((n + 1) // 2), x, (both or (n % 2)))
        if ((n % 2) == 0):
            # T_{2k} = 2*T_k^2 - 1
            return ((((2 * a) * a) - 1), (both and (((2 * a) * b) - x)))
        else:
            # T_{2k+1} = 2*T_{k+1}*T_k - x
            return ((((2 * a) * b) - x), (both and (((2 * b) * b) - 1)))

    def _eval_numpy_(self, n, x):
        from scipy.special import eval_chebyt
        return eval_chebyt(n, x)

    def _derivative_(self, n, x, diff_param):
        """d/dx T_n(x) = n * U_{n-1}(x); differentiating in n is unsupported."""
        if (diff_param == 0):
            raise NotImplementedError('derivative w.r.t. to the index is not supported yet')
        elif (diff_param == 1):
            return (n * chebyshev_U((n - 1), x))
        raise ValueError('illegal differentiation parameter {}'.format(diff_param))
def make_init_buffer_state(sdfg):
    """Add an SDFG state that zero-initializes the on-chip histogram buffer.

    NOTE(review): relies on module-level `num_bins` for both the array shape
    and the 'init_map' range string — confirm they stay in sync.
    """
    state = sdfg.add_state('init_buffer')
    hist_buffer = state.add_array('hist_buffer', (num_bins,), dace.uint32, transient=True, storage=dace.dtypes.StorageType.FPGA_Local)
    (entry, exit) = state.add_map('init_map', {'i': '0:num_bins'})
    tasklet = state.add_tasklet('zero', {}, {'out'}, 'out = 0')
    # Empty memlet: the edge only sequences the tasklet inside the map; no data flows in.
    state.add_nedge(entry, tasklet, dace.memlet.Memlet())
    state.add_memlet_path(tasklet, exit, hist_buffer, src_conn='out', memlet=dace.memlet.Memlet.simple(hist_buffer, 'i'))
    return state
class _Pooling3D(Layer):
    """Abstract base for 3D pooling layers; subclasses implement _pooling_function."""

    def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, **kwargs):
        super(_Pooling3D, self).__init__(**kwargs)
        # Default stride equals the pool size (non-overlapping windows).
        if (strides is None):
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        # Expect 5D input: (batch, channels, d1, d2, d3) or (batch, d1, d2, d3, channels).
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        """Output shape after pooling; normalize_data_format guarantees one of the two branches."""
        if (self.data_format == 'channels_first'):
            len_dim1 = input_shape[2]
            len_dim2 = input_shape[3]
            len_dim3 = input_shape[4]
        elif (self.data_format == 'channels_last'):
            len_dim1 = input_shape[1]
            len_dim2 = input_shape[2]
            len_dim3 = input_shape[3]
        len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0], self.padding, self.strides[0])
        len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1], self.padding, self.strides[1])
        len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2], self.padding, self.strides[2])
        if (self.data_format == 'channels_first'):
            return (input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3)
        elif (self.data_format == 'channels_last'):
            return (input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4])

    def _pooling_function(self, inputs, pool_size, strides, padding, data_format):
        # Subclasses supply the actual pooling op (max/avg).
        raise NotImplementedError

    def call(self, inputs):
        output = self._pooling_function(inputs=inputs, pool_size=self.pool_size, strides=self.strides, padding=self.padding, data_format=self.data_format)
        return output

    def get_config(self):
        """Serializable layer config merged with the base Layer config."""
        config = {'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format}
        base_config = super(_Pooling3D, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def _run_test_wrapper(root: str, test: str, timeout: float, shared_list: list):
    """Run one test and append its result to the (cross-process) shared list."""
    result = code_metrics_helper.run_test(root, test, timeout)
    shared_list.append(result)
def convnext_xlarge_config() -> ml_collections.ConfigDict:
    """ConvNeXt-XL config: the large config with widened per-stage dims."""
    config = convnext_large_config()
    config.dims = [256, 512, 1024, 2048]
    return config
class DownloadProgressBar(tqdm):
    """A `tqdm` progress bar with an `update_to` hook suitable for
    `urllib.request.urlretrieve`-style reporthook callbacks.

    Constructor arguments mirror `tqdm.__init__` one-to-one; the only
    difference is that tqdm's `ascii` parameter is exposed here as
    `use_ascii` to avoid shadowing the builtin.
    """
    def __init__(self, iterable: (Iterable | None)=None, desc: (str | None)=None, total: ((int | float) | None)=None, leave: (bool | None)=True, file: ((io.TextIOWrapper | io.StringIO) | None)=None, ncols: (int | None)=None, mininterval: (float | None)=0.1, maxinterval: (float | None)=10.0, miniters: ((int | float) | None)=None, use_ascii: ((bool | str) | None)=None, disable: (bool | None)=False, unit: (str | None)='it', unit_scale: (((bool | int) | float) | None)=False, dynamic_ncols: (bool | None)=False, smoothing: (float | None)=0.3, bar_format: (str | None)=None, initial: ((int | float) | None)=0, position: (int | None)=None, postfix: (dict | None)=None, unit_divisor: (float | None)=1000, write_bytes: (bool | None)=None, lock_args: (tuple | None)=None, nrows: (int | None)=None, colour: (str | None)=None, delay: (float | None)=0, gui: (bool | None)=False, **kwargs):
        # Pure pass-through to tqdm; `use_ascii` maps onto tqdm's `ascii`.
        super().__init__(iterable=iterable, desc=desc, total=total, leave=leave, file=file, ncols=ncols, mininterval=mininterval, maxinterval=maxinterval, miniters=miniters, ascii=use_ascii, disable=disable, unit=unit, unit_scale=unit_scale, dynamic_ncols=dynamic_ncols, smoothing=smoothing, bar_format=bar_format, initial=initial, position=position, postfix=postfix, unit_divisor=unit_divisor, write_bytes=write_bytes, lock_args=lock_args, nrows=nrows, colour=colour, delay=delay, gui=gui, **kwargs)
        # Re-declared for type checkers: update_to() may overwrite the total.
        self.total: ((int | float) | None)
    def update_to(self, chunk_number: int=1, max_chunk_size: int=1, total_size=None) -> None:
        """Reporthook-compatible updater.

        Args match urlretrieve's reporthook: (blocks transferred so far,
        block size in bytes, total size). Advances the bar by the delta
        between downloaded bytes and the bar's current position `self.n`.
        """
        if (total_size is not None):
            self.total = total_size
        self.update(((chunk_number * max_chunk_size) - self.n))
def test_ByteMaskedArray_NumpyArray():
    """Round-trip ByteMaskedArray through to_buffers/from_buffers for both
    valid_when polarities and check the contents survive."""
    for mask, valid_when in [([1, 0, 1, 0, 1], True), ([0, 1, 0, 1, 0], False)]:
        layout = ak.contents.bytemaskedarray.ByteMaskedArray(
            ak.index.Index(np.array(mask, np.int8)),
            ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
            valid_when=valid_when,
        )
        assert to_list(ak_from_buffers(*ak_to_buffers(layout))) == to_list(layout)
class ImgDataset(torch.utils.data.Dataset):
    """Minimal image dataset.

    Optionally applies an albumentations transform to each image and, when
    labels are provided, yields (image, label) pairs instead of bare images.
    """

    def __init__(self, imgs, labels=None, alb_transform=None):
        self.imgs = imgs
        self.labels = labels
        self.alb_transform = alb_transform

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        img = self.imgs[idx]
        # Albumentations transforms take and return dicts keyed by 'image'.
        if self.alb_transform is not None:
            img = self.alb_transform(image=img)['image']
        if self.labels is None:
            return img
        return (img, self.labels[idx])
class ContinuousGridworld():
    """A continuous-state 2D gridworld.

    The agent lives in [-1, 1]^2; the square is partitioned into tiles parsed
    from a text grid file. Tiles govern passability, movement speed, rewards
    and per-tile observations. Several grid files can be cycled through every
    `switch_grid_every` environment steps.
    """

    def __init__(self, grid_files, switch_grid_every=None, start_pos=(0.0, 0.0), dt=0.1, num_collision_steps=10, grid_kwargs=None, act_noise=0.0):
        self.grid_files = grid_files
        self.switch_grid_every = switch_grid_every
        self.start_pos = start_pos
        self.dt = dt
        self.act_noise = act_noise
        self.num_collision_steps = num_collision_steps
        self.grid_kwargs = (grid_kwargs if (grid_kwargs is not None) else dict())
        self._grids_dir = os.path.join(os.path.dirname(__file__), 'grids')
        self._grid = []
        self._num_steps_total = 0
        self._cur_grid_ind = 0
        self._x = np.array(start_pos)
        # step() writes into _agent_infos['env_infos'] unconditionally.
        self._agent_infos = dict(env_infos=dict())
        obs_dim = self.parse_grid(self.grid_files[self._cur_grid_ind])
        self.observation_space = gym.spaces.Box((- np.ones(obs_dim)), np.ones(obs_dim))
        self.action_space = gym.spaces.Box(np.array([(- 1), (- 1)]), np.array([1, 1]))

    def step(self, action):
        """Advance one step; returns (next_obs, reward, done, env_infos).

        done is always False (the episode never terminates from inside).
        """
        # Copy instead of `+=` so the caller's action array is not mutated
        # in place (ndarray `+=` would modify the argument).
        action = action + (np.random.randn(*action.shape) * self.act_noise)
        action = np.clip(action, (- 1), 1)
        # Integrate in sub-steps so the agent cannot tunnel through an
        # impassable tile within a single dt.
        ddt = (self.dt / self.num_collision_steps)
        for _ in range(self.num_collision_steps):
            cur_ind = self.get_index(self._x)
            cur_tile = self._grid[cur_ind[1]][cur_ind[0]]
            state_infos = self.get_state_infos()
            delta_x = 0
            if cur_tile.can_pass_through(state_infos, self._agent_infos):
                speed = cur_tile.get_speed(state_infos, self._agent_infos)
                delta_x += ((ddt * action) * speed)
            else:
                break
            next_x = np.clip((self._x + delta_x), (- 1), 1)
            next_ind = self.get_index(next_x)
            next_tile = self._grid[next_ind[1]][next_ind[0]]
            next_state_infos = self.get_state_infos(next_x, (self._num_steps_total + 1))
            if next_tile.can_pass_through(next_state_infos, self._agent_infos):
                self._x = next_x
            else:
                break
        state_infos = self.get_state_infos()
        for row in self._grid:
            for tile in row:
                tile.update_agent_infos(state_infos, self._agent_infos)
        self._agent_infos['env_infos']['x'] = self._x[0]
        self._agent_infos['env_infos']['y'] = self._x[1]
        next_obs = self.get_obs(state_infos=state_infos)
        reward = self.get_reward(state_infos=state_infos)
        done = False
        env_infos = copy.deepcopy(self._agent_infos['env_infos'])
        self._num_steps_total += 1
        if ((self.switch_grid_every is not None) and ((self._num_steps_total % self.switch_grid_every) == 0)):
            self.advance_grid()
        return (next_obs, reward, done, env_infos)

    def reset(self):
        """Reset the agent position and per-episode info; tiles reset too."""
        self._x = np.array(self.start_pos)
        # Seed with an 'env_infos' sub-dict for consistency with __init__;
        # without it, step() would KeyError on _agent_infos['env_infos'].
        self._agent_infos = dict(env_infos=dict())
        for row in self._grid:
            for tile in row:
                tile.reset(self._agent_infos)
        return self.get_obs()

    def get_meta_infos(self):
        """Snapshot of env bookkeeping (grid, step count, position, infos)."""
        return dict(grid_file=self.grid_files[self._cur_grid_ind], num_steps=self._num_steps_total, cur_grid_ind=self._cur_grid_ind, x=self._x.copy(), agent_infos=copy.deepcopy(self._agent_infos))

    def get_obs(self, state_infos=None):
        """Concatenate the agent position with every tile's observation."""
        if (state_infos is None):
            state_infos = self.get_state_infos()
        obs = [self._x]
        for row in self._grid:
            for tile in row:
                obs.append(tile.get_obs(state_infos, self._agent_infos))
        return np.concatenate(obs)

    def get_reward(self, state_infos=None):
        """Sum per-tile rewards, clamped to [-100, 100]."""
        if (state_infos is None):
            state_infos = self.get_state_infos()
        reward = 0
        for row in self._grid:
            for tile in row:
                reward += tile.get_reward(state_infos, self._agent_infos)
        reward = min(max(reward, (- 100)), 100)
        return reward

    def get_state_infos(self, x=None, num_steps_total=None):
        """Bundle (step count, tile indices, position) for tile callbacks."""
        if (x is None):
            x = self._x
        if (num_steps_total is None):
            num_steps_total = self._num_steps_total
        inds = self.get_index(x)
        state_infos = dict(num_steps_total=num_steps_total, agent_inds=inds, agent_position=x)
        return state_infos

    def advance_grid(self):
        """Cycle to the next grid file (wrapping around) and re-parse it."""
        self._cur_grid_ind = ((self._cur_grid_ind + 1) % len(self.grid_files))
        self.parse_grid(self.grid_files[self._cur_grid_ind])

    def get_position(self, inds, lens=None):
        """Map integer (col, row) indices to the tile-center point in [-1, 1]^2."""
        if (lens is None):
            lens = (len(self._grid[0]), len(self._grid))
        (inds, lens) = (np.array(inds), np.array(lens))
        x = ((2 * (((2 * inds) + 1) / (2 * lens))) - 1)
        return x

    def get_index(self, x, lens=None):
        """Map a position in [-1, 1]^2 to integer (col, row) tile indices."""
        if (lens is None):
            lens = (len(self._grid[0]), len(self._grid))
        lens = np.array(lens)
        ind_vals = ((0.5 * (x + 1)) * lens)
        # `np.int` was removed from NumPy (>=1.24); plain `int` is equivalent.
        inds = np.rint(ind_vals).astype(int)
        inds[0] = min(inds[0], (lens[0] - 1))
        inds[1] = min(inds[1], (lens[1] - 1))
        return (inds[0], inds[1])

    def parse_grid(self, grid_file):
        """Load `grid_file` (.txt) into self._grid and return the obs dim.

        The observation dimension is 2 (agent position) plus one block of
        class-level info per distinct tile class and one block of unique
        info per tile.
        """
        file_path = os.path.join(self._grids_dir, grid_file)
        rows = []
        with open((file_path + '.txt'), 'r') as f:
            row = f.readline()
            while row:
                # NOTE(review): assumes every line ends with '\n'; a file
                # without a trailing newline would lose its last character.
                rows.append(row[:(- 1)])
                row = f.readline()
        lens = (len(rows[0]), len(rows))
        self._grid = []
        obs_dim = 2
        tile_classes = set()
        (x, y) = (0, 0)
        for row in rows:
            self._grid.append([])
            x = 0
            for char in row:
                index = (x, y)
                tile_class = CHAR_TO_TILE[char]
                tile = tile_class(index_in_grid=index, position_in_grid=self.get_position(index, lens), **self.grid_kwargs)
                self._grid[(- 1)].append(tile)
                # Count class-level info once per distinct tile class.
                # (Previously the class was added to the set *before* this
                # check, so the guard was always False and class_info_dim
                # was never counted.)
                if (tile_class not in tile_classes):
                    tile_classes.add(tile_class)
                    obs_dim += tile.class_info_dim
                obs_dim += tile.unique_info_dim
                x += 1
            y += 1
        return obs_dim

    def get_rgb_array(self):
        """Render the grid as an (width, height, 3) RGB array of tile colors."""
        state_infos = self.get_state_infos()
        agent_infos = self._agent_infos
        rgb_array = np.zeros((len(self._grid[0]), len(self._grid), 3))
        for (y, row) in enumerate(self._grid):
            for (x, tile) in enumerate(row):
                rgb_array[(x, y)] = tile.get_plot_color(state_infos, agent_infos)
        return rgb_array
class SingleDiscCond(nn.Module):
    """Class-conditional single-scale discriminator head.

    Builds a stack of downsampling blocks from `start_sz` to `end_sz`, then
    projects features against a class embedding (projection discriminator).
    The channel schedule follows the midas-style `nfc_midas` table.
    """
    def __init__(self, nc=None, ndf=None, start_sz=256, end_sz=8, head=None, patch=False, c_dim=1000, cmap_dim=64, rand_embedding=False):
        super().__init__()
        self.cmap_dim = cmap_dim
        # Channel counts per spatial resolution.
        nfc_midas = {4: 512, 8: 512, 16: 256, 32: 128, 64: 64, 128: 64, 256: 32, 512: 16, 1024: 8}
        # Snap a non-standard start size to the nearest tabulated resolution.
        if (start_sz not in nfc_midas.keys()):
            sizes = np.array(list(nfc_midas.keys()))
            start_sz = sizes[np.argmin(abs((sizes - start_sz)))]
        self.start_sz = start_sz
        # With an explicit ndf, use a flat channel count at every resolution.
        if (ndf is None):
            nfc = nfc_midas
        else:
            nfc = {k: ndf for (k, v) in nfc_midas.items()}
        # Without a head conv, the first block must accept the raw nc channels.
        if ((nc is not None) and (head is None)):
            nfc[start_sz] = nc
        layers = []
        if head:
            layers += [conv2d(nc, nfc[256], 3, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)]
        DB = (DownBlockPatch if patch else DownBlock)
        # Halve the resolution until end_sz is reached; note start_sz is
        # mutated here, so self.start_sz keeps the original value.
        while (start_sz > end_sz):
            layers.append(DB(nfc[start_sz], nfc[(start_sz // 2)]))
            start_sz = (start_sz // 2)
        self.main = nn.Sequential(*layers)
        # 4x4 conv collapsing features to the projection dimension.
        self.cls = conv2d(nfc[end_sz], self.cmap_dim, 4, 1, 0, bias=False)
        # NOTE(review): unpickling executes arbitrary code; only load this
        # file from a trusted source. Path is relative to the working dir.
        embed_path = 'in_embeddings/tf_efficientnet_lite0.pkl'
        with open(embed_path, 'rb') as f:
            self.embed = pickle.Unpickler(f).load()['embed']
        print(f'loaded imagenet embeddings from {embed_path}: {self.embed}')
        if rand_embedding:
            # Re-run the embedding's __init__ to discard pretrained weights.
            self.embed.__init__(num_embeddings=self.embed.num_embeddings, embedding_dim=self.embed.embedding_dim)
            print(f'initialized embeddings with random weights')
        self.embed_proj = FullyConnectedLayer(self.embed.embedding_dim, self.cmap_dim, activation='lrelu')
    def forward(self, x, c):
        # c is a one-hot (or logit) class tensor; argmax picks the label id.
        h = self.main(x)
        out = self.cls(h)
        cmap = self.embed_proj(self.embed(c.argmax(1))).unsqueeze((- 1)).unsqueeze((- 1))
        # Projection-discriminator output, scaled by 1/sqrt(cmap_dim).
        out = ((out * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)))
        return out
class TimesformerModel(metaclass=DummyObject):
    """Import-time placeholder for the real TimesformerModel.

    Standard dummy-object pattern: instantiating it raises a helpful error
    when the required backend (torch) is not installed.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_dataloader(logger, args, input_file, is_training, batch_size, num_epochs, tokenizer, index=None):
    """Build a MyDataLoader over cached (or freshly computed) QA features.

    Features are cached to a .pkl path derived from the input file and the
    relevant preprocessing hyperparameters; eval runs also cache the raw
    examples. Returns (dataloader, examples, flattened_features,
    num_train_steps).
    """
    n_paragraphs = args.n_paragraphs
    # At eval time only the last entry of a comma-separated list is used.
    if not is_training and ',' in n_paragraphs:
        n_paragraphs = n_paragraphs.split(',')[-1]
    feature_save_path = input_file.replace('.json', '-{}-{}-{}.pkl'.format(args.max_seq_length, n_paragraphs, args.max_n_answers))
    if os.path.exists(feature_save_path):
        logger.info('Loading saved features from {}'.format(feature_save_path))
        with open(feature_save_path, 'rb') as f:
            cached = pkl.load(f)
        train_features = cached['features']
        # Older caches may not contain examples.
        examples = cached.get('examples', None)
    else:
        examples = read_squad_examples(logger=logger, args=args, input_file=input_file, debug=args.debug)
        train_features = convert_examples_to_features(logger=logger, args=args, examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, max_n_answers=(args.max_n_answers if is_training else 1), is_training=is_training)
        if not args.debug:
            logger.info('Saving features to: {}'.format(feature_save_path))
            save_features = {'features': train_features}
            if not is_training:
                save_features['examples'] = examples
            with open(feature_save_path, 'wb') as f:
                pkl.dump(save_features, f)
    n_features = sum(len(f) for f in train_features)
    num_train_steps = int(len(train_features) / batch_size * num_epochs)
    if examples is not None:
        logger.info(' Num orig examples = %d', len(examples))
    logger.info(' Num split examples = %d', n_features)
    logger.info(' Batch size = %d', batch_size)
    if is_training:
        logger.info(' Num steps = %d', num_train_steps)
    dataloader = MyDataLoader(features=train_features, batch_size=batch_size, is_training=is_training)
    flattened_features = [f for feature_group in train_features for f in feature_group]
    return (dataloader, examples, flattened_features, num_train_steps)
def get_corrections(y_pred, y_true):
    """Return the indices where predictions match targets.

    Either argument may be given as class indices (1D) or as per-class
    score/probability rows (2D); 2D inputs are argmax-reduced first.

    Args:
        y_pred: predicted labels or score matrix.
        y_true: true labels or one-hot/score matrix.

    Returns:
        List of integer positions i with prediction == target.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # Vectorized argmax replaces the original per-row Python loops.
    if y_pred.ndim > 1:
        y_pred = y_pred.argmax(axis=1)
    if y_true.ndim > 1:
        y_true = y_true.argmax(axis=1)
    # flatnonzero gives the matching indices in one C-level pass.
    return np.flatnonzero(y_pred == y_true).tolist()
def find_all_files(root, suffix=None):
    """Recursively collect file paths under `root`.

    Args:
        root: directory to walk.
        suffix: if given, keep only files whose name ends with it
            (a string, or a tuple of strings as accepted by str.endswith).

    Returns:
        List of full paths, in os.walk order.
    """
    matches = []
    # Distinct loop variable: the original shadowed the `root` argument.
    for dirpath, _, filenames in os.walk(root):
        matches.extend(os.path.join(dirpath, name) for name in filenames if suffix is None or name.endswith(suffix))
    return matches
class Optimizer(object):
    """Base class for config-driven TF1-style optimizers.

    Maintains per-variable accumulator slots, a float global step, and
    global-norm clipping of the computed update steps. Subclasses implement
    `dense_update` / `sparse_update` to turn a gradient into a step.

    Hyperparameters (learning_rate, decay_rate, mu, nu, gamma, clip,
    epsilon) are read from the injected config object. They are exposed as
    read-only properties: the original code defined them as plain methods
    yet consumed them as attributes (e.g. `self.clip`, `self.global_step`),
    which yielded bound methods instead of values — `@property` fixes that.
    """

    def __init__(self, config=None):
        # Maps (variable, slot_name) -> accumulator tf.Variable.
        self._accumulators = {}
        # Float-valued so it can appear directly in decay formulas.
        self._global_step = tf.Variable(0.0, trainable=False, name='global_step')
        self._config = config

    @classmethod
    def from_optimizer(cls, optimizer):
        """Alternate constructor: share config, accumulators and global step
        with an existing optimizer (the original lacked @classmethod)."""
        new_optimizer = cls(config=optimizer._config)
        new_optimizer._accumulators = optimizer._accumulators
        new_optimizer._global_step = optimizer._global_step
        return new_optimizer

    def minimize(self, loss, variables=None):
        """Build the op applying one optimization step for `loss`.

        Dense gradients go through `dense_update`; IndexedSlices take the
        sparse path. Steps are clipped by global norm before being
        subtracted from their variables. Returns the first element of a
        tf.tuple over all updates so evaluating it runs the whole group.
        """
        variables = (variables or tf.trainable_variables())
        gradients = tf.gradients(loss, variables, colocate_gradients_with_ops=True, gate_gradients=True, aggregation_method=2)
        # Keep only variables that actually received a gradient.
        gradients = {variable: gradient for (variable, gradient) in zip(variables, gradients) if (gradient is not None)}
        variable_steps = {}
        variable_indices = {}
        updates = [tf.assign_add(self.global_step, 1)]
        for (variable, gradient) in six.iteritems(gradients):
            if isinstance(gradient, tf.Tensor):
                # NOTE(review): `dense_update` is expected from subclasses;
                # this base class only defines `dense_adam_update`.
                (step, update) = self.dense_update(gradient, variable)
                variable_steps[variable] = step
                updates.extend(update)
            else:
                (step, indices, update) = self.sparse_update(gradient, variable)
                variable_steps[variable] = step
                variable_indices[variable] = indices
                updates.extend(update)
        variable_steps = self.clip_by_global_norm(variable_steps)
        for (variable, step) in six.iteritems(variable_steps):
            if (variable in variable_indices):
                indices = variable_indices[variable]
                updates.append(tf.scatter_sub(variable, indices, step))
            else:
                updates.append(tf.assign_sub(variable, step))
        return tf.tuple(updates)[0]

    def dense_adam_update(self, gradient, variable):
        """Subclass hook: compute the dense step for one variable."""
        raise NotImplementedError()

    def dense_moving_average(self, variable, accumulant, name='Accumulator', decay=0.9):
        """Bias-corrected exponential moving average of `accumulant`.

        The accumulant is clipped element-wise to [-clip, clip] first.
        Returns (updated accumulator, updated iteration counter).
        """
        accumulant = tf.clip_by_value(accumulant, (- self.clip), self.clip)
        accumulator = self.get_accumulator(name, variable)
        iteration = self.get_accumulator('{}/iteration'.format(name), variable, shape=[])
        iteration = tf.assign_add(iteration, 1)
        if (decay < 1):
            # Debiased decay so early iterations are not dominated by the
            # zero initialization (Adam-style bias correction).
            current_decay = ((decay * (1 - (decay ** (iteration - 1)))) / (1 - (decay ** iteration)))
        else:
            # decay >= 1 degenerates to a running arithmetic mean.
            current_decay = ((iteration - 1) / iteration)
        accumulator = tf.assign(accumulator, (current_decay * accumulator))
        accumulator = tf.assign_add(accumulator, ((1 - current_decay) * accumulant))
        return (accumulator, iteration)

    def sparse_update(self, gradient, variable):
        """Subclass hook: compute the sparse step for one variable."""
        raise NotImplementedError()

    def sparse_moving_average(self, variable, unique_indices, accumulant, name='Accumulator', decay=0.9):
        """Row-sparse variant of `dense_moving_average`.

        Only the rows in `unique_indices` are touched; each row keeps its
        own iteration counter so bias correction stays per-row correct.
        """
        accumulant = tf.clip_by_value(accumulant, (- self.clip), self.clip)
        first_dim = variable.get_shape().as_list()[0]
        accumulator = self.get_accumulator(name, variable)
        indexed_accumulator = tf.gather(accumulator, unique_indices)
        iteration = self.get_accumulator('{}/iteration'.format(name), variable, shape=[first_dim, 1])
        indexed_iteration = tf.gather(iteration, unique_indices)
        iteration = tf.scatter_add(iteration, unique_indices, tf.ones_like(indexed_iteration))
        indexed_iteration = tf.gather(iteration, unique_indices)
        if (decay < 1):
            current_indexed_decay = ((decay * (1 - (decay ** (indexed_iteration - 1)))) / (1 - (decay ** indexed_iteration)))
        else:
            current_indexed_decay = ((indexed_iteration - 1) / indexed_iteration)
        accumulator = tf.scatter_update(accumulator, unique_indices, (current_indexed_decay * indexed_accumulator))
        accumulator = tf.scatter_add(accumulator, unique_indices, ((1 - current_indexed_decay) * accumulant))
        return (accumulator, iteration)

    def clip_by_global_norm(self, variable_steps):
        """Clip the whole step dict jointly to global norm `self.clip`."""
        variable_step_list = list(variable_steps.values())
        (variable_step_list, _) = tf.clip_by_global_norm(variable_step_list, self.clip)
        variable_steps = dict(zip(variable_steps.keys(), variable_step_list))
        return variable_steps

    def get_accumulator(self, name, original_variable, shape=None):
        """Get-or-create a zero-initialized, non-trainable slot variable
        colocated with `original_variable` (excluded from checkpoints via
        the 'non_save_variables' collection)."""
        key = (original_variable, name)
        if (key in self._accumulators):
            variable = self._accumulators[key]
        else:
            shape = (shape if (shape is not None) else original_variable.get_shape().as_list())
            initializer = tf.zeros_initializer
            with tf.control_dependencies([original_variable]):
                with tf.variable_scope(original_variable.op.name, reuse=False):
                    with tf.device(original_variable.device):
                        variable = tf.get_variable(name, shape=shape, initializer=initializer, collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'non_save_variables'], trainable=False)
            self._accumulators[key] = variable
        return variable

    @property
    def learning_rate(self):
        return self._config.getfloat(self, 'learning_rate')

    @property
    def decay_rate(self):
        return self._config.getfloat(self, 'decay_rate')

    @property
    def annealed_learning_rate(self):
        """Exponentially decayed learning rate as a tensor."""
        return (self.learning_rate * tf.exp(((- self.decay_rate) * self.global_step)))

    @property
    def mu(self):
        return self._config.getfloat(self, 'mu')

    @property
    def nu(self):
        return self._config.getfloat(self, 'nu')

    @property
    def gamma(self):
        return self._config.getfloat(self, 'gamma')

    @property
    def clip(self):
        return self._config.getfloat(self, 'clip')

    @property
    def epsilon(self):
        return self._config.getfloat(self, 'epsilon')

    @property
    def global_step(self):
        return self._global_step
def find_matched_references(collab_attr_list, all_collborators):
    """Detect attributes whose objects are shared (same id) across collaborators.

    For each attribute name in `collab_attr_list`, records the object ids
    seen so far; any collaborator whose attribute id was already seen gets
    that attribute name appended to its entry, and a failure line is printed.

    Returns a dict mapping each collaborator's `input` name to the list of
    attribute names it shares with a previously-seen collaborator.
    """
    matched_ref_dict = {member.input: [] for member in all_collborators}
    previous_collaborator = ''
    for attr in collab_attr_list:
        seen_ids = []
        for collab in all_collborators:
            attr_id = id(getattr(collab, attr))
            collaborator_name = collab.input
            if attr_id not in seen_ids:
                seen_ids.append(attr_id)
            else:
                matched_ref_dict[collaborator_name].append(attr)
                print((f'{bcolors.FAIL} ... Reference test failed - {collaborator_name} sharing same ' + f'{attr} reference with {previous_collaborator} {bcolors.ENDC}'))
            previous_collaborator = collaborator_name
    return matched_ref_dict
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.