class ModelOutputTester(unittest.TestCase):
def test_get_attributes(self):
x = ModelOutputTest(a=30)
self.assertEqual(x.a, 30)
self.assertIsNone(x.b)
self.assertIsNone(x.c)
with self.assertRaises(AttributeError):
_ = x.d
def test_index_with_ints_and_slices(self):
x = ModelOutputTest(a=30, b=10)
self.assertEqual(x[0], 30)
self.assertEqual(x[1], 10)
self.assertEqual(x[:2], (30, 10))
self.assertEqual(x[:], (30, 10))
x = ModelOutputTest(a=30, c=10)
self.assertEqual(x[0], 30)
self.assertEqual(x[1], 10)
self.assertEqual(x[:2], (30, 10))
self.assertEqual(x[:], (30, 10))
def test_index_with_strings(self):
x = ModelOutputTest(a=30, b=10)
self.assertEqual(x['a'], 30)
self.assertEqual(x['b'], 10)
with self.assertRaises(KeyError):
_ = x['c']
x = ModelOutputTest(a=30, c=10)
self.assertEqual(x['a'], 30)
self.assertEqual(x['c'], 10)
with self.assertRaises(KeyError):
_ = x['b']
def test_dict_like_properties(self):
x = ModelOutputTest(a=30)
self.assertEqual(list(x.keys()), ['a'])
self.assertEqual(list(x.values()), [30])
self.assertEqual(list(x.items()), [('a', 30)])
self.assertEqual(list(x), ['a'])
x = ModelOutputTest(a=30, b=10)
self.assertEqual(list(x.keys()), ['a', 'b'])
self.assertEqual(list(x.values()), [30, 10])
self.assertEqual(list(x.items()), [('a', 30), ('b', 10)])
self.assertEqual(list(x), ['a', 'b'])
x = ModelOutputTest(a=30, c=10)
self.assertEqual(list(x.keys()), ['a', 'c'])
self.assertEqual(list(x.values()), [30, 10])
self.assertEqual(list(x.items()), [('a', 30), ('c', 10)])
self.assertEqual(list(x), ['a', 'c'])
with self.assertRaises(Exception):
x = x.update({'d': 20})
with self.assertRaises(Exception):
del x['a']
with self.assertRaises(Exception):
_ = x.pop('a')
with self.assertRaises(Exception):
_ = x.setdefault('d', 32)
def test_set_attributes(self):
x = ModelOutputTest(a=30)
x.a = 10
self.assertEqual(x.a, 10)
self.assertEqual(x['a'], 10)
def test_set_keys(self):
x = ModelOutputTest(a=30)
x['a'] = 10
self.assertEqual(x.a, 10)
self.assertEqual(x['a'], 10)
def test_instantiate_from_dict(self):
x = ModelOutputTest({'a': 30, 'b': 10})
self.assertEqual(list(x.keys()), ['a', 'b'])
self.assertEqual(x.a, 30)
        self.assertEqual(x.b, 10)

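# The fixture these tests assume, sketched from the transformers test suite
# (assumption: the exact ModelOutput import path varies by transformers version):
from dataclasses import dataclass
from typing import Optional

from transformers.utils import ModelOutput

@dataclass
class ModelOutputTest(ModelOutput):
    a: float = None
    b: Optional[float] = None
    c: Optional[float] = None
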
import os

import numpy as np


class Logger(object):
    def __init__(self, output_name):
        dirname = os.path.dirname(output_name)
        # makedirs (not mkdir) so nested paths work; skip when output_name has
        # no directory component at all.
        if dirname and (not os.path.exists(dirname)):
            os.makedirs(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
        for (key, vals) in self.infos.items():
msgs.append(('%s %.6f' % (key, np.mean(vals))))
msg = '\n'.join(msgs)
self.log_file.write((msg + '\n'))
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write((msg + '\n'))
self.log_file.flush()
        print(msg)

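# A minimal usage sketch for Logger ('logs/train.log' is a made-up path):
logger = Logger('logs/train.log')
for step in range(3):
    logger.append('loss', 1.0 / (step + 1))
print(logger.log(extra_msg='epoch 0'))  # writes the mean of each metric, then resets
logger.write('done')
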
class TestSoftCopyAttention(object):
    @pytest.fixture
    def copy_source(self):
return float_tensor_var([[0.0, 0.2, 0.4, 0.6], [0.1, 0.3, 0.5, 0.7], [0.1, 0.2, 0.3, 0.4], [0.01, 0.02, 0.03, 0.04], [0.01, 0.03, 0.05, 0.07]])
    @pytest.fixture
    def alignments(self):
values = GPUVariable(torch.LongTensor([[1, 3], [1, 1], [3, 2], [3, 0], [0, 0]]))
mask = float_tensor_var([[1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
return SequenceBatch(values, mask, left_justify=False)
def test_is_subset(self):
a = float_tensor_var([[1, 1, 0], [0, 1, 0]])
b = float_tensor_var([[1, 1, 0], [0, 1, 1]])
c = float_tensor_var([[1, 1, 0], [0, 0, 1]])
assert SoftCopyAttention._is_subset(a, a)
assert SoftCopyAttention._is_subset(a, b)
assert (not SoftCopyAttention._is_subset(b, a))
assert (not SoftCopyAttention._is_subset(a, c))
assert (not SoftCopyAttention._is_subset(c, a))
def test_forward(self, copy_source, alignments):
(memory_dim, query_dim, attn_dim) = (4, 3, 2)
attn = SoftCopyAttention(memory_dim, query_dim, attn_dim)
attn_ex = AttentionExample()
(memory_transform, query_transform, v_transform) = attn_ex.params
memory_cells = attn_ex.memory_cells
query = attn_ex.query
base_attn = attn._base_attention
base_attn.memory_transform.data.set_(float_tensor(memory_transform))
base_attn.query_transform.data.set_(float_tensor(query_transform))
base_attn.v_transform.data.set_(float_tensor(v_transform))
exp_logits = torch.exp(attn_ex.correct_logits)
boost = float_tensor_var([[0.2, 0], [0.3, 0.3], [0, 0], [0, 0], [0, 0.01]])
correct_logits = torch.log((exp_logits + boost))
attn_out = attn(memory_cells, query, alignments, copy_source)
assert_tensor_equal(attn_out.logits, correct_logits)
assert_tensor_equal(attn_out.orig_logits, attn_ex.correct_logits)
        assert_tensor_equal(attn_out.boost, boost)

def ResNet18(in_channels, num_classes):
    return ResNet(BasicBlock, [2, 2, 2, 2], in_channels=in_channels, num_classes=num_classes)

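# Usage sketch (assumes the torchvision-style ResNet/BasicBlock definitions this
# factory refers to are in scope):
import torch

model = ResNet18(in_channels=3, num_classes=10)
logits = model(torch.randn(2, 3, 32, 32))  # expected: shape (2, 10)
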
def get_model(input_shape, weights_dir, resume, bayesian, vnet, prior_std, kernel_size, activation, padding, kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch, ensemble, num_gpus, initial_epoch, scale_factor=1, weights_path=None):
    # Create all three checkpoint directories up front; the ensemble branch
    # below writes to weights_dir + '/ensemble' as well.
    os.makedirs((weights_dir + '/bayesian'), exist_ok=True)
    os.makedirs((weights_dir + '/dropout'), exist_ok=True)
    os.makedirs((weights_dir + '/ensemble'), exist_ok=True)
if ensemble:
checkpoint_path = (weights_dir + '/ensemble/ensemble-{epoch:02d}-{val_acc:.3f}-{val_loss:.0f}.h5')
if weights_path:
latest_weights_path = weights_path
else:
latest_weights_path = get_latest_file((weights_dir + '/bayesian'))
net = ensemble_vnet
elif bayesian:
checkpoint_path = (weights_dir + '/bayesian/bayesian-{epoch:02d}-{val_acc:.3f}-{val_loss:.0f}.h5')
if weights_path:
latest_weights_path = weights_path
else:
latest_weights_path = get_latest_file((weights_dir + '/bayesian'))
net = (bayesian_vnet if vnet else bayesian_unet)
else:
checkpoint_path = (weights_dir + '/dropout/dropout-{epoch:02d}-{val_acc:.3f}-{val_loss:.2f}.h5')
if weights_path:
latest_weights_path = weights_path
else:
latest_weights_path = get_latest_file((weights_dir + '/dropout'))
net = (dropout_vnet if vnet else dropout_unet)
if (latest_weights_path and resume):
model = load_model(input_shape, latest_weights_path, net)
else:
model = net(input_shape, kernel_size=kernel_size, activation=activation, padding=padding, prior_std=prior_std)
model.summary(line_length=127)
if (num_gpus > 1):
model = multi_gpu_model(model, gpus=num_gpus)
if bayesian:
if (initial_epoch >= kl_start_epoch):
kl_alpha = min(1.0, (kl_alpha + ((initial_epoch - kl_start_epoch) * kl_alpha_increase_per_epoch)))
kl_alpha = K.variable(kl_alpha)
loss = variational_free_energy_loss(model, scale_factor, kl_alpha)
else:
kl_alpha = None
loss = binary_crossentropy
model.compile(loss=loss, optimizer=Adam(), metrics=['accuracy'])
    return (model, checkpoint_path, kl_alpha)

def cnn_7layer_imagenet(in_ch=3, in_dim=32, width=64, linear_size=512):
    # Two stride-2 convolutions reduce the spatial size to in_dim // 4, so the
    # flattened feature size is 2 * width * (in_dim // 4) ** 2; computing it
    # from the arguments keeps the first Linear layer consistent for any
    # in_dim (a hardcoded 32768 is only valid for in_dim=64, width=64).
    model = nn.Sequential(
        nn.Conv2d(in_ch, width, 3, stride=1, padding=1), nn.BatchNorm2d(width), nn.ReLU(),
        nn.Conv2d(width, width, 3, stride=1, padding=1), nn.BatchNorm2d(width), nn.ReLU(),
        nn.Conv2d(width, (2 * width), 3, stride=2, padding=1), nn.BatchNorm2d((2 * width)), nn.ReLU(),
        nn.Conv2d((2 * width), (2 * width), 3, stride=1, padding=1), nn.BatchNorm2d((2 * width)), nn.ReLU(),
        nn.Conv2d((2 * width), (2 * width), 3, stride=2, padding=1), nn.ReLU(),
        Flatten(),
        nn.Linear(((2 * width) * ((in_dim // 4) ** 2)), linear_size), nn.ReLU(),
        nn.Linear(linear_size, 200))
    return model

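# Shape check for the Tiny-ImageNet-style setting the name suggests (assumes a
# Flatten module is in scope):
# cnn_7layer_imagenet(in_dim=64)(torch.randn(1, 3, 64, 64)).shape -> (1, 200)
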
def _int_list_from_bigint(bigint):
if (bigint < 0):
raise error.Error('Seed must be non-negative, not {}'.format(bigint))
elif (bigint == 0):
return [0]
ints = []
while (bigint > 0):
(bigint, mod) = divmod(bigint, (2 ** 32))
ints.append(mod)
    return ints

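# Worked example: the list holds the little-endian base-2**32 digits of the
# seed, e.g. 2**32 + 7 == 7 + 1 * 2**32:
assert _int_list_from_bigint(0) == [0]
assert _int_list_from_bigint((2 ** 32) + 7) == [7, 1]
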
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        # Parse inside the try so deprecated-argument errors are rewritten
        # below; the benchmark itself is constructed after the except block.
        benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
begin_error_msg = ' '.join(str(e).split(' ')[:(- 1)])
full_error_msg = ''
        deprecated_args = eval(str(e).split(' ')[(- 1)])
wrong_args = []
        for arg in deprecated_args:
if (arg[2:] in TensorFlowBenchmark.deprecated_args):
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(arg)
if (len(wrong_args) > 0):
full_error_msg = ((full_error_msg + begin_error_msg) + str(wrong_args))
raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()

class DualkSchurFunctions(KBoundedQuotientBasis):
def __init__(self, kBoundedRing):
KBoundedQuotientBasis.__init__(self, kBoundedRing, 'dks')
kHLP = kBoundedRing.kHallLittlewoodP()
self.module_morphism(self._dks_to_khlp_on_basis, codomain=kHLP).register_as_coercion()
kHLP.module_morphism(self._khlp_to_dks_on_basis, codomain=self).register_as_coercion()
def _repr_(self):
return (self.realization_of()._repr_() + (' in the dual %s-Schur basis' % self.k))
def _dks_to_khlp_on_basis(self, la):
Sym = self._kBoundedRing.ambient()
kB = Sym.kBoundedSubspace(self.k, t=self.t)
Qp = Sym.hall_littlewood(t=self.t).Qp()
ks = kB.kschur()
kHLP = self._kBoundedRing.kHallLittlewoodP()
return sum(((ks(Qp(x)).coefficient(la) * kHLP(x)) for x in PartitionsGreatestLE(sum(la), self.k)))
def _khlp_to_dks_on_basis(self, la):
Sym = self._kBoundedRing.ambient()
kB = Sym.kBoundedSubspace(self.k, t=self.t)
Qp = Sym.hall_littlewood(t=self.t).Qp()
ks = kB.kschur()
        return sum(((Qp(ks(x)).coefficient(la) * self(x)) for x in PartitionsGreatestLE(sum(la), self.k)))

def load_real_images():
images = []
src_dir = os.path.join(datadir, src_instance_name)
with open(os.path.join(src_dir, 'transforms_train.json')) as f:
data_src = [x['file_path'] for x in json.load(f)['frames']]
tgt_dir = os.path.join(datadir, tgt_instance_name)
if (args.dataset == 'photoshapes'):
for name in data_src:
pose_num = name.split('_')[(- 1)]
for prefix in ['train', 'val', 'test']:
fp = os.path.join(tgt_dir, prefix, f'{tgt_instance_name}_{pose_num}.png')
if os.path.exists(fp):
images.append(trn(Image.open(fp)))
elif (args.dataset == 'dosovitskiy_chairs'):
for name in data_src:
fp = os.path.join(tgt_dir, f'{name}.png')
if os.path.exists(fp):
images.append(trn(Image.open(fp)))
    return torch.stack(images)

class TorchPoseRepresentation(PoseRepresentation):
    def __init__(self, header: PoseHeader, rep_modules1: List = None, rep_modules2: List = None, rep_modules3: List = None):
        # None defaults instead of mutable [] defaults, which would be shared
        # across every instance of the class.
        super(TorchPoseRepresentation, self).__init__(header, rep_modules1 or [], rep_modules2 or [], rep_modules3 or [])
self.limb_pt1s = torch.tensor(self.limb_pt1s, dtype=torch.long)
self.limb_pt2s = torch.tensor(self.limb_pt2s, dtype=torch.long)
self.triangle_pt1s = torch.tensor(self.triangle_pt1s, dtype=torch.long)
self.triangle_pt2s = torch.tensor(self.triangle_pt2s, dtype=torch.long)
self.triangle_pt3s = torch.tensor(self.triangle_pt3s, dtype=torch.long)
def group_embeds(self, embeds: List[torch.Tensor]):
group = torch.cat(embeds, dim=0)
return group.permute(dims=[1, 2, 0])
def permute(self, src, shape: tuple):
        return src.permute(shape)

def train(opt, netG):
if (opt.vae_levels < (opt.scale_idx + 1)):
D_curr = getattr(networks_2d, opt.discriminator)(opt).to(opt.device)
if ((opt.netG != '') and (opt.resumed_idx == opt.scale_idx)):
D_curr.load_state_dict(torch.load('{}/netD_{}.pth'.format(opt.resume_dir, (opt.scale_idx - 1)))['state_dict'])
elif (opt.vae_levels < opt.scale_idx):
D_curr.load_state_dict(torch.load('{}/netD_{}.pth'.format(opt.saver.experiment_dir, (opt.scale_idx - 1)))['state_dict'])
optimizerD = optim.Adam(D_curr.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
parameter_list = []
if (not opt.train_all):
if (opt.vae_levels < (opt.scale_idx + 1)):
train_depth = min(opt.train_depth, ((len(netG.body) - opt.vae_levels) + 1))
parameter_list += [{'params': block.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** ((len(netG.body[(- train_depth):]) - 1) - idx)))} for (idx, block) in enumerate(netG.body[(- train_depth):])]
else:
parameter_list += [{'params': netG.encode.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** opt.scale_idx))}, {'params': netG.decoder.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** opt.scale_idx))}]
parameter_list += [{'params': block.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** ((len(netG.body[(- opt.train_depth):]) - 1) - idx)))} for (idx, block) in enumerate(netG.body[(- opt.train_depth):])]
elif (len(netG.body) < opt.train_depth):
parameter_list += [{'params': netG.encode.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** opt.scale_idx))}, {'params': netG.decoder.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** opt.scale_idx))}]
parameter_list += [{'params': block.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** ((len(netG.body) - 1) - idx)))} for (idx, block) in enumerate(netG.body)]
else:
parameter_list += [{'params': block.parameters(), 'lr': (opt.lr_g * (opt.lr_scale ** ((len(netG.body[(- opt.train_depth):]) - 1) - idx)))} for (idx, block) in enumerate(netG.body[(- opt.train_depth):])]
optimizerG = optim.Adam(parameter_list, lr=opt.lr_g, betas=(opt.beta1, 0.999))
if (opt.device == 'cuda'):
G_curr = torch.nn.DataParallel(netG)
if (opt.vae_levels < (opt.scale_idx + 1)):
D_curr = torch.nn.DataParallel(D_curr)
else:
G_curr = netG
progressbar_args = {'iterable': range(opt.niter), 'desc': 'Training scale [{}/{}]'.format((opt.scale_idx + 1), (opt.stop_scale + 1)), 'train': True, 'offset': 0, 'logging_on_update': False, 'logging_on_close': True, 'postfix': True}
epoch_iterator = tools.create_progressbar(**progressbar_args)
    iterator = iter(opt.data_loader)
for iteration in epoch_iterator:
try:
data = next(iterator)
except StopIteration:
iterator = iter(opt.data_loader)
data = next(iterator)
if (opt.scale_idx > 0):
(real, real_zero) = data
real = real.to(opt.device)
real_zero = real_zero.to(opt.device)
else:
real = data.to(opt.device)
real_zero = real
initial_size = utils.get_scales_by_index(0, opt.scale_factor, opt.stop_scale, opt.img_size)
initial_size = [int((initial_size * opt.ar)), initial_size]
opt.Z_init_size = [opt.batch_size, opt.latent_dim, *initial_size]
noise_init = utils.generate_noise(size=opt.Z_init_size, device=opt.device)
if (iteration == 0):
if opt.const_amp:
opt.Noise_Amps.append(1)
else:
with torch.no_grad():
if (opt.scale_idx == 0):
opt.noise_amp = 1
opt.Noise_Amps.append(opt.noise_amp)
else:
opt.Noise_Amps.append(0)
(z_reconstruction, _, _) = G_curr(real_zero, opt.Noise_Amps, mode='rec')
RMSE = torch.sqrt(F.mse_loss(real, z_reconstruction))
opt.noise_amp = ((opt.noise_amp_init * RMSE.item()) / opt.batch_size)
opt.Noise_Amps[(- 1)] = opt.noise_amp
total_loss = 0
(generated, generated_vae, (mu, logvar)) = G_curr(real_zero, opt.Noise_Amps, mode='rec')
        if (opt.vae_levels >= (opt.scale_idx + 1)):
            # VAE stage: reconstruction + KL terms only, no discriminator yet.
rec_vae_loss = (opt.rec_loss(generated, real) + opt.rec_loss(generated_vae, real_zero))
kl_loss = kl_criterion(mu, logvar)
vae_loss = ((opt.rec_weight * rec_vae_loss) + (opt.kl_weight * kl_loss))
total_loss += vae_loss
        else:
            # Adversarial stage: WGAN-GP discriminator update (critic scores
            # on real and detached fake, plus gradient penalty), then the
            # generator terms are accumulated into total_loss below.
            D_curr.zero_grad()
output = D_curr(real)
errD_real = (- output.mean())
(fake, _) = G_curr(noise_init, opt.Noise_Amps, noise_init=noise_init, mode='rand')
output = D_curr(fake.detach())
errD_fake = output.mean()
gradient_penalty = calc_gradient_penalty(D_curr, real, fake, opt.lambda_grad, opt.device)
errD_total = ((errD_real + errD_fake) + gradient_penalty)
errD_total.backward()
optimizerD.step()
errG_total = 0
rec_loss = opt.rec_loss(generated, real)
errG_total += (opt.rec_weight * rec_loss)
output = D_curr(fake)
errG = ((- output.mean()) * opt.disc_loss_weight)
errG_total += errG
total_loss += errG_total
G_curr.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(G_curr.parameters(), opt.grad_clip)
optimizerG.step()
epoch_iterator.set_description('Scale [{}/{}], Iteration [{}/{}]'.format((opt.scale_idx + 1), (opt.stop_scale + 1), (iteration + 1), opt.niter))
if opt.visualize:
opt.summary.add_scalar('Video/Scale {}/noise_amp'.format(opt.scale_idx), opt.noise_amp, iteration)
if (opt.vae_levels >= (opt.scale_idx + 1)):
opt.summary.add_scalar('Video/Scale {}/KLD'.format(opt.scale_idx), kl_loss.item(), iteration)
else:
opt.summary.add_scalar('Video/Scale {}/rec loss'.format(opt.scale_idx), rec_loss.item(), iteration)
opt.summary.add_scalar('Video/Scale {}/noise_amp'.format(opt.scale_idx), opt.noise_amp, iteration)
if (opt.vae_levels < (opt.scale_idx + 1)):
opt.summary.add_scalar('Video/Scale {}/errG'.format(opt.scale_idx), errG.item(), iteration)
opt.summary.add_scalar('Video/Scale {}/errD_fake'.format(opt.scale_idx), errD_fake.item(), iteration)
opt.summary.add_scalar('Video/Scale {}/errD_real'.format(opt.scale_idx), errD_real.item(), iteration)
else:
opt.summary.add_scalar('Video/Scale {}/Rec VAE'.format(opt.scale_idx), rec_vae_loss.item(), iteration)
if ((iteration % opt.print_interval) == 0):
with torch.no_grad():
fake_var = []
fake_vae_var = []
for _ in range(3):
noise_init = utils.generate_noise(ref=noise_init)
(fake, fake_vae) = G_curr(noise_init, opt.Noise_Amps, noise_init=noise_init, mode='rand')
fake_var.append(fake)
fake_vae_var.append(fake_vae)
fake_var = torch.cat(fake_var, dim=0)
fake_vae_var = torch.cat(fake_vae_var, dim=0)
opt.summary.visualize_image(opt, iteration, real, 'Real')
opt.summary.visualize_image(opt, iteration, generated, 'Generated')
opt.summary.visualize_image(opt, iteration, generated_vae, 'Generated VAE')
opt.summary.visualize_image(opt, iteration, fake_var, 'Fake var')
opt.summary.visualize_image(opt, iteration, fake_vae_var, 'Fake VAE var')
epoch_iterator.close()
opt.saver.save_checkpoint({'data': opt.Noise_Amps}, 'Noise_Amps.pth')
opt.saver.save_checkpoint({'scale': opt.scale_idx, 'state_dict': netG.state_dict(), 'optimizer': optimizerG.state_dict(), 'noise_amps': opt.Noise_Amps}, 'netG.pth')
if (opt.vae_levels < (opt.scale_idx + 1)):
        opt.saver.save_checkpoint({'scale': opt.scale_idx, 'state_dict': (D_curr.module.state_dict() if (opt.device == 'cuda') else D_curr.state_dict()), 'optimizer': optimizerD.state_dict()}, 'netD_{}.pth'.format(opt.scale_idx))

def quantize_node(root_module, graph, node, activation_post_process):
def module_has_qparams_attr_with_index(module, qparams, i):
for name in qparams.keys():
if hasattr(module, (name + str(i))):
return True
return False
def get_next_qparams_idx(module, qparams):
idx = 0
while module_has_qparams_attr_with_index(module, qparams, idx):
idx += 1
return idx
(quantize_op, qparams) = get_quantize_op_and_qparams(activation_post_process)
idx = get_next_qparams_idx(root_module, qparams)
inputs = [node]
for (key, value) in qparams.items():
setattr(root_module, (key + str(idx)), value)
qparam_full_path = (key + str(idx))
inputs.append(graph.create_node('get_attr', qparam_full_path))
    return graph.create_node('call_function', quantize_op, tuple(inputs), {})

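# What quantize_node does: it materializes the observer's quantization
# parameters as uniquely numbered attributes on the root module, emits
# 'get_attr' nodes for them, and returns a 'call_function' node that applies
# the quantize op to (node, *qparams) in the traced graph.
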
def c(alf, bet, i, j, gn=1):
    # gn is either the literal 1 (no rescaling) or a callable gn(alf, bet, n)
    # used to rescale the coefficient by gn(alf, bet, j) / gn(alf, bet, i).
    f = _c(alf, bet, i, j)
    return (f if (gn == 1) else ((gn(alf, bet, j) / gn(alf, bet, i)) * f))

class ChunkStream():
def __init__(self, fp):
self.fp = fp
self.queue = []
def read(self):
cid = None
if self.queue:
(cid, pos, length) = self.queue.pop()
self.fp.seek(pos)
else:
s = self.fp.read(8)
cid = s[4:]
pos = self.fp.tell()
length = i32(s)
if (not is_cid(cid)):
if (not ImageFile.LOAD_TRUNCATED_IMAGES):
raise SyntaxError(('broken PNG file (chunk %s)' % repr(cid)))
return (cid, pos, length)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
self.queue = self.crc = self.fp = None
def push(self, cid, pos, length):
self.queue.append((cid, pos, length))
def call(self, cid, pos, length):
logger.debug('STREAM %r %s %s', cid, pos, length)
return getattr(self, ('chunk_' + cid.decode('ascii')))(pos, length)
def crc(self, cid, data):
if (ImageFile.LOAD_TRUNCATED_IMAGES and ((i8(cid[0]) >> 5) & 1)):
self.crc_skip(cid, data)
return
try:
crc1 = _crc32(data, _crc32(cid))
crc2 = i32(self.fp.read(4))
if (crc1 != crc2):
raise SyntaxError(('broken PNG file (bad header checksum in %r)' % cid))
except struct.error:
raise SyntaxError(('broken PNG file (incomplete checksum in %r)' % cid))
def crc_skip(self, cid, data):
self.fp.read(4)
def verify(self, endchunk=b'IEND'):
cids = []
while True:
try:
(cid, pos, length) = self.read()
except struct.error:
raise OSError('truncated PNG file')
if (cid == endchunk):
break
self.crc(cid, ImageFile._safe_read(self.fp, length))
cids.append(cid)
        return cids

class Treccani(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 5.0)] * self.N), ([5.0] * self.N)))
self.custom_bounds = [((- 2), 2), ((- 2), 2)]
self.global_optimum = [[(- 2.0), 0.0]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
        return ((((x[0] ** 4) + (4.0 * (x[0] ** 3))) + (4.0 * (x[0] ** 2))) + (x[1] ** 2))

def gaussian_pdf(sd, x):
    if (sd <= 0):
        raise ValueError('standard deviation must be positive but is {}'.format(sd))
    else:
        # Normalized density of a zero-mean Gaussian with standard deviation
        # sd; the 1 / sqrt(2 * pi) factor is required for a true pdf.
        return (np.exp((- 0.5) * ((x / sd) ** 2)) / (sd * np.sqrt(2.0 * np.pi)))

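# Sanity check against scipy (a sketch; assumes scipy is installed):
from scipy.stats import norm

assert np.isclose(gaussian_pdf(2.0, 0.5), norm.pdf(0.5, scale=2.0))
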
class FiveCrops(object):
def __init__(self, size, mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], interpolation=Image.BILINEAR, tenCrops=False):
self.size = size
self.interpolation = interpolation
self.mean = mean
self.std = std
self.to_Tensor = ToTensor()
self.normalize = Normalize(self.mean, self.std)
self.tenCrops = tenCrops
def __call__(self, img, inv, flow):
crop_size = self.size
image_width = img.size[0]
image_height = img.size[1]
crop_positions = []
center_x = (image_width // 2)
center_y = (image_height // 2)
box_half = (crop_size // 2)
x1 = (center_x - box_half)
y1 = (center_y - box_half)
x2 = (center_x + box_half)
y2 = (center_y + box_half)
crop_positions += [[x1, y1, x2, y2]]
x1 = 0
y1 = 0
x2 = crop_size
y2 = crop_size
crop_positions += [[x1, y1, x2, y2]]
        x1 = (image_width - crop_size)  # top-right corner starts at y = 0
        y1 = 0
        x2 = image_width
        y2 = crop_size
        crop_positions += [[x1, y1, x2, y2]]
        x1 = 0  # bottom-left corner starts at x = 0
        y1 = (image_height - crop_size)
x2 = crop_size
y2 = image_height
crop_positions += [[x1, y1, x2, y2]]
x1 = (image_width - crop_size)
y1 = (image_height - crop_size)
x2 = image_width
y2 = image_height
crop_positions += [[x1, y1, x2, y2]]
cropped_imgs = [img.crop(crop_positions[i]).resize((self.size, self.size), self.interpolation) for i in range(5)]
if (self.tenCrops is True):
if (inv is True):
flipped_imgs = [ImageOps.invert(cropped_imgs[i].transpose(Image.FLIP_LEFT_RIGHT)) for i in range(5)]
else:
flipped_imgs = [cropped_imgs[i].transpose(Image.FLIP_LEFT_RIGHT) for i in range(5)]
cropped_imgs += flipped_imgs
tensor_imgs = [self.to_Tensor(img, inv, flow) for img in cropped_imgs]
normalized_imgs = [self.normalize(img, inv, flow) for img in tensor_imgs]
fiveCropImgs = torch.stack(normalized_imgs, 0)
return fiveCropImgs
def randomize_parameters(self):
        pass

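# Usage sketch: FiveCrops(112)(img, inv, flow) stacks the center and four corner
# crops into a (5, C, 112, 112) tensor, or (10, ...) with tenCrops=True (the
# horizontally flipped crops are appended); ToTensor and Normalize are assumed
# to be the repo's flow-aware variants that take (img, inv, flow).
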
def test_compute_class_weight():
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight('balanced', classes=classes, y=y)
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert (cw[0] < cw[1] < cw[2])

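# Worked numbers: the counts are [3, 2, 1] for classes [2, 3, 4], so 'balanced'
# gives n_samples / (n_classes * counts) = 6 / (3 * [3, 2, 1]) = [2/3, 1, 2];
# dot([2/3, 1, 2], [3, 2, 1]) = 6 = y.shape[0], and the weights grow as the
# class gets rarer.
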
def bn_weight_change(bn: torch.nn.Module):
bw_shape = bn.weight.shape
delattr(bn, 'weight')
delattr(bn, 'bias')
delattr(bn, 'running_var')
delattr(bn, 'running_mean')
bn.register_buffer('weight', torch.rand(bw_shape))
bn.register_buffer('bias', torch.rand(bw_shape))
bn.register_buffer('running_var', torch.abs(torch.rand(bw_shape)))
bn.register_buffer('running_mean', torch.rand(bw_shape))
    return bn

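# Usage sketch: replaces a BatchNorm layer's learnable parameters with fixed
# random buffers, leaving nothing trainable in the layer.
import torch

bn = bn_weight_change(torch.nn.BatchNorm2d(8))
assert len(list(bn.parameters())) == 0  # weight/bias are buffers now
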
class SchemeHomset_points_abelian_variety_field(SchemeHomset_points_projective_field):
def _element_constructor_(self, *v, **kwds):
if (len(v) == 1):
v = v[0]
return self.codomain()._point(self.extended_codomain(), v, **kwds)
def _repr_(self):
s = ('Abelian group of points on ' + str(self.extended_codomain()))
return s
def base_extend(self, R):
if (R is not ZZ):
raise NotImplementedError('Abelian variety point sets are not implemented as modules over rings other than ZZ')
        return self

def save(saver, sess, logdir):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if (not os.path.exists(logdir)):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, write_meta_graph=False)
    print('The weights have been converted to {}.'.format(checkpoint_path))

class TestStyleGAN2Generator():
def setup_class(cls):
cls.default_cfg = dict(out_size=64, style_channels=16, num_mlps=4, channel_multiplier=1)
def test_stylegan2_g_cpu(self):
g = StyleGANv2Generator(**self.default_cfg)
res = g(None, num_batches=2)
assert (res.shape == (2, 3, 64, 64))
truncation_mean = g.get_mean_latent()
res = g(None, num_batches=2, randomize_noise=False, truncation=0.7, truncation_latent=truncation_mean)
assert (res.shape == (2, 3, 64, 64))
res = g.style_mixing(2, 2, truncation_latent=truncation_mean)
assert (res.shape[2] == 64)
random_noise = g.make_injected_noise()
res = g(None, num_batches=1, injected_noise=random_noise, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
random_noise = g.make_injected_noise()
res = g(None, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
styles = [torch.randn((1, 16)) for _ in range(2)]
res = g(styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
res = g(torch.randn, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
g.eval()
assert (g.default_style_mode == 'single')
g.train()
assert (g.default_style_mode == 'mix')
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)) for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert (res.shape == (2, 3, 256, 256))
    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires cuda')
def test_g_cuda(self):
g = StyleGANv2Generator(**self.default_cfg).cuda()
res = g(None, num_batches=2)
assert (res.shape == (2, 3, 64, 64))
random_noise = g.make_injected_noise()
res = g(None, num_batches=1, injected_noise=random_noise, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
random_noise = g.make_injected_noise()
res = g(None, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
styles = [torch.randn((1, 16)).cuda() for _ in range(2)]
res = g(styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
res = g(torch.randn, num_batches=1, injected_noise=None, randomize_noise=False)
assert (res.shape == (1, 3, 64, 64))
g.eval()
assert (g.default_style_mode == 'single')
g.train()
assert (g.default_style_mode == 'mix')
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_).cuda()
res = g(None, num_batches=2)
        assert (res.shape == (2, 3, 256, 256))

def denormalize(sql, schema, return_parse_tree=False, **kwargs):
dn = Denormalizer(schema, **kwargs)
ast = (sql if isinstance(sql, dict) else parse(sql))
dn.denormalize(ast)
if return_parse_tree:
return (ast, dn.contains_self_join)
else:
dn_sql = format(ast, schema, quote_values=not_number_date_field, should_quote=not_number_date_field_table)
        return (dn_sql, dn.contains_self_join)

def create_exp_dir(path, scripts_to_save=None):
import time
time.sleep(2)
if (not os.path.exists(path)):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
if (scripts_to_save is not None):
os.makedirs(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
            shutil.copyfile(script, dst_file)

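# Usage sketch: create_exp_dir('exp/search-001', scripts_to_save=['train.py'])
# also snapshots the launching scripts into exp/search-001/scripts.
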
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('w_shape, channel_axis', [((8, 4, 3, 3), 0), ((32, 16, 3, 3), (- 4)), ((16, 1), 1), ((8, 4, 16), (- 1)), ((4, 2, 8), 2)])
@pytest.mark.parametrize('eps', [1e-05])
@pytest.mark.parametrize('output_stat', [False])
def test_weight_standardization_double_backward(rng, ctx, func_name, w_shape, channel_axis, eps, output_stat):
from nbla_test_utils import backward_function_tester
w = np.array(rng.randn(*w_shape).astype(np.float32))
    backward_function_tester(rng, F.weight_standardization, inputs=[w], func_args=[channel_axis, eps, output_stat], ctx=ctx)

def process_channel(channelxml: ET.ElementTree, resolver: ResolverType, track_progress: bool=False) -> tuple[str, list[float], list[Sample], list[Parameter]]:
channel = channelxml.getroot()
inputfile = channel.attrib.get('InputFile', '')
histopath = channel.attrib.get('HistoPath', '')
samples = tqdm.tqdm(channel.findall('Sample'), unit='sample', disable=(not track_progress))
channel_name = channel.attrib['Name']
data = channel.findall('Data')
if data:
parsed_data = process_data(data[0], resolver, inputfile, histopath)
else:
raise RuntimeError(f'Channel {channel_name} is missing data. See issue #1911.')
results = []
channel_parameter_configs: list[Parameter] = []
for sample in samples:
samples.set_description(f" - sample {sample.attrib.get('Name')}")
result = process_sample(sample, resolver, inputfile, histopath, channel_name, track_progress)
channel_parameter_configs.extend(result.pop('parameter_configs'))
results.append(result)
    return (channel_name, parsed_data, results, channel_parameter_configs)

@ray.remote(resources={'machine': 1})  # assumed: the stripped decorator is ray.remote, matching the resources= syntax
def multicast(args_dict, notification_address, world_size, world_rank, object_size):
store = utils.create_store_using_dict(args_dict)
object_id = store_lib.ObjectID((b'\x00' * 20))
if (world_rank == 0):
array = np.random.randint((2 ** 30), size=(object_size // 4), dtype=np.int32)
buffer = store_lib.Buffer.from_buffer(array)
print('Buffer created, hash =', hash(buffer))
store.put(buffer, object_id)
barrier(notification_address, notification_port, world_size)
else:
barrier(notification_address, notification_port, world_size)
start = time.time()
buffer = store.get(object_id)
duration = (time.time() - start)
print('Buffer received, hash =', hash(buffer), 'duration =', duration)
array = np.frombuffer(buffer, dtype=np.int32)
    print(array)

def test_downloader():
makeStationList(client_list=['SCEDC'], min_lat=35.5, max_lat=35.6, min_lon=(- 117.8), max_lon=(- 117.4), start_time='2019-09-01 00:00:00.00', end_time='2019-09-03 00:00:00.00', channel_list=['HH[ZNE]', 'HH[Z21]', 'BH[ZNE]', 'EH[ZNE]', 'SH[ZNE]', 'HN[ZNE]', 'HN[Z21]', 'DP[ZNE]'], filter_network=['SY'], filter_station=[])
downloadMseeds(client_list=['SCEDC', 'IRIS'], stations_json='station_list.json', output_dir='downloads_mseeds', start_time='2019-09-01 00:00:00.00', end_time='2019-09-02 00:00:00.00', min_lat=35.5, max_lat=35.6, min_lon=(- 117.8), max_lon=(- 117.4), chunck_size=1, channel_list=[], n_processor=2)
    dir_list = os.listdir('.')
    successful = (('downloads_mseeds' in dir_list) and ('station_list.json' in dir_list))
    assert successful

def filter_story(input_, dim_, sent_id):
    if dim_ in ('<|xNeed|>', '<|xAttr|>', '<|xIntent|>'):
        return input_[:(sent_id + 1)]
    return input_

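# Worked example: subject-side dimensions (xNeed/xAttr/xIntent) only see the
# story up to and including sentence sent_id; other dimensions keep it all.
assert filter_story(['s0', 's1', 's2'], '<|xNeed|>', 1) == ['s0', 's1']
assert filter_story(['s0', 's1', 's2'], '<|oEffect|>', 1) == ['s0', 's1', 's2']
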
class LSQUnivariateSpline(UnivariateSpline):
def __init__(self, x, y, t, w=None, bbox=([None] * 2), k=3, ext=0, check_finite=False):
if check_finite:
w_finite = (np.isfinite(w).all() if (w is not None) else True)
if ((not np.isfinite(x).all()) or (not np.isfinite(y).all()) or (not w_finite) or (not np.isfinite(t).all())):
raise ValueError('Input(s) must not contain NaNs or infs.')
if (not np.all((diff(x) >= 0.0))):
raise ValueError('x must be increasing')
xb = bbox[0]
xe = bbox[1]
if (xb is None):
xb = x[0]
if (xe is None):
xe = x[(- 1)]
t = concatenate((([xb] * (k + 1)), t, ([xe] * (k + 1))))
n = len(t)
if (not np.all(((t[(k + 1):(n - k)] - t[k:((n - k) - 1)]) > 0), axis=0)):
raise ValueError('Interior knots t must satisfy Schoenberg-Whitney conditions')
if (not (dfitpack.fpchec(x, t, k) == 0)):
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = (data[:(- 3)] + (None, None, data[(- 1)]))
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
            raise ValueError(('Unknown extrapolation mode %s.' % ext))

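# Usage sketch, mirroring the scipy.interpolate docs (t are interior knots):
# import numpy as np
# x = np.linspace(-3, 3, 50)
# spl = LSQUnivariateSpline(x, np.exp(-x ** 2), t=[-1, 0, 1])
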
def set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None, sci_mode=None):
if (profile is not None):
if (profile == 'default'):
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
elif (profile == 'short'):
PRINT_OPTS.precision = 2
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 2
PRINT_OPTS.linewidth = 80
elif (profile == 'full'):
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = inf
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
if (precision is not None):
PRINT_OPTS.precision = precision
if (threshold is not None):
PRINT_OPTS.threshold = threshold
if (edgeitems is not None):
PRINT_OPTS.edgeitems = edgeitems
if (linewidth is not None):
PRINT_OPTS.linewidth = linewidth
    PRINT_OPTS.sci_mode = sci_mode

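# Usage sketch: a profile sets the whole option bundle first, then any explicit
# keyword overrides individual fields, e.g.
# set_printoptions(profile='short', precision=3)  # 'short' defaults, precision forced to 3
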
class AZNet(hk.Module):
def __init__(self, num_actions, num_channels: int=64, num_blocks: int=5, resnet_v2: bool=True, name='az_net'):
super().__init__(name=name)
self.num_actions = num_actions
self.num_channels = num_channels
self.num_blocks = num_blocks
self.resnet_v2 = resnet_v2
self.resnet_cls = (BlockV2 if resnet_v2 else BlockV1)
def __call__(self, x, is_training, test_local_stats):
x = x.astype(jnp.float32)
x = hk.Conv2D(self.num_channels, kernel_shape=3)(x)
if (not self.resnet_v2):
x = hk.BatchNorm(True, True, 0.9)(x, is_training, test_local_stats)
x = jax.nn.relu(x)
for i in range(self.num_blocks):
x = self.resnet_cls(self.num_channels, name=f'block_{i}')(x, is_training, test_local_stats)
if self.resnet_v2:
x = hk.BatchNorm(True, True, 0.9)(x, is_training, test_local_stats)
x = jax.nn.relu(x)
logits = hk.Conv2D(output_channels=2, kernel_shape=1)(x)
logits = hk.BatchNorm(True, True, 0.9)(logits, is_training, test_local_stats)
logits = jax.nn.relu(logits)
logits = hk.Flatten()(logits)
logits = hk.Linear(self.num_actions)(logits)
v = hk.Conv2D(output_channels=1, kernel_shape=1)(x)
v = hk.BatchNorm(True, True, 0.9)(v, is_training, test_local_stats)
v = jax.nn.relu(v)
v = hk.Flatten()(v)
v = hk.Linear(self.num_channels)(v)
v = jax.nn.relu(v)
v = hk.Linear(1)(v)
v = jnp.tanh(v)
v = v.reshape(((- 1),))
        return (logits, v)

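# Usage sketch with Haiku (a toy 3x3 board with 2 feature planes and 9 actions;
# all shapes here are made up for illustration):
import jax
import jax.numpy as jnp
import haiku as hk

def forward(x, is_training):
    return AZNet(num_actions=9)(x, is_training, test_local_stats=False)

model = hk.transform_with_state(forward)
x = jnp.zeros((1, 3, 3, 2))
params, state = model.init(jax.random.PRNGKey(0), x, True)
(logits, v), state = model.apply(params, state, None, x, True)  # logits: (1, 9), v: (1,)
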
def register_types(module):
root_module = module.get_root()
module.add_enum('QueueDiscSizePolicy', ['SINGLE_INTERNAL_QUEUE', 'SINGLE_CHILD_QUEUE_DISC', 'MULTIPLE_QUEUES', 'NO_LIMITS'])
module.add_enum('QueueSizeUnit', ['PACKETS', 'BYTES'], import_from_module='ns.network')
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DataRate', import_from_module='ns.network')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::QueueItem'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('EventId', import_from_module='ns.core')
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Mac8Address', import_from_module='ns.network')
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', u'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', u'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', u'ns3::NetDeviceContainer::Iterator&')
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('ObjectFactory', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('QueueDiscContainer')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > > const_iterator', u'ns3::QueueDiscContainer::ConstIterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > > const_iterator*', u'ns3::QueueDiscContainer::ConstIterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > > const_iterator&', u'ns3::QueueDiscContainer::ConstIterator&')
module.add_class('QueueDiscFactory')
module.add_class('QueueSize', import_from_module='ns.network')
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('Timer', import_from_module='ns.core')
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core')
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['bool'])
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned int'])
module.add_class('TrafficControlHelper')
typehandlers.add_type_alias(u'std::vector< unsigned short >', u'ns3::TrafficControlHelper::ClassIdList')
typehandlers.add_type_alias(u'std::vector< unsigned short >*', u'ns3::TrafficControlHelper::ClassIdList*')
typehandlers.add_type_alias(u'std::vector< unsigned short >&', u'ns3::TrafficControlHelper::ClassIdList&')
typehandlers.add_type_alias(u'std::vector< unsigned short >', u'ns3::TrafficControlHelper::HandleList')
typehandlers.add_type_alias(u'std::vector< unsigned short >*', u'ns3::TrafficControlHelper::HandleList*')
typehandlers.add_type_alias(u'std::vector< unsigned short >&', u'ns3::TrafficControlHelper::HandleList&')
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('PacketFilter', parent=root_module['ns3::Object'])
module.add_class('QueueDisc', parent=root_module['ns3::Object'])
module.add_enum('WakeMode', ['WAKE_ROOT', 'WAKE_CHILD'], outer_class=root_module['ns3::QueueDisc'])
module.add_class('Stats', outer_class=root_module['ns3::QueueDisc'])
typehandlers.add_type_alias(u'std::function< void ( ns3::Ptr< ns3::QueueDiscItem > ) >', u'ns3::QueueDisc::SendCallback')
typehandlers.add_type_alias(u'std::function< void ( ns3::Ptr< ns3::QueueDiscItem > ) >*', u'ns3::QueueDisc::SendCallback*')
typehandlers.add_type_alias(u'std::function< void ( ns3::Ptr< ns3::QueueDiscItem > ) >&', u'ns3::QueueDisc::SendCallback&')
typehandlers.add_type_alias(u'ns3::Queue< ns3::QueueDiscItem >', u'ns3::QueueDisc::InternalQueue')
typehandlers.add_type_alias(u'ns3::Queue< ns3::QueueDiscItem >*', u'ns3::QueueDisc::InternalQueue*')
typehandlers.add_type_alias(u'ns3::Queue< ns3::QueueDiscItem >&', u'ns3::QueueDisc::InternalQueue&')
module.add_class('QueueDiscClass', parent=root_module['ns3::Object'])
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
module.add_class('RedQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_enum('FengStatus', ['Above', 'Between', 'Below'], outer_class=root_module['ns3::RedQueueDisc'])
module.add_enum('', ['DTYPE_NONE', 'DTYPE_FORCED', 'DTYPE_UNFORCED'], outer_class=root_module['ns3::RedQueueDisc'])
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('TbfQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('TrafficControlLayer', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > >', u'ns3::TrafficControlLayer::QueueDiscVector')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > >*', u'ns3::TrafficControlLayer::QueueDiscVector*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::QueueDisc > >&', u'ns3::TrafficControlLayer::QueueDiscVector&')
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('CoDelQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('FifoQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('FqCoDelFlow', parent=root_module['ns3::QueueDiscClass'])
module.add_enum('FlowStatus', ['INACTIVE', 'NEW_FLOW', 'OLD_FLOW'], outer_class=root_module['ns3::FqCoDelFlow'])
module.add_class('FqCoDelQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('MqQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('PfifoFastQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('PieQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_enum('BurstStateT', ['NO_BURST', 'IN_BURST', 'IN_BURST_PROTECTING'], outer_class=root_module['ns3::PieQueueDisc'])
module.add_class('PrioQueueDisc', parent=root_module['ns3::QueueDisc'])
module.add_class('PriomapChecker', parent=root_module['ns3::AttributeChecker'])
module.add_class('PriomapValue', parent=root_module['ns3::AttributeValue'])
module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )', u'ns3::QueueItem::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )*', u'ns3::QueueItem::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )&', u'ns3::QueueItem::TracedCallback&')
module.add_class('QueueSizeChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('QueueSizeValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('StringChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('StringValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'bool', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::QueueDiscItem>', 'const char *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::QueueDiscItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Time', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'unsigned int', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('QueueDiscItem', import_from_module='ns.network', parent=root_module['ns3::QueueItem'])
module.add_container('std::vector< ns3::Ptr< ns3::QueueDisc > >', 'ns3::Ptr< ns3::QueueDisc >', container_type=u'vector')
module.add_container('std::vector< unsigned short >', 'short unsigned int', container_type=u'vector')
module.add_container('ns3::TrafficControlHelper::ClassIdList', 'short unsigned int', container_type=u'vector')
module.add_container('ns3::TrafficControlHelper::HandleList', 'short unsigned int', container_type=u'vector')
module.add_container('std::map< std::string, unsigned int >', ('std::string', 'unsigned int'), container_type=u'map')
module.add_container('std::map< std::string, unsigned long >', ('std::string', 'long unsigned int'), container_type=u'map')
typehandlers.add_type_alias(u'std::array< unsigned short, 16 >', u'ns3::Priomap')
typehandlers.add_type_alias(u'std::array< unsigned short, 16 >*', u'ns3::Priomap*')
typehandlers.add_type_alias(u'std::array< unsigned short, 16 >&', u'ns3::Priomap&')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module) |
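# The calls above drive pybindgen's code generation for the ns-3 bindings. A
# minimal, hypothetical sketch of the same pattern (module, header, and class
# names below are illustrative, not taken from ns-3):
import sys
from pybindgen import Module, FileCodeSink

mod = Module('demo')
mod.add_include('"demo.h"')
mod.add_class('Widget')                          # register a C++ class
mod.add_enum('Color', ['RED', 'GREEN', 'BLUE'])  # register a C++ enum
mod.add_container('std::vector< int >', 'int', container_type=u'vector')
mod.generate(FileCodeSink(sys.stdout))           # emit the wrapper C code |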
def test_serialize_infinity():
@dace.program
def reduction_infinity_1(a: dace.float64[3]):
return a.max()
sdfg = reduction_infinity_1.to_sdfg()
json_string = json.dumps(sdfg.to_json())
assert json_string.find('Infinity') == -1
@dace.program
def reduction_infinity_2(a: dace.float64[3]):
return np.max(a)
sdfg = reduction_infinity_2.to_sdfg()
json_string = json.dumps(sdfg.to_json())
assert json_string.find('Infinity') == -1 |
def S8():
A = Matrix(GF(2), [[1, 0, 0, 0, 0, 1, 1, 1], [0, 1, 0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 1, 0, 1], [0, 0, 0, 1, 1, 1, 1, 1]])
M = BinaryMatroid(A, 'abcdefgh')
M.rename(('S8: ' + repr(M)))
return M |
class PackageImporter(Importer):
modules: Dict[(str, types.ModuleType)]
def __init__(self, file_or_buffer: Union[(str, torch._C.PyTorchFileReader, Path, BinaryIO)], module_allowed: Callable[([str], bool)]=(lambda module_name: True)):
self.zip_reader: Any
if isinstance(file_or_buffer, torch._C.PyTorchFileReader):
self.filename = '<pytorch_file_reader>'
self.zip_reader = file_or_buffer
elif isinstance(file_or_buffer, (Path, str)):
self.filename = str(file_or_buffer)
if (not os.path.isdir(self.filename)):
self.zip_reader = torch._C.PyTorchFileReader(self.filename)
else:
self.zip_reader = DirectoryReader(self.filename)
else:
self.filename = '<binary>'
self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer)
self.root = _PackageNode(None)
self.modules = {}
self.extern_modules = self._read_extern()
for extern_module in self.extern_modules:
if (not module_allowed(extern_module)):
raise ImportError(f"package '{file_or_buffer}' needs the external module '{extern_module}' but that module has been disallowed")
self._add_extern(extern_module)
for fname in self.zip_reader.get_all_records():
self._add_file(fname)
self.patched_builtins = builtins.__dict__.copy()
self.patched_builtins['__import__'] = self.__import__
self.modules['torch_package_importer'] = self
self._mangler = PackageMangler()
self.storage_context: Any = None
self.last_map_location = None
self.Unpickler = (lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs))
def import_module(self, name: str, package=None):
name = self._mangler.demangle(name)
return self._gcd_import(name)
def load_binary(self, package: str, resource: str) -> bytes:
path = self._zipfile_path(package, resource)
return self.zip_reader.get_record(path)
def load_text(self, package: str, resource: str, encoding: str='utf-8', errors: str='strict') -> str:
data = self.load_binary(package, resource)
return data.decode(encoding, errors)
def load_pickle(self, package: str, resource: str, map_location=None) -> Any:
pickle_file = self._zipfile_path(package, resource)
restore_location = _get_restore_location(map_location)
loaded_storages = {}
loaded_reduces = {}
storage_context = torch._C.DeserializationStorageContext()
def load_tensor(data_type, size, key, location, restore_location):
name = f'{int(key)}.storage'
dtype = data_type(0).dtype
if storage_context.has_storage(name):
storage = storage_context.get_storage(name, dtype).storage()
else:
tensor = self.zip_reader.get_storage_from_record(('.data/' + name), size, dtype)
if isinstance(self.zip_reader, torch._C.PyTorchFileReader):
storage_context.add_storage(name, tensor)
storage = tensor.storage()
loaded_storages[key] = restore_location(storage, location)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if (typename == 'storage'):
(data_type, key, location, size) = data
if (key not in loaded_storages):
load_tensor(data_type, size, key, _maybe_decode_ascii(location), restore_location)
storage = loaded_storages[key]
return storage
elif (typename == 'reduce_package'):
if (len(data) == 2):
(func, args) = data
return func(self, *args)
(reduce_id, func, args) = data
if (reduce_id not in loaded_reduces):
loaded_reduces[reduce_id] = func(self, *args)
return loaded_reduces[reduce_id]
else:
data_file = io.BytesIO(self.zip_reader.get_record(pickle_file))
unpickler = self.Unpickler(data_file)
unpickler.persistent_load = persistent_load
@contextmanager
def set_deserialization_context():
self.storage_context = storage_context
self.last_map_location = map_location
try:
yield
finally:
self.storage_context = None
self.last_map_location = None
with set_deserialization_context():
result = unpickler.load()
torch._utils._validate_loaded_sparse_tensors()
return result
def id(self):
return self._mangler.parent_name()
def file_structure(self, *, include: 'GlobPattern'='**', exclude: 'GlobPattern'=()) -> Directory:
return _create_directory_from_file_list(self.filename, self.zip_reader.get_all_records(), include, exclude)
def _read_extern(self):
return self.zip_reader.get_record('.data/extern_modules').decode('utf-8').splitlines(keepends=False)
def _make_module(self, name: str, filename: Optional[str], is_package: bool, parent: str):
mangled_filename = (self._mangler.mangle(filename) if filename else None)
spec = importlib.machinery.ModuleSpec(name, self, origin='<package_importer>', is_package=is_package)
module = importlib.util.module_from_spec(spec)
self.modules[name] = module
module.__name__ = self._mangler.mangle(name)
ns = module.__dict__
ns['__spec__'] = spec
ns['__loader__'] = self
ns['__file__'] = mangled_filename
ns['__cached__'] = None
ns['__builtins__'] = self.patched_builtins
ns['__torch_package__'] = True
assert (module.__name__ not in _package_imported_modules)
_package_imported_modules[module.__name__] = module
self._install_on_parent(parent, name, module)
if (filename is not None):
assert (mangled_filename is not None)
assert (filename not in linecache.cache)
linecache.lazycache(mangled_filename, ns)
code = self._compile_source(filename, mangled_filename)
exec(code, ns)
return module
def _load_module(self, name: str, parent: str):
cur: _PathNode = self.root
for atom in name.split('.'):
if ((not isinstance(cur, _PackageNode)) or (atom not in cur.children)):
raise ModuleNotFoundError(f'No module named "{name}" in self-contained archive "{self.filename}" and the module is also not in the list of allowed external modules: {self.extern_modules}', name=name)
cur = cur.children[atom]
if isinstance(cur, _ExternNode):
module = self.modules[name] = importlib.import_module(name)
return module
return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent)
def _compile_source(self, fullpath: str, mangled_filename: str):
source = self.zip_reader.get_record(fullpath)
source = _normalize_line_endings(source)
return compile(source, mangled_filename, 'exec', dont_inherit=True)
def get_source(self, module_name) -> str:
module = self.import_module(demangle(module_name))
return self.zip_reader.get_record(demangle(module.__file__)).decode('utf-8')
def get_resource_reader(self, fullname):
try:
package = self._get_package(fullname)
except ImportError:
return None
if (package.__loader__ is not self):
return None
return _PackageResourceReader(self, fullname)
def _install_on_parent(self, parent: str, name: str, module: types.ModuleType):
if (not parent):
return
parent_module = self.modules[parent]
if (parent_module.__loader__ is self):
setattr(parent_module, name.rpartition('.')[2], module)
def _do_find_and_load(self, name):
path = None
parent = name.rpartition('.')[0]
if parent:
if (parent not in self.modules):
self._gcd_import(parent)
if (name in self.modules):
return self.modules[name]
parent_module = self.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
module = self._load_module(name, parent)
self._install_on_parent(parent, name, module)
return module
def _find_and_load(self, name):
module = self.modules.get(name, _NEEDS_LOADING)
if (module is _NEEDS_LOADING):
return self._do_find_and_load(name)
if (module is None):
message = 'import of {} halted; None in sys.modules'.format(name)
raise ModuleNotFoundError(message, name=name)
if (name == 'os'):
self.modules['os.path'] = cast(Any, module).path
elif (name == 'typing'):
self.modules['typing.io'] = cast(Any, module).io
self.modules['typing.re'] = cast(Any, module).re
return module
def _gcd_import(self, name, package=None, level=0):
_sanity_check(name, package, level)
if (level > 0):
name = _resolve_name(name, package, level)
return self._find_and_load(name)
def _handle_fromlist(self, module, fromlist, *, recursive=False):
module_name = demangle(module.__name__)
if hasattr(module, '__path__'):
for x in fromlist:
if (not isinstance(x, str)):
if recursive:
where = (module_name + '.__all__')
else:
where = "``from list''"
raise TypeError(f'Item in {where} must be str, not {type(x).__name__}')
elif (x == '*'):
if ((not recursive) and hasattr(module, '__all__')):
self._handle_fromlist(module, module.__all__, recursive=True)
elif (not hasattr(module, x)):
from_name = '{}.{}'.format(module_name, x)
try:
self._gcd_import(from_name)
except ModuleNotFoundError as exc:
if ((exc.name == from_name) and (self.modules.get(from_name, _NEEDS_LOADING) is not None)):
continue
raise
return module
def __import__(self, name, globals=None, locals=None, fromlist=(), level=0):
if (level == 0):
module = self._gcd_import(name)
else:
globals_ = (globals if (globals is not None) else {})
package = _calc___package__(globals_)
module = self._gcd_import(name, package, level)
if (not fromlist):
if (level == 0):
return self._gcd_import(name.partition('.')[0])
elif (not name):
return module
else:
cut_off = (len(name) - len(name.partition('.')[0]))
module_name = demangle(module.__name__)
return self.modules[module_name[:(len(module_name) - cut_off)]]
else:
return self._handle_fromlist(module, fromlist)
def _get_package(self, package):
if hasattr(package, '__spec__'):
if (package.__spec__.submodule_search_locations is None):
raise TypeError('{!r} is not a package'.format(package.__spec__.name))
else:
return package
else:
module = self.import_module(package)
if (module.__spec__.submodule_search_locations is None):
raise TypeError('{!r} is not a package'.format(package))
else:
return module
def _zipfile_path(self, package, resource=None):
package = self._get_package(package)
assert (package.__loader__ is self)
name = demangle(package.__name__)
if (resource is not None):
resource = _normalize_path(resource)
return f"{name.replace('.', '/')}/{resource}"
else:
return f"{name.replace('.', '/')}"
def _get_or_create_package(self, atoms: List[str]) -> 'Union[_PackageNode, _ExternNode]':
cur = self.root
for (i, atom) in enumerate(atoms):
node = cur.children.get(atom, None)
if (node is None):
node = cur.children[atom] = _PackageNode(None)
if isinstance(node, _ExternNode):
return node
if isinstance(node, _ModuleNode):
name = '.'.join(atoms[:i])
raise ImportError(f'inconsistent module structure. module {name} is not a package, but has submodules')
assert isinstance(node, _PackageNode)
cur = node
return cur
def _add_file(self, filename: str):
(*prefix, last) = filename.split('/')
if ((len(prefix) > 1) and (prefix[0] == '.data')):
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(f'inconsistent module structure. package contains a module file {filename} that is a subpackage of a module marked external.')
if (last == '__init__.py'):
package.source_file = filename
elif last.endswith('.py'):
package_name = last[:(- len('.py'))]
package.children[package_name] = _ModuleNode(filename)
def _add_extern(self, extern_name: str):
(*prefix, last) = extern_name.split('.')
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
return
package.children[last] = _ExternNode() |
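# Usage sketch for PackageImporter, assuming an archive 'model.pt' that was
# written earlier with torch.package's PackageExporter (the file, module, and
# resource names are hypothetical):
importer = PackageImporter('model.pt')
mod = importer.import_module('my_package.model')       # import packaged source
obj = importer.load_pickle('my_package', 'model.pkl')  # unpickle a stored resource
print(importer.file_structure())                       # inspect the archive layout |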
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_) |
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(label, append_eos=False, add_if_not_exist=False) |
def main(args):
with open(args.input_file, 'r') as f:
lines = f.readlines()
ref_pts = np.array([[(- 0.), (- 0.)], [0., (- 0.)], [0.000225, 0.], [(- 0.), 0.], [0., 0.]])
for (i, line) in enumerate(lines):
line = line.strip()
items = line.split()
img_path = items[0]
src_pts = [float(item) for item in items[1:]]
if args.prefix:
img_path = os.path.join(args.prefix, img_path)
img = misc.imread(img_path)
(img_new, new_pts, tfm) = align(img, src_pts, ref_pts, args.image_size, args.scale, args.transpose_input)
if args.visualize:
plt.imshow(img_new)
plt.show()
if args.output_dir:
file_name = os.path.basename(img_path)
sub_dir = [d for d in img_path.split('/') if (d != '')]
sub_dir = '/'.join(sub_dir[(- args.dir_depth):(- 1)])
dir_path = os.path.join(args.output_dir, sub_dir)
if (not os.path.isdir(dir_path)):
os.makedirs(dir_path)
img_path_new = os.path.join(dir_path, file_name)
misc.imsave(img_path_new, img_new)
if ((i % 100) == 0):
print(img_path_new)
return |
def load_all_logs(log_dir: str) -> List[Dict]:
logs_name_list = []
for dir_file in os.listdir(log_dir):
if dir_file.endswith(LOG_EXTENSION):
logs_name_list.append(dir_file)
log_dict_list = []
for file_name in logs_name_list:
with open(os.path.join(log_dir, file_name), 'rb') as log_file:
data = pickle.load(log_file)
if ('monitor_data' not in data):
continue
log_dict_list.append(data)
return log_dict_list |
@pytest.fixture(scope='module')
def simple_dataframe_array_pandas():
columns_array = ['user_id', 'item_id', 'timestamp']
data_array = [(1, [2, 1, 0], 19842), (1, [4, 1], 19844), (1, [3, 1, 0], 19843), (1, [5, 1], 19845), (1, [6, 1, 0], 19846), (1, [7, 1], 19847), (2, [1, 0, 1], 19841), (2, [2, 0], 19842), (2, [3, 0, 1], 19843), (2, [4, 0], 19844), (3, [10, 0], 19844), (4, [11, 0, 1], 19843), (4, [12, 0], 19845), (1, [1, 0], 19841)]
return pd.DataFrame(data_array, columns=columns_array) |
def load_infogan_dsprites_decoder():
cfg = config['global_config']
cfg.update(config['test_config'][0])
work_dir = '/deep/group/disentangle/InfoGAN-CR/dsprites_results/'
data = np.random.randn(50000, 64, 64, 1)
(_, height, width, depth) = data.shape
latent_list = []
for i in range(cfg['uniform_reg_dim']):
latent_list.append(UniformLatent(in_dim=1, out_dim=1, low=(- 1.0), high=1.0, q_std=1.0, apply_reg=True))
if (cfg['uniform_not_reg_dim'] > 0):
latent_list.append(UniformLatent(in_dim=cfg['uniform_not_reg_dim'], out_dim=cfg['uniform_not_reg_dim'], low=(- 1.0), high=1.0, q_std=1.0, apply_reg=False))
latent = JointLatent(latent_list=latent_list)
decoder = Decoder(output_width=width, output_height=height, output_depth=depth)
infoGANDiscriminator = InfoGANDiscriminator(output_length=latent.reg_out_dim, q_l_dim=cfg['q_l_dim'])
crDiscriminator = CrDiscriminator(output_length=latent.num_reg_latent)
checkpoint_dir = os.path.join(work_dir, 'checkpoint')
if (not os.path.exists(checkpoint_dir)):
os.makedirs(checkpoint_dir)
sample_dir = os.path.join(work_dir, 'sample')
if (not os.path.exists(sample_dir)):
os.makedirs(sample_dir)
time_path = os.path.join(work_dir, 'time.txt')
metric_path = os.path.join(work_dir, 'metric.csv')
run_config = tf.ConfigProto()
sess = tf.Session(config=run_config)
metric_callbacks = []
gan = INFOGAN_CR(sess=sess, checkpoint_dir=checkpoint_dir, sample_dir=sample_dir, time_path=time_path, epoch=cfg['epoch'], batch_size=cfg['batch_size'], data=data, vis_freq=cfg['vis_freq'], vis_num_sample=cfg['vis_num_sample'], vis_num_rep=cfg['vis_num_rep'], latent=latent, decoder=decoder, infoGANDiscriminator=infoGANDiscriminator, crDiscriminator=crDiscriminator, gap_start=cfg['gap_start'], gap_decrease_times=cfg['gap_decrease_times'], gap_decrease=cfg['gap_decrease'], gap_decrease_batch=cfg['gap_decrease_batch'], cr_coe_start=cfg['cr_coe_start'], cr_coe_increase_times=cfg['cr_coe_increase_times'], cr_coe_increase=cfg['cr_coe_increase'], cr_coe_increase_batch=cfg['cr_coe_increase_batch'], info_coe_de=cfg['info_coe_de'], info_coe_infod=cfg['info_coe_infod'], metric_callbacks=metric_callbacks, metric_freq=cfg['metric_freq'], metric_path=metric_path, output_reverse=cfg['output_reverse'], de_lr=cfg['de_lr'], infod_lr=cfg['infod_lr'], crd_lr=cfg['crd_lr'], summary_freq=cfg['summary_freq'])
gan.build()
gan.load()
return (sess, gan) |
class Mish(nn.Module):
def __init__(self, inplace: bool=False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x) |
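# Mish.forward calls a free function `mish` that is not part of this snippet;
# a minimal sketch of the standard formulation x * tanh(softplus(x)) (the
# signature here is an assumption):
import torch
import torch.nn.functional as F

def mish(x: torch.Tensor) -> torch.Tensor:
    # softplus(x) = log(1 + exp(x)); Mish is a smooth, self-gated activation
    return x * torch.tanh(F.softplus(x)) |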
class Block8(nn.Module):
def __init__(self, scale=1.0, noReLU=False):
super().__init__()
self.scale = scale
self.noReLU = noReLU
self.branch0 = BasicConv2d(1792, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(BasicConv2d(1792, 192, kernel_size=1, stride=1), BasicConv2d(192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1)), BasicConv2d(192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0)))
self.conv2d = nn.Conv2d(384, 1792, kernel_size=1, stride=1)
if (not self.noReLU):
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = ((out * self.scale) + x)
if (not self.noReLU):
out = self.relu(out)
return out |
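# Shape check for the residual block above (assumes BasicConv2d, referenced
# but not defined in this snippet, is in scope; sizes are illustrative):
import torch

block = Block8(scale=0.2)
out = block(torch.randn(2, 1792, 17, 17))  # the block expects 1792 input channels
assert out.shape == (2, 1792, 17, 17)      # channels and spatial size are preserved |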
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx=None, condition: TfExpressionEx=True) -> TfExpressionEx:
tfutil.assert_tf_initialized()
name_id = name.replace('/', '_')
if tfutil.is_tf_expression(value):
with tf.name_scope(('summary_' + name_id)), tf.device(value.device):
condition = tf.convert_to_tensor(condition, name='condition')
update_op = tf.cond(condition, (lambda : tf.group(_create_var(name, value))), tf.no_op)
with tf.control_dependencies([update_op]):
return tf.identity((value if (passthru is None) else passthru))
else:
assert (not tfutil.is_tf_expression(passthru))
assert (not tfutil.is_tf_expression(condition))
if condition:
if (name not in _immediate):
with tfutil.absolute_name_scope(('Autosummary/' + name_id)), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(_dtype)
update_op = _create_var(name, update_value)
_immediate[name] = (update_op, update_value)
(update_op, update_value) = _immediate[name]
tfutil.run(update_op, {update_value: value})
return (value if (passthru is None) else passthru) |
def register_Ns3UanPhyPer_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::UanPhyPer const &', 'arg0')])
cls.add_method('CalcPer', 'double', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')], is_pure_virtual=True, is_virtual=True)
cls.add_method('Clear', 'void', [], is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
return |
def _convert_recs_to_tensor(recs: PandasDataFrame) -> torch.Tensor:
return _build_tensor_from_grouped_items(_groupby_recs_items(recs), (- 3)) |
def get_answer(paragraphs, question, reasoningType):
((q1_b, q2_b), (q1_i, q2_i)) = model.get_output('span-predictor', question, paragraphs)
if (reasoningType == 0):
print('Only run bridging')
answer1_b = model.get_output('qa', [q1_b], paragraphs)[0][0]
q2_b = q2_b.replace('[ANSWER]', answer1_b['text'])
answer2_b = model.get_output('qa', [q2_b], paragraphs)[0][0]
return {'q_type': 'Bridging', 'subq1': q1_b, 'answer1': answer1_b['text'], 'subq2': q2_b, 'answer2': answer2_b['text']}
if (reasoningType == 1):
print('Only run intersection')
(answer1_i, answer2_i) = model.get_output('qa', [q1_i, q2_i], paragraphs)
answer_i = get_answer_i(answer1_i, answer2_i)
return {'q_type': 'Intersection', 'subq1': q1_i, 'subq2': q2_i, 'answer2': answer_i['text']}
(answer1_b, answer1_i) = model.get_output('qa', [q1_b, q1_i], paragraphs)
answer1_b = answer1_b[0]
q2_b = q2_b.replace('[ANSWER]', answer1_b['text'])
(answer2_b, answer2_i) = model.get_output('qa', [q2_b, q2_i], paragraphs)
answer2_b = answer2_b[0]
answer_i = get_answer_i(answer1_i, answer2_i)
final_pred = model.get_output('classifier', ['{} {} ({}) {}'.format(q1_b, q2_b, 'bridge', answer2_b['text']), '{} {} ({}) {}'.format(q1_i, q2_i, 'intersec', answer_i['text'])], paragraphs)
if (final_pred[0][2] == 0):
return {'q_type': 'Bridging', 'subq1': q1_b, 'answer1': answer1_b['text'], 'subq2': q2_b, 'answer2': answer2_b['text']}
return {'q_type': 'Intersection', 'subq1': q1_i, 'subq2': q2_i, 'answer2': answer_i['text']} |
def load_param_test_data(output_path: str):
return (load_data_tensors_TW(join(output_path, 'vectors', 'test', 'identifiers_param_test_datapoints_x.npy')), load_data_tensors_TW(join(output_path, 'vectors', 'test', 'tokens_param_test_datapoints_x.npy')), load_data_tensors_TW(join(output_path, 'vectors', 'test', 'params_test_aval_types_dp.npy'))) |
def rollout(model, episode, env, tasks, demo_task_counter, live_task_counter, modalities, cfg, id_to_task_dict=None, embeddings=None):
(state_obs, rgb_obs, depth_obs, actions, _, reset_info, idx) = episode
seq_len_max = (state_obs.shape[0] - 1)
for mod in modalities:
groundtruth_task = id_to_task_dict[int(idx)]
obs = env.reset(robot_obs=reset_info['robot_obs'][0], scene_obs=reset_info['scene_obs'][0])
start_info = env.get_info()
demo_task_counter += Counter(groundtruth_task)
current_img_obs = obs['rgb_obs']
current_state_obs = obs['state_obs']
if (mod == 'lang'):
_task = np.random.choice(list(groundtruth_task))
task_embeddings = embeddings[_task]
goal_lang = torch.tensor(task_embeddings[np.random.randint(task_embeddings.shape[0])]).unsqueeze(0)
else:
goal_img = [rgb_ob[(- 1)].unsqueeze(0).cuda() for rgb_ob in rgb_obs]
goal_state = state_obs[(- 1)].unsqueeze(0).cuda()
for step in range(cfg.ep_len):
if ((step % cfg.replan_freq) == 0):
if (mod == 'lang'):
(plan, latent_goal) = model.get_pp_plan_lang(current_img_obs, current_state_obs, goal_lang)
else:
(plan, latent_goal) = model.get_pp_plan_vision(current_img_obs, goal_img, current_state_obs, goal_state)
kps = None
if cfg.visualize:
imshow_tensor('goal_img', goal_img[0], wait=1)
imshow_tensor('current_img', current_img_obs[0], wait=1, keypoints=kps)
imshow_tensor('dataset_img', rgb_obs[0][np.clip(step, 0, seq_len_max)], wait=1)
action = model.predict_with_plan(current_img_obs, current_state_obs, latent_goal, plan)
(obs, _, _, current_info) = env.step(action)
current_task_info = tasks.get_task_info_for_set(start_info, current_info, groundtruth_task)
if (len(current_task_info) > 0):
live_task_counter += Counter(current_task_info)
break
current_img_obs = obs['rgb_obs']
current_state_obs = obs['state_obs']
print_task_log(demo_task_counter, live_task_counter, mod) |
def make_datasets(split):
is_train = (split == 'train')
paths_catalog = import_file('maskrcnn.config.paths_catalog', cfg.PATHS_CATALOG, True)
DatasetCatalog = paths_catalog.DatasetCatalog
dataset_list = eval(('cfg.DATASETS.' + split.upper()))
transforms = build_transforms(is_train)
datasets = build_dataset(dataset_list, transforms, DatasetCatalog, is_train)
return datasets |
def plugin(query_value_replaced: List[str], values_in_order: List[str]) -> str:
q_length = len(query_value_replaced)
query_w_values = query_value_replaced[:]
value_idx = [idx for idx in range(q_length) if (query_value_replaced[idx] == VALUE_NUM_SYMBOL.lower())]
assert (len(value_idx) == len(values_in_order))
for (idx, value) in zip(value_idx, values_in_order):
query_w_values[idx] = value
return ' '.join(query_w_values) |
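# Worked example for plugin(), assuming VALUE_NUM_SYMBOL = 'value' (the
# constant is referenced above but not defined in this snippet):
VALUE_NUM_SYMBOL = 'value'
tokens = ['select', 'name', 'from', 'users', 'where', 'age', '=', 'value']
print(plugin(tokens, ['18']))  # -> 'select name from users where age = 18' |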
class RatesOracle(nn.Module):
def __init__(self, config, num_neurons, device, **kwargs):
super().__init__()
assert config.REQUIRES_RATES, 'Oracle requires rates'
if (config.LOSS.TYPE == 'poisson'):
self.classifier = nn.PoissonNLLLoss(reduction='none', log_input=config.LOGRATE)
else:
raise Exception(f'Loss type {config.LOSS.TYPE} not supported')
def get_hidden_size(self):
return 0
def forward(self, src, mask_labels, rates=None, **kwargs):
loss = self.classifier(rates, mask_labels)
masked_loss = loss[(mask_labels != UNMASKED_LABEL)]
masked_loss = masked_loss.mean()
return (masked_loss.unsqueeze(0), rates, None, torch.tensor(0, device=masked_loss.device, dtype=torch.float), None, None) |
def subsample_dataset(dataset, idxs):
dataset.data = np.array(dataset.data)[idxs].tolist()
dataset.target = np.array(dataset.target)[idxs].tolist()
dataset.uq_idxs = dataset.uq_idxs[idxs]
return dataset |
def compute_deconv_layer_sizes(h_in, w_in, kernel_sizes, strides, paddings=None):
if (paddings is None):
for (kernel, stride) in zip(kernel_sizes, strides):
(h_in, w_in) = compute_deconv_output_size(h_in, w_in, kernel, stride)
print('Output Size:', (h_in, w_in))
else:
for (kernel, stride, padding) in zip(kernel_sizes, strides, paddings):
(h_in, w_in) = compute_deconv_output_size(h_in, w_in, kernel, stride, padding=padding)
print('Output Size:', (h_in, w_in)) |
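# compute_deconv_layer_sizes relies on a helper that is not shown in this
# snippet; a sketch using the standard ConvTranspose2d size formula,
# out = (in - 1) * stride - 2 * padding + kernel, with dilation and
# output_padding left at their defaults:
def compute_deconv_output_size(h_in, w_in, kernel, stride, padding=0):
    h_out = (h_in - 1) * stride - 2 * padding + kernel
    w_out = (w_in - 1) * stride - 2 * padding + kernel
    return h_out, w_out |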
class BasePyTorchExporter(Exporter):
def __init__(self, model: torch.nn.Module, is_layer_exportable_fn: Callable, save_model_path: str, repr_dataset: Callable):
super().__init__(model, is_layer_exportable_fn, save_model_path)
self.model = copy.deepcopy(self.model)
self.repr_dataset = repr_dataset
def _substitute_fully_quantized_model(self):
for layer in self.model.modules():
if isinstance(layer, PytorchQuantizationWrapper):
_set_quantized_weights_in_wrapper(layer)
self._replace_wrapped_with_unwrapped()
def _replace_wrapped_with_unwrapped(self):
for (name, module) in self.model.named_children():
if isinstance(module, PytorchQuantizationWrapper):
setattr(self.model, name, module.layer) |
def ref_confusion_matrix(x, l, axis):
orig_x = x.copy()
x = np.rollaxis(x, axis, x.ndim).reshape((- 1), x.shape[axis])
ll = np.rollaxis(l, axis, x.ndim).flatten()
y = np.zeros((orig_x.shape[axis], orig_x.shape[axis]), int)
for (x_, ll_) in zip(x, ll):
index = (- 1)
for (i, x__) in enumerate(x_):
if (x__ >= x_[index]):
index = i
y[ll_][index] += 1
return y |
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return (self.stdoffset + self.dst(dt))
def dst(self, dt):
if ((dt is None) or (dt.tzinfo is None)):
return ZERO
assert (dt.tzinfo is self)
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
if (start <= dt.replace(tzinfo=None) < end):
return HOUR
else:
return ZERO |
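# USTimeZone references module-level helpers that are not shown; these follow
# the canonical tzinfo example from the Python datetime docs (post-2007 US
# DST rules):
from datetime import datetime, timedelta

ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# DST starts on the second Sunday in March and ends on the first Sunday in
# November, at 2am standard time; the year is a placeholder.
DSTSTART = datetime(1, 3, 8, 2)
DSTEND = datetime(1, 11, 1, 2)

def first_sunday_on_or_after(dt):
    days_to_go = 6 - dt.weekday()
    if days_to_go:
        dt += timedelta(days_to_go)
    return dt |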
class Pose():
def __init__(self, header: PoseHeader, body: PoseBody):
self.header = header
self.body = body
@staticmethod
def read(buffer: bytes, pose_body: Type[PoseBody]=NumPyPoseBody, **kwargs):
reader = BufferReader(buffer)
header = PoseHeader.read(reader)
body = pose_body.read(header, reader, **kwargs)
return Pose(header, body)
def write(self, buffer: BinaryIO):
self.header.write(buffer)
self.body.write(self.header.version, buffer)
def focus(self):
mins = ma.min(self.body.data, axis=(0, 1, 2))
maxs = ma.max(self.body.data, axis=(0, 1, 2))
if (np.count_nonzero(mins) > 0):
self.body.data = ma.subtract(self.body.data, mins)
dimensions = (maxs - mins).tolist()
self.header.dimensions = PoseHeaderDimensions(*dimensions)
def normalize(self, info: PoseNormalizationInfo, scale_factor: float=1) -> 'Pose':
transposed = self.body.points_perspective()
p1s = transposed[info.p1]
p2s = transposed[info.p2]
center = ((p2s + p1s) / 2).mean(axis=(0, 1))
self.body.data -= center
mean_distance = distance_batch(p1s, p2s).mean()
scale = (scale_factor / mean_distance)
self.body.data = (self.body.data * scale)
return self
def normalize_distribution(self, mu=None, std=None, axis=(0, 1)):
mu = (mu if (mu is not None) else self.body.data.mean(axis=axis))
std = (std if (std is not None) else self.body.data.std(axis=axis))
self.body.data = ((self.body.data - mu) / std)
return (mu, std)
def unnormalize_distribution(self, mu, std):
self.body.data = ((self.body.data * std) + mu)
def frame_dropout_uniform(self, dropout_min: float=0.2, dropout_max: float=1.0) -> Tuple[('Pose', List[int])]:
(body, selected_indexes) = self.body.frame_dropout_uniform(dropout_min=dropout_min, dropout_max=dropout_max)
return (Pose(header=self.header, body=body), selected_indexes)
def frame_dropout_normal(self, dropout_mean: float=0.5, dropout_std: float=0.1) -> Tuple[('Pose', List[int])]:
(body, selected_indexes) = self.body.frame_dropout_normal(dropout_mean=dropout_mean, dropout_std=dropout_std)
return (Pose(header=self.header, body=body), selected_indexes)
def get_components(self, components: List[str], points: Dict[(str, List[str])]=None):
indexes = {}
new_components = {}
idx = 0
for component in self.header.components:
if (component.name in components):
new_component = PoseHeaderComponent(component.name, component.points, component.limbs, component.colors, component.format)
if ((points is not None) and (component.name in points)):
new_component.points = points[component.name]
point_index_mapping = {component.points.index(point): i for (i, point) in enumerate(new_component.points)}
old_indexes_set = set(point_index_mapping.keys())
new_component.limbs = [(point_index_mapping[l1], point_index_mapping[l2]) for (l1, l2) in component.limbs if ((l1 in old_indexes_set) and (l2 in old_indexes_set))]
indexes[component.name] = [(idx + component.points.index(p)) for p in new_component.points]
else:
indexes[component.name] = list(range(idx, (len(component.points) + idx)))
new_components[component.name] = new_component
idx += len(component.points)
new_components_order = [new_components[c] for c in components]
indexes_order = [indexes[c] for c in components]
new_header = PoseHeader(self.header.version, self.header.dimensions, new_components_order)
flat_indexes = list(chain.from_iterable(indexes_order))
new_body = self.body.get_points(flat_indexes)
return Pose(header=new_header, body=new_body)
def bbox(self):
body = self.body.bbox(self.header)
header = self.header.bbox()
return Pose(header=header, body=body)
pass_through_methods = {'augment2d', 'flip', 'interpolate', 'torch', 'tensorflow', 'slice_step'}
def __getattr__(self, attr):
if (attr not in Pose.pass_through_methods):
raise AttributeError(("Attribute '%s' doesn't exist on class Pose" % attr))
def func(*args, **kwargs):
prop = getattr(self.body, attr)
body_res = prop(*args, **kwargs)
if isinstance(body_res, PoseBody):
header = self.header
if hasattr(header, attr):
header_res = getattr(header, attr)(*args, **kwargs)
if isinstance(header_res, PoseHeader):
header = header_res
return Pose(header, body_res)
return body_res
return func |
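# Usage sketch for Pose, assuming a pose-format file 'example.pose' on disk
# and a component named 'POSE_LANDMARKS' in its header (both hypothetical):
with open('example.pose', 'rb') as f:
    pose = Pose.read(f.read())
pose.focus()                                      # shift the data toward the origin
subset = pose.get_components(['POSE_LANDMARKS'])  # keep only the selected components |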
class TupleEncoder(Encoder):
def __init__(self, observation_shape: Shape):
super().__init__()
(shape1, shape2) = observation_shape
assert isinstance(shape1, (tuple, list))
assert isinstance(shape2, (tuple, list))
self.fc1 = nn.Linear(shape1[0], 256)
self.fc2 = nn.Linear(shape2[0], 256)
self.shared = nn.Linear((256 * 2), 256)
def forward(self, x: TorchObservation) -> torch.Tensor:
h1 = self.fc1(x[0])
h2 = self.fc2(x[1])
return self.shared(torch.cat([h1, h2], dim=1)) |
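# Shape check for TupleEncoder (assumes the Encoder base class referenced
# above is in scope; the dimensions are illustrative):
import torch

enc = TupleEncoder(observation_shape=((10,), (4,)))
h = enc.forward((torch.randn(32, 10), torch.randn(32, 4)))
assert h.shape == (32, 256) |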
class DiscreteFQEImpl(DiscreteQFunctionMixin, FQEBaseImpl):
_q_func_forwarder: DiscreteEnsembleQFunctionForwarder
_targ_q_func_forwarder: DiscreteEnsembleQFunctionForwarder
def compute_loss(self, batch: TorchMiniBatch, q_tpn: torch.Tensor) -> torch.Tensor:
return self._q_func_forwarder.compute_error(observations=batch.observations, actions=batch.actions.long(), rewards=batch.rewards, target=q_tpn, terminals=batch.terminals, gamma=(self._gamma ** batch.intervals))
def compute_target(self, batch: TorchMiniBatch, next_actions: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
return self._targ_q_func_forwarder.compute_target(batch.next_observations, next_actions.long()) |
class BiAttention(nn.Module):
def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[0.2, 0.5]):
super(BiAttention, self).__init__()
self.glimpse = glimpse
self.logits = weight_norm(BCNet(x_dim, y_dim, z_dim, glimpse, dropout=dropout, k=3), name='h_mat', dim=None)
def forward(self, v, q, v_mask=True):
(p, logits) = self.forward_all(v, q, v_mask)
return (p, logits)
def forward_all(self, v, q, v_mask=True, logit=False, mask_with=(- float('inf'))):
v_num = v.size(1)
q_num = q.size(1)
logits = self.logits(v, q)
if v_mask:
mask = (0 == v.abs().sum(2)).unsqueeze(1).unsqueeze(3).expand(logits.size())
logits.data.masked_fill_(mask.data, mask_with)
if (not logit):
p = nn.functional.softmax(logits.view((- 1), self.glimpse, (v_num * q_num)), 2)
return (p.view((- 1), self.glimpse, v_num, q_num), logits)
return logits |
def start_memory_tracing(modules_to_trace: Optional[Union[(str, Iterable[str])]]=None, modules_not_to_trace: Optional[Union[(str, Iterable[str])]]=None, events_to_trace: str='line', gpus_to_trace: Optional[List[int]]=None) -> MemoryTrace:
if is_psutil_available():
process = psutil.Process(os.getpid())
else:
logger.warning("Psutil not installed, we won't log CPU memory usage. Install psutil (pip install psutil) to use CPU memory tracing.")
process = None
if is_py3nvml_available():
try:
nvml.nvmlInit()
devices = (list(range(nvml.nvmlDeviceGetCount())) if (gpus_to_trace is None) else gpus_to_trace)
nvml.nvmlShutdown()
except (OSError, nvml.NVMLError):
logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.")
log_gpu = False
else:
log_gpu = (is_torch_available() or is_tf_available())
else:
logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to use GPU memory tracing.")
log_gpu = False
memory_trace = []
def traceit(frame, event, args):
global _is_memory_tracing_enabled
if (not _is_memory_tracing_enabled):
return traceit
if (events_to_trace is not None):
if (isinstance(events_to_trace, str) and (event != events_to_trace)):
return traceit
elif (isinstance(events_to_trace, (list, tuple)) and (event not in events_to_trace)):
return traceit
if ('__name__' not in frame.f_globals):
return traceit
name = frame.f_globals['__name__']
if (not isinstance(name, str)):
return traceit
else:
if (modules_to_trace is not None):
if (isinstance(modules_to_trace, str) and (modules_to_trace not in name)):
return traceit
elif (isinstance(modules_to_trace, (list, tuple)) and all(((m not in name) for m in modules_to_trace))):
return traceit
if (modules_not_to_trace is not None):
if (isinstance(modules_not_to_trace, str) and (modules_not_to_trace in name)):
return traceit
elif (isinstance(modules_not_to_trace, (list, tuple)) and any(((m in name) for m in modules_not_to_trace))):
return traceit
lineno = frame.f_lineno
filename = frame.f_globals['__file__']
if (filename.endswith('.pyc') or filename.endswith('.pyo')):
filename = filename[:(- 1)]
line = linecache.getline(filename, lineno).rstrip()
traced_state = Frame(filename, name, lineno, event, line)
cpu_mem = 0
if (process is not None):
mem = process.memory_info()
cpu_mem = mem.rss
gpu_mem = 0
if log_gpu:
if is_torch_available():
torch_empty_cache()
if is_tf_available():
tf_context.context()._clear_caches()
nvml.nvmlInit()
for i in devices:
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem += meminfo.used
nvml.nvmlShutdown()
mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
memory_trace.append(mem_state)
return traceit
sys.settrace(traceit)
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = True
return memory_trace |
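# Usage sketch: trace line-by-line memory for one module, then stop and
# summarize with the companion stop_memory_tracing helper (assumed to be
# available from the same utility module):
trace = start_memory_tracing('transformers')
# ... run the code to profile ...
summary = stop_memory_tracing(trace)  # aggregates the recorded UsedMemoryState entries |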
class WeightedLeastSquares(ComboObjectiveFunction):
_model = None
def __init__(self, mesh, active_cells=None, alpha_s=1.0, alpha_x=None, alpha_y=None, alpha_z=None, alpha_xx=0.0, alpha_yy=0.0, alpha_zz=0.0, length_scale_x=None, length_scale_y=None, length_scale_z=None, mapping=None, reference_model=None, reference_model_in_smooth=False, weights=None, **kwargs):
if isinstance(mesh, BaseMesh):
mesh = RegularizationMesh(mesh)
if (not isinstance(mesh, RegularizationMesh)):
raise TypeError(f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. Value of type {type(mesh)} provided.")
self._regularization_mesh = mesh
if ((key := 'indActive') in kwargs):
if (active_cells is not None):
raise ValueError(f"Cannot simultanously pass 'active_cells' and '{key}'. Pass 'active_cells' only.")
warnings.warn(f"The '{key}' argument has been deprecated, please use 'active_cells'. It will be removed in future versions of SimPEG.", DeprecationWarning, stacklevel=2)
active_cells = kwargs.pop(key)
self.alpha_s = alpha_s
if (alpha_x is not None):
if (length_scale_x is not None):
raise ValueError('Attempted to set both alpha_x and length_scale_x at the same time. Please use only one of them')
self.alpha_x = alpha_x
else:
self.length_scale_x = length_scale_x
if (alpha_y is not None):
if (length_scale_y is not None):
raise ValueError('Attempted to set both alpha_y and length_scale_y at the same time. Please use only one of them')
self.alpha_y = alpha_y
else:
self.length_scale_y = length_scale_y
if (alpha_z is not None):
if (length_scale_z is not None):
raise ValueError('Attempted to set both alpha_z and length_scale_z at the same time. Please use only one of them')
self.alpha_z = alpha_z
else:
self.length_scale_z = length_scale_z
if ('objfcts' not in kwargs):
objfcts = [Smallness(mesh=self.regularization_mesh), SmoothnessFirstOrder(mesh=self.regularization_mesh, orientation='x'), SmoothnessSecondOrder(mesh=self.regularization_mesh, orientation='x')]
if (mesh.dim > 1):
objfcts.extend([SmoothnessFirstOrder(mesh=self.regularization_mesh, orientation='y'), SmoothnessSecondOrder(mesh=self.regularization_mesh, orientation='y')])
if (mesh.dim > 2):
objfcts.extend([SmoothnessFirstOrder(mesh=self.regularization_mesh, orientation='z'), SmoothnessSecondOrder(mesh=self.regularization_mesh, orientation='z')])
else:
objfcts = kwargs.pop('objfcts')
super().__init__(objfcts=objfcts, unpack_on_add=False, **kwargs)
if (active_cells is not None):
self.active_cells = active_cells
self.mapping = mapping
self.reference_model = reference_model
self.reference_model_in_smooth = reference_model_in_smooth
self.alpha_xx = alpha_xx
self.alpha_yy = alpha_yy
self.alpha_zz = alpha_zz
if (weights is not None):
if (not isinstance(weights, dict)):
weights = {'user_weights': weights}
self.set_weights(**weights)
def set_weights(self, **weights):
for fct in self.objfcts:
fct.set_weights(**weights)
def remove_weights(self, key):
for fct in self.objfcts:
fct.remove_weights(key)
@property
def cell_weights(self):
return self.objfcts[0].cell_weights
@cell_weights.setter
def cell_weights(self, value):
warnings.warn('cell_weights are deprecated please access weights using the `set_weights`, `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0', FutureWarning, stacklevel=2)
self.set_weights(cell_weights=value)
@property
def alpha_s(self):
return self._alpha_s
@alpha_s.setter
def alpha_s(self, value):
if (value is None):
value = 1.0
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_s must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_s must be non-negative, not {value}')
self._alpha_s = value
@property
def alpha_x(self):
return self._alpha_x
@alpha_x.setter
def alpha_x(self, value):
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_x must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_x must be non-negative, not {value}')
self._alpha_x = value
@property
def alpha_y(self):
return self._alpha_y
@alpha_y.setter
def alpha_y(self, value):
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_y must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_y must be non-negative, not {value}')
self._alpha_y = value
@property
def alpha_z(self):
return self._alpha_z
@alpha_z.setter
def alpha_z(self, value):
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_z must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_z must be non-negative, not {value}')
self._alpha_z = value
@property
def alpha_xx(self):
return self._alpha_xx
@alpha_xx.setter
def alpha_xx(self, value):
if (value is None):
value = ((self.length_scale_x * self.regularization_mesh.base_length) ** 4.0)
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_xx must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_xx must be non-negative, not {value}')
self._alpha_xx = value
@property
def alpha_yy(self):
return self._alpha_yy
@alpha_yy.setter
def alpha_yy(self, value):
if (value is None):
value = ((self.length_scale_y * self.regularization_mesh.base_length) ** 4.0)
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_yy must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_yy must be non-negative, not {value}')
self._alpha_yy = value
@property
def alpha_zz(self):
return self._alpha_zz
@alpha_zz.setter
def alpha_zz(self, value):
if (value is None):
value = ((self.length_scale_z * self.regularization_mesh.base_length) ** 4.0)
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError(f'alpha_zz must be a real number, saw type{type(value)}')
if (value < 0):
raise ValueError(f'alpha_zz must be non-negative, not {value}')
self._alpha_zz = value
@property
def length_scale_x(self):
return (np.sqrt(self.alpha_x) / self.regularization_mesh.base_length)
@length_scale_x.setter
def length_scale_x(self, value: float):
if (value is None):
value = 1.0
try:
value = float(value)
except (TypeError, ValueError):
raise TypeError(f'length_scale_x must be a real number, saw type{type(value)}')
self.alpha_x = ((value * self.regularization_mesh.base_length) ** 2)
@property
def length_scale_y(self):
return (np.sqrt(self.alpha_y) / self.regularization_mesh.base_length)
@length_scale_y.setter
def length_scale_y(self, value: float):
if (value is None):
value = 1.0
try:
value = float(value)
except (TypeError, ValueError):
raise TypeError(f'length_scale_y must be a real number, saw type{type(value)}')
self.alpha_y = ((value * self.regularization_mesh.base_length) ** 2)
@property
def length_scale_z(self):
return (np.sqrt(self.alpha_z) / self.regularization_mesh.base_length)
@length_scale_z.setter
def length_scale_z(self, value: float):
if (value is None):
value = 1.0
try:
value = float(value)
except (TypeError, ValueError):
raise TypeError(f'length_scale_z must be a real number, saw type{type(value)}')
self.alpha_z = ((value * self.regularization_mesh.base_length) ** 2)
@property
def reference_model_in_smooth(self) -> bool:
return self._reference_model_in_smooth
@reference_model_in_smooth.setter
def reference_model_in_smooth(self, value: bool):
if (not isinstance(value, bool)):
raise TypeError(f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided.")
self._reference_model_in_smooth = value
for fct in self.objfcts:
if (getattr(fct, 'reference_model_in_smooth', None) is not None):
fct.reference_model_in_smooth = value
@property
def nP(self):
if ((getattr(self, 'mapping', None) is not None) and (self.mapping.nP != '*')):
return self.mapping.nP
elif ((getattr(self, '_regularization_mesh', None) is not None) and (self.regularization_mesh.nC != '*')):
return self.regularization_mesh.nC
else:
return '*'
@property
def _nC_residual(self):
nC = getattr(self.regularization_mesh, 'nC', None)
mapping = getattr(self, '_mapping', None)
if ((mapping is not None) and (mapping.shape[1] != '*')):
return self.mapping.shape[1]
elif ((nC != '*') and (nC is not None)):
return self.regularization_mesh.nC
else:
return self.nP
def _delta_m(self, m):
if (self.reference_model is None):
return m
return (m - self.reference_model)
@property
def multipliers(self):
return [getattr(self, objfct._multiplier_pair) for objfct in self.objfcts]
@property
def active_cells(self) -> np.ndarray:
return self.regularization_mesh.active_cells
@active_cells.setter
def active_cells(self, values: np.ndarray):
self.regularization_mesh.active_cells = values
active_cells = self.regularization_mesh.active_cells
for objfct in self.objfcts:
objfct.active_cells = active_cells
indActive = deprecate_property(active_cells, 'indActive', 'active_cells', '0.19.0', error=False, future_warn=True)
@property
def reference_model(self) -> np.ndarray:
return self._reference_model
@reference_model.setter
def reference_model(self, values: (np.ndarray | float)):
if isinstance(values, float):
values = (np.ones(self._nC_residual) * values)
for fct in self.objfcts:
fct.reference_model = values
self._reference_model = values
mref = deprecate_property(reference_model, 'mref', 'reference_model', '0.19.0', future_warn=True, error=False)
@property
def model(self) -> np.ndarray:
return self._model
@model.setter
def model(self, values: (np.ndarray | float)):
if isinstance(values, float):
values = (np.ones(self._nC_residual) * values)
for fct in self.objfcts:
fct.model = values
self._model = values
@property
def units(self) -> str:
return self._units
@units.setter
def units(self, units: (str | None)):
if ((units is not None) and (not isinstance(units, str))):
raise TypeError(f"'units' must be None or type str. Value of type {type(units)} provided.")
for fct in self.objfcts:
fct.units = units
self._units = units
@property
def regularization_mesh(self) -> RegularizationMesh:
return self._regularization_mesh
@property
def mapping(self) -> maps.IdentityMap:
return self._mapping
@mapping.setter
def mapping(self, mapping: maps.IdentityMap):
if (mapping is None):
mapping = maps.IdentityMap(nP=self._nC_residual)
if (not isinstance(mapping, maps.IdentityMap)):
raise TypeError(f"'mapping' must be of type {maps.IdentityMap}. Value of type {type(mapping)} provided.")
self._mapping = mapping
for fct in self.objfcts:
fct.mapping = mapping |
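# Hedged sketch (not part of the snippet above; plain numbers stand in for
# regularization_mesh.base_length): the setters define
# alpha_x = (length_scale_x * base_length)**2 and alpha_xx = (...)**4,
# so length_scale_x = sqrt(alpha_x) / base_length round-trips exactly.
import numpy as np
base_length = 2.0
length_scale_x = 0.5
alpha_x = (length_scale_x * base_length) ** 2
assert np.isclose(np.sqrt(alpha_x) / base_length, length_scale_x) |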
def dataset_labels(dataset):
labels = set([x.sentiment for x in dataset])
if all((re.match('^[0-9]+$', label) for label in labels)):
labels = [str(x) for x in sorted(map(int, list(labels)))]
else:
labels = sorted(list(labels))
return labels |
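# Hedged usage sketch for dataset_labels above (assumes `import re` at module
# level; `Example` is a hypothetical record type): purely numeric labels sort
# numerically, anything else sorts lexicographically.
from collections import namedtuple
Example = namedtuple('Example', 'sentiment')
assert dataset_labels([Example('10'), Example('2')]) == ['2', '10']
assert dataset_labels([Example('neg'), Example('pos')]) == ['neg', 'pos'] |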
def DeeplabMulti(num_classes=21):
model = ResNetMulti(Bottleneck, [3, 4, 23, 3], num_classes)
return model |
def get_timestamp_embeddings(audio, model):
(embedmel, tmel) = model.get_timestamp_mels(audio, window_size=(6 * 160))
(embed1, t1) = model.get_timestamp_embeddings(audio)
(embed2, t2) = model.get_timestamp_embeddings(audio, window_size=(model.timestamp_window * 5))
embed = torch.cat((embed1, embed2, embedmel), dim=(- 1))
return (embed, t1) |
def main(args):
path_val_data = '../../data/crowdsourced/visdial_1.0_val_crowdsourced.json'
path_images_root = '../../data/images/'
dense_annotations_jsonpath = '../../data/crowdsourced/visdial_1.0_val_dense_annotations_crowdsourced.json'
model_preds_root = '../../models/visdialconv/'
analyzer = PredictionsAnalyzer(path_val_data, dense_annotations_jsonpath, path_images_root, model_preds_root)
model_type_list = analyzer.get_models_list()
all_models_rel = {}
for model_type in model_type_list:
phase_1_key = f'relevance_dic_phase1_{model_type}'
phase_2_key = f'relevance_dic_finetune_{model_type}'
index_1_key = f'index_dic_phase1_{model_type}'
index_2_key = f'index_dic_finetune_{model_type}'
gt_1_key = f'gt_dic_phase1_{model_type}'
gt_2_key = f'gt_dic_finetune_{model_type}'
(relevance_dic_phase1, relevance_dic_finetune, index_dic_phase1, index_dic_finetune, gt_results_index_dic_phase1, gt_results_index_dic_finetune) = analyzer.get_dic_models(model_type)
all_models_rel[phase_1_key] = relevance_dic_phase1
all_models_rel[phase_2_key] = relevance_dic_finetune
all_models_rel[index_1_key] = index_dic_phase1
all_models_rel[index_2_key] = index_dic_finetune
all_models_rel[gt_1_key] = gt_results_index_dic_phase1
all_models_rel[gt_2_key] = gt_results_index_dic_finetune
row_index = 9
print(f'Model {model_type_list[0]}')
analyzer.get_analysis(model_type_list[0], row_index=row_index, is_print=True)
print(f'Model {model_type_list[1]}')
analyzer.get_analysis(model_type_list[1], row_index=row_index)
print(f'Model {model_type_list[2]}')
analyzer.get_analysis(model_type_list[2], row_index=row_index)
print(f'Model {model_type_list[3]}')
analyzer.get_analysis(model_type_list[3], row_index=row_index) |
def get_model_from_name(args, idx=(- 1)):
if ((idx != (- 1)) and (idx == (args.num_models - 1))):
width_ratio = args.width_ratio
else:
width_ratio = (- 1)
if (args.model_name == 'net'):
return Net(args)
elif (args.model_name == 'simplenet'):
return SimpleNet(args)
elif (args.model_name == 'smallmlpnet'):
return SmallMlpNet(args)
elif (args.model_name == 'mlpnet'):
return MlpNet(args, width_ratio=width_ratio)
elif (args.model_name == 'bigmlpnet'):
return BigMlpNet(args)
elif (args.model_name == 'cifarmlpnet'):
return CifarMlpNet(args)
elif ((args.model_name[0:3] == 'vgg') or (args.model_name[0:3] == 'res')):
if ((args.second_model_name is None) or (idx == 0)):
barebone_config = {'model': args.model_name, 'dataset': args.dataset}
else:
barebone_config = {'model': args.second_model_name, 'dataset': args.dataset}
return cifar_train.get_model(barebone_config, args.gpu_id, relu_inplace=(not args.prelu_acts)) |
class TinyImageNetDataset(DataInterface):
def __init__(self, **kwargs):
self.kwargs = kwargs
@property
def shard_descriptor(self):
return self._shard_descriptor
@shard_descriptor.setter
def shard_descriptor(self, shard_descriptor):
self._shard_descriptor = shard_descriptor
self.train_set = TransformedDataset(self._shard_descriptor.get_dataset('train'), transform=training_transform)
self.valid_set = TransformedDataset(self._shard_descriptor.get_dataset('val'), transform=valid_transform)
def get_train_loader(self, **kwargs):
generator = torch.Generator()
generator.manual_seed(0)
return DataLoader(self.train_set, batch_size=self.kwargs['train_bs'], shuffle=True, generator=generator)
def get_valid_loader(self, **kwargs):
return DataLoader(self.valid_set, batch_size=self.kwargs['valid_bs'])
def get_train_data_size(self):
return len(self.train_set)
def get_valid_data_size(self):
return len(self.valid_set) |
def _seg_69():
return [(126500, 'M', u''), (126501, 'X'), (126503, 'M', u''), (126504, 'X'), (126505, 'M', u''), (126506, 'M', u''), (126507, 'M', u''), (126508, 'M', u''), (126509, 'M', u''), (126510, 'M', u''), (126511, 'M', u''), (126512, 'M', u''), (126513, 'M', u''), (126514, 'M', u''), (126515, 'X'), (126516, 'M', u''), (126517, 'M', u''), (126518, 'M', u''), (126519, 'M', u''), (126520, 'X'), (126521, 'M', u''), (126522, 'X'), (126523, 'M', u''), (126524, 'X'), (126530, 'M', u''), (126531, 'X'), (126535, 'M', u''), (126536, 'X'), (126537, 'M', u''), (126538, 'X'), (126539, 'M', u''), (126540, 'X'), (126541, 'M', u''), (126542, 'M', u''), (126543, 'M', u''), (126544, 'X'), (126545, 'M', u''), (126546, 'M', u''), (126547, 'X'), (126548, 'M', u''), (126549, 'X'), (126551, 'M', u''), (126552, 'X'), (126553, 'M', u''), (126554, 'X'), (126555, 'M', u''), (126556, 'X'), (126557, 'M', u''), (126558, 'X'), (126559, 'M', u''), (126560, 'X'), (126561, 'M', u''), (126562, 'M', u''), (126563, 'X'), (126564, 'M', u''), (126565, 'X'), (126567, 'M', u''), (126568, 'M', u''), (126569, 'M', u''), (126570, 'M', u''), (126571, 'X'), (126572, 'M', u''), (126573, 'M', u''), (126574, 'M', u''), (126575, 'M', u''), (126576, 'M', u''), (126577, 'M', u''), (126578, 'M', u''), (126579, 'X'), (126580, 'M', u''), (126581, 'M', u''), (126582, 'M', u''), (126583, 'M', u''), (126584, 'X'), (126585, 'M', u''), (126586, 'M', u''), (126587, 'M', u''), (126588, 'M', u''), (126589, 'X'), (126590, 'M', u''), (126591, 'X'), (126592, 'M', u''), (126593, 'M', u''), (126594, 'M', u''), (126595, 'M', u''), (126596, 'M', u''), (126597, 'M', u''), (126598, 'M', u''), (126599, 'M', u''), (126600, 'M', u''), (126601, 'M', u''), (126602, 'X'), (126603, 'M', u''), (126604, 'M', u''), (126605, 'M', u''), (126606, 'M', u''), (126607, 'M', u''), (126608, 'M', u''), (126609, 'M', u''), (126610, 'M', u'')] |
def test_single_model_greedy_acquisition_builder_repr_includes_class_name() -> None:
builder = _ArbitraryGreedySingleBuilder()
assert (type(builder).__name__ in repr(builder)) |
def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
phase = dataset_opt['phase']
(rank, _) = get_dist_info()
if (phase == 'train'):
if dist:
batch_size = dataset_opt['batch_size_per_gpu']
num_workers = dataset_opt['num_worker_per_gpu']
else:
multiplier = (1 if (num_gpu == 0) else num_gpu)
batch_size = (dataset_opt['batch_size_per_gpu'] * multiplier)
num_workers = (dataset_opt['num_worker_per_gpu'] * multiplier)
dataloader_args = dict(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, sampler=sampler, drop_last=True)
if (sampler is None):
dataloader_args['shuffle'] = True
dataloader_args['worker_init_fn'] = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
elif (phase in ['val', 'test']):
dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
else:
raise ValueError(f"Wrong dataset phase: {phase}. Supported ones are 'train', 'val' and 'test'.")
dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
prefetch_mode = dataset_opt.get('prefetch_mode')
if (prefetch_mode == 'cpu'):
num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
logger = get_root_logger()
logger.info(f'Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}')
return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
else:
return torch.utils.data.DataLoader(**dataloader_args) |
def load_state(path, prefix):
gen.load_state_dict(torch.load(os.path.join(path, 'net_archive', '{0}_gen.pt'.format(prefix)), map_location=load_location_map))
gen_opt.load_state_dict(torch.load(os.path.join(path, 'net_archive', '{0}_gen_opt.pt'.format(prefix)), map_location=load_location_map))
dis.load_state_dict(torch.load(os.path.join(path, 'net_archive', '{0}_dis.pt'.format(prefix)), map_location=load_location_map))
dis_opt.load_state_dict(torch.load(os.path.join(path, 'net_archive', '{0}_dis_opt.pt'.format(prefix)), map_location=load_location_map))
state.update(torch.load(os.path.join(path, 'net_archive', '{0}_state.pt'.format(prefix))))
for solver_state in gen_opt.state.values():
for (k, v) in solver_state.items():
if torch.is_tensor(v):
solver_state[k] = v.cuda(opt.gpu)
for solver_state in dis_opt.state.values():
for (k, v) in solver_state.items():
if torch.is_tensor(v):
solver_state[k] = v.cuda(opt.gpu) |
class ResizeAndGrayscaleWrapper(gym.core.Wrapper):
def __init__(self, env, w, h):
super(ResizeAndGrayscaleWrapper, self).__init__(env)
self.observation_space = spaces.Box(0, 255, shape=[w, h], dtype=np.uint8)
self.w = w
self.h = h
def _observation(self, obs):
obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
obs = cv2.resize(obs, (self.w, self.h), interpolation=cv2.INTER_AREA)
return obs
def reset(self, **kwargs):
return self._observation(self.env.reset(**kwargs))
def step(self, action):
(obs, reward, done, info) = self.env.step(action)
return (self._observation(obs), reward, done, info)
def get_obs(self):
return self._observation(self.env.get_obs()) |
def ref_bool_scatter(sdata, mask):
gdata_shape = (mask.shape + sdata.shape[1:])
mask_bool = mask.astype(bool)
gdata = np.zeros(gdata_shape)
gdata[mask_bool] = sdata
return gdata |
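# Hedged usage sketch for ref_bool_scatter above: rows of sdata are scattered
# into a zero-filled array at the True positions of mask, in row-major order.
import numpy as np
sdata = np.array([[1.0], [2.0]])
mask = np.array([[0, 1], [1, 0]])
out = ref_bool_scatter(sdata, mask)
assert out.shape == (2, 2, 1)
assert out[0, 1, 0] == 1.0 and out[1, 0, 0] == 2.0 |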
class FFN(nn.Module):
def __init__(self, features):
super(FFN, self).__init__()
self.layer1 = nn.Linear(features, features)
self.layer2 = nn.Linear(features, features)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.2)
def forward(self, x):
out = self.drop(self.relu(self.layer1(x)))
out = self.layer2(out)
return out |
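# Hedged usage sketch for FFN above: a width-preserving two-layer MLP with
# ReLU and dropout between the layers.
import torch
ffn = FFN(features=8)
assert ffn(torch.randn(3, 8)).shape == (3, 8) |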
def D_gp_loss(dis_input, dis_out):
batch_size = dis_input.size(0)
grad_penalty = autograd.grad(outputs=dis_out.mean(), inputs=dis_input, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_penalty = grad_penalty.pow(2)
assert (grad_penalty.size() == dis_input.size())
real_grad = grad_penalty.view(batch_size, (- 1)).sum(1)
return real_grad.mean() |
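# Hedged usage sketch for D_gp_loss above (assumes `from torch import autograd`
# at module level): dis_input must carry requires_grad=True so autograd can
# differentiate the discriminator output with respect to it.
import torch
x = torch.randn(4, 3, requires_grad=True)
out = (x * 2.0).sum(dim=1, keepdim=True)  # stand-in for netD(x)
gp = D_gp_loss(x, out)
assert gp.item() >= 0.0 |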
@cached_function
def exterior_algebra_basis(n, degrees):
if (n == 0):
return [[0 for _ in degrees]]
if (len(degrees) == 1):
if (degrees[0] == n):
return [[1]]
return []
if (not degrees):
return []
if (min(degrees) > n):
return []
if (sum(degrees) < n):
return []
if (sum(degrees) == n):
return [[1 for _ in degrees]]
i = (len(degrees) // 2)
res = []
for j in range((n + 1)):
v1 = exterior_algebra_basis(j, degrees[:i])
v2 = exterior_algebra_basis((n - j), degrees[i:])
res += [(l1 + l2) for l1 in v1 for l2 in v2]
res.sort()
return res |
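# Hedged usage sketch for exterior_algebra_basis above: it enumerates 0/1
# exponent vectors picking a subset of generators whose degrees sum to n,
# returned in sorted order.
assert exterior_algebra_basis(3, [1, 2]) == [[1, 1]]
assert exterior_algebra_basis(2, [1, 2]) == [[0, 1]]
assert exterior_algebra_basis(3, [1, 1, 2]) == [[0, 1, 1], [1, 0, 1]] |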
@fixture()
def tracer_mock():
tracer = MagicMock()
tracer.register_code_object.side_effect = range(100)
tracer.register_predicate.side_effect = range(100)
return tracer |
@callback(Output('right-column-data', 'children'), Input('data-explanation-state', 'data'))
def update_view(data):
params = json.loads(data)
state = copy.deepcopy(board.state)
for (param, value) in params.items():
state.set_param('data', param, value)
return create_right_column(state) |
def clip_grad_norm_(parameters: _tensor_or_tensors, max_norm: float, norm_type: float=2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if (p.grad is not None)]
max_norm = float(max_norm)
norm_type = float(norm_type)
if (len(parameters) == 0):
return torch.tensor(0.0)
device = parameters[0].grad.device
if (norm_type == inf):
total_norm = max((p.grad.detach().abs().max().to(device) for p in parameters))
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
clip_coef = (max_norm / (total_norm + 1e-06))
if (clip_coef < 1):
for p in parameters:
p.grad.detach().mul_(clip_coef.to(p.grad.device))
return total_norm |
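# Hedged usage sketch for clip_grad_norm_ above (it mirrors
# torch.nn.utils.clip_grad_norm_): the gradient is rescaled in place so its
# total norm does not exceed max_norm; the pre-clipping norm is returned.
import torch
p = torch.nn.Parameter(torch.ones(4))
p.grad = torch.full((4,), 2.0)  # L2 norm of the gradient is 4.0
total = clip_grad_norm_(p, max_norm=1.0)
assert torch.isclose(total, torch.tensor(4.0))
assert p.grad.norm() <= 1.0 |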
def run_treebank(mode, paths, treebank, short_name, temp_output_file, command_args, extra_args):
constituency_dir = paths['CONSTITUENCY_DATA_DIR']
(short_language, dataset) = short_name.split('_')
train_file = os.path.join(constituency_dir, f'{short_name}_train.mrg')
dev_file = os.path.join(constituency_dir, f'{short_name}_dev.mrg')
test_file = os.path.join(constituency_dir, f'{short_name}_test.mrg')
if ((not os.path.exists(train_file)) or (not os.path.exists(dev_file)) or (not os.path.exists(test_file))):
logger.warning(f'The data for {short_name} is missing or incomplete. Attempting to rebuild...')
try:
prepare_con_dataset.main(short_name)
except:
logger.error(f'Unable to build the data. Please correctly build the files in {train_file}, {dev_file}, {test_file} and then try again.')
raise
default_args = build_default_args(paths, short_language, dataset, command_args, extra_args)
if (mode == Mode.TRAIN):
train_args = ['--train_file', train_file, '--eval_file', dev_file, '--shorthand', short_name, '--mode', 'train']
train_args = ((train_args + default_args) + extra_args)
logger.info('Running train step with args: {}'.format(train_args))
constituency_parser.main(train_args)
if ((mode == Mode.SCORE_DEV) or (mode == Mode.TRAIN)):
dev_args = ['--eval_file', dev_file, '--shorthand', short_name, '--mode', 'predict']
dev_args = ((dev_args + default_args) + extra_args)
logger.info('Running dev step with args: {}'.format(dev_args))
constituency_parser.main(dev_args)
if ((mode == Mode.SCORE_TEST) or (mode == Mode.TRAIN)):
test_args = ['--eval_file', test_file, '--shorthand', short_name, '--mode', 'predict']
test_args = ((test_args + default_args) + extra_args)
logger.info('Running test step with args: {}'.format(test_args))
constituency_parser.main(test_args)
if (mode == 'parse_text'):
text_args = ['--shorthand', short_name, '--mode', 'parse_text']
text_args = ((text_args + default_args) + extra_args)
logger.info('Processing text with args: {}'.format(text_args))
constituency_parser.main(text_args) |
class Formatter():
def __init__(self):
global _ellipses
self.max_depth = 20
self.max_args = 128
self.rational_to_decimal = False
self.precision = 10
self.ellipses = to_format(_ellipses)
self.max_visited = 10000
self.fpa_pretty = True
def pp_ellipses(self):
return self.ellipses
def pp_arrow(self):
return ' ->'
def pp_unknown(self):
return '<unknown>'
def pp_name(self, a):
return to_format(_op_name(a))
def is_infix(self, a):
return _is_infix(a)
def is_unary(self, a):
return _is_unary(a)
def get_precedence(self, a):
return _get_precedence(a)
def is_infix_compact(self, a):
return _is_infix_compact(a)
def is_infix_unary(self, a):
return (self.is_infix(a) or self.is_unary(a))
def add_paren(self, a):
return compose(to_format('('), indent(1, a), to_format(')'))
def pp_sort(self, s):
if isinstance(s, z3.ArraySortRef):
return seq1('Array', (self.pp_sort(s.domain()), self.pp_sort(s.range())))
elif isinstance(s, z3.BitVecSortRef):
return seq1('BitVec', (to_format(s.size()),))
elif isinstance(s, z3.FPSortRef):
return seq1('FPSort', (to_format(s.ebits()), to_format(s.sbits())))
elif isinstance(s, z3.ReSortRef):
return seq1('ReSort', (self.pp_sort(s.basis()),))
elif isinstance(s, z3.SeqSortRef):
if s.is_string():
return to_format('String')
return seq1('Seq', (self.pp_sort(s.basis()),))
elif isinstance(s, z3.CharSortRef):
return to_format('Char')
else:
return to_format(s.name())
def pp_const(self, a):
k = a.decl().kind()
if (k == Z3_OP_RE_EMPTY_SET):
return self.pp_set('Empty', a)
elif (k == Z3_OP_SEQ_EMPTY):
return self.pp_set('Empty', a)
elif (k == Z3_OP_RE_FULL_SET):
return self.pp_set('Full', a)
elif (k == Z3_OP_CHAR_CONST):
return self.pp_char(a)
return self.pp_name(a)
def pp_int(self, a):
return to_format(a.as_string())
def pp_rational(self, a):
if (not self.rational_to_decimal):
return to_format(a.as_string())
else:
return to_format(a.as_decimal(self.precision))
def pp_algebraic(self, a):
return to_format(a.as_decimal(self.precision))
def pp_string(self, a):
return to_format((('"' + a.as_string()) + '"'))
def pp_bv(self, a):
return to_format(a.as_string())
def pp_fd(self, a):
return to_format(a.as_string())
def pp_fprm_value(self, a):
_z3_assert(z3.is_fprm_value(a), 'expected FPRMNumRef')
if (self.fpa_pretty and (a.decl().kind() in _z3_op_to_fpa_pretty_str)):
return to_format(_z3_op_to_fpa_pretty_str.get(a.decl().kind()))
else:
return to_format(_z3_op_to_fpa_normal_str.get(a.decl().kind()))
def pp_fp_value(self, a):
_z3_assert(isinstance(a, z3.FPNumRef), 'type mismatch')
if (not self.fpa_pretty):
r = []
if a.isNaN():
r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_NAN]))
r.append(to_format('('))
r.append(to_format(a.sort()))
r.append(to_format(')'))
return compose(r)
elif a.isInf():
if a.isNegative():
r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_MINUS_INF]))
else:
r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_PLUS_INF]))
r.append(to_format('('))
r.append(to_format(a.sort()))
r.append(to_format(')'))
return compose(r)
elif a.isZero():
if a.isNegative():
return to_format('-zero')
else:
return to_format('+zero')
else:
_z3_assert(z3.is_fp_value(a), 'expecting FP num ast')
r = []
sgn = c_int(0)
sgnb = Z3_fpa_get_numeral_sign(a.ctx_ref(), a.ast, byref(sgn))
exp = Z3_fpa_get_numeral_exponent_string(a.ctx_ref(), a.ast, False)
sig = Z3_fpa_get_numeral_significand_string(a.ctx_ref(), a.ast)
r.append(to_format('FPVal('))
if (sgnb and (sgn.value != 0)):
r.append(to_format('-'))
r.append(to_format(sig))
r.append(to_format('*(2**'))
r.append(to_format(exp))
r.append(to_format(', '))
r.append(to_format(a.sort()))
r.append(to_format('))'))
return compose(r)
elif a.isNaN():
return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_NAN])
elif a.isInf():
if a.isNegative():
return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_MINUS_INF])
else:
return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_PLUS_INF])
elif a.isZero():
if a.isNegative():
return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_MINUS_ZERO])
else:
return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_PLUS_ZERO])
else:
_z3_assert(z3.is_fp_value(a), 'expecting FP num ast')
r = []
sgn = ctypes.c_int(0)
sgnb = Z3_fpa_get_numeral_sign(a.ctx_ref(), a.ast, byref(sgn))
exp = Z3_fpa_get_numeral_exponent_string(a.ctx_ref(), a.ast, False)
sig = Z3_fpa_get_numeral_significand_string(a.ctx_ref(), a.ast)
if (sgnb and (sgn.value != 0)):
r.append(to_format('-'))
r.append(to_format(sig))
if (exp != '0'):
r.append(to_format('*(2**'))
r.append(to_format(exp))
r.append(to_format(')'))
return compose(r)
def pp_fp(self, a, d, xs):
_z3_assert(isinstance(a, z3.FPRef), 'type mismatch')
k = a.decl().kind()
op = '?'
if (self.fpa_pretty and (k in _z3_op_to_fpa_pretty_str)):
op = _z3_op_to_fpa_pretty_str[k]
elif (k in _z3_op_to_fpa_normal_str):
op = _z3_op_to_fpa_normal_str[k]
elif (k in _z3_op_to_str):
op = _z3_op_to_str[k]
n = a.num_args()
if self.fpa_pretty:
if (self.is_infix(k) and (n >= 3)):
rm = a.arg(0)
if (z3.is_fprm_value(rm) and z3.get_default_rounding_mode(a.ctx).eq(rm)):
arg1 = to_format(self.pp_expr(a.arg(1), (d + 1), xs))
arg2 = to_format(self.pp_expr(a.arg(2), (d + 1), xs))
r = []
r.append(arg1)
r.append(to_format(' '))
r.append(to_format(op))
r.append(to_format(' '))
r.append(arg2)
return compose(r)
elif (k == Z3_OP_FPA_NEG):
return compose([to_format('-'), to_format(self.pp_expr(a.arg(0), (d + 1), xs))])
if (k in _z3_op_to_fpa_normal_str):
op = _z3_op_to_fpa_normal_str[k]
r = []
r.append(to_format(op))
if (not z3.is_const(a)):
r.append(to_format('('))
first = True
for c in a.children():
if first:
first = False
else:
r.append(to_format(', '))
r.append(self.pp_expr(c, (d + 1), xs))
r.append(to_format(')'))
return compose(r)
else:
return to_format(a.as_string())
def pp_prefix(self, a, d, xs):
r = []
sz = 0
for child in a.children():
r.append(self.pp_expr(child, (d + 1), xs))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
return seq1(self.pp_name(a), r)
def is_assoc(self, k):
return _is_assoc(k)
def is_left_assoc(self, k):
return _is_left_assoc(k)
def infix_args_core(self, a, d, xs, r):
sz = len(r)
k = a.decl().kind()
p = self.get_precedence(k)
first = True
for child in a.children():
child_pp = self.pp_expr(child, (d + 1), xs)
child_k = None
if z3.is_app(child):
child_k = child.decl().kind()
if ((k == child_k) and (self.is_assoc(k) or (first and self.is_left_assoc(k)))):
self.infix_args_core(child, d, xs, r)
sz = len(r)
if (sz > self.max_args):
return
elif self.is_infix_unary(child_k):
child_p = self.get_precedence(child_k)
if ((p > child_p) or (_is_add(k) and _is_sub(child_k)) or (_is_sub(k) and first and _is_add(child_k))):
r.append(child_pp)
else:
r.append(self.add_paren(child_pp))
sz = (sz + 1)
elif z3.is_quantifier(child):
r.append(self.add_paren(child_pp))
else:
r.append(child_pp)
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
return
first = False
def infix_args(self, a, d, xs):
r = []
self.infix_args_core(a, d, xs, r)
return r
def pp_infix(self, a, d, xs):
k = a.decl().kind()
if self.is_infix_compact(k):
op = self.pp_name(a)
return group(seq(self.infix_args(a, d, xs), op, False))
else:
op = self.pp_name(a)
sz = _len(op)
op.string = (' ' + op.string)
op.size = (sz + 1)
return group(seq(self.infix_args(a, d, xs), op))
def pp_unary(self, a, d, xs):
k = a.decl().kind()
p = self.get_precedence(k)
child = a.children()[0]
child_k = None
if z3.is_app(child):
child_k = child.decl().kind()
child_pp = self.pp_expr(child, (d + 1), xs)
if ((k != child_k) and self.is_infix_unary(child_k)):
child_p = self.get_precedence(child_k)
if (p <= child_p):
child_pp = self.add_paren(child_pp)
if z3.is_quantifier(child):
child_pp = self.add_paren(child_pp)
name = self.pp_name(a)
return compose(to_format(name), indent(_len(name), child_pp))
def pp_power_arg(self, arg, d, xs):
r = self.pp_expr(arg, (d + 1), xs)
k = None
if z3.is_app(arg):
k = arg.decl().kind()
if (self.is_infix_unary(k) or (z3.is_rational_value(arg) and (arg.denominator_as_long() != 1))):
return self.add_paren(r)
else:
return r
def pp_power(self, a, d, xs):
arg1_pp = self.pp_power_arg(a.arg(0), (d + 1), xs)
arg2_pp = self.pp_power_arg(a.arg(1), (d + 1), xs)
return group(seq((arg1_pp, arg2_pp), '**', False))
def pp_neq(self):
return to_format('!=')
def pp_distinct(self, a, d, xs):
if (a.num_args() == 2):
op = self.pp_neq()
sz = _len(op)
op.string = (' ' + op.string)
op.size = (sz + 1)
return group(seq(self.infix_args(a, d, xs), op))
else:
return self.pp_prefix(a, d, xs)
def pp_select(self, a, d, xs):
if (a.num_args() != 2):
return self.pp_prefix(a, d, xs)
else:
arg1_pp = self.pp_expr(a.arg(0), (d + 1), xs)
arg2_pp = self.pp_expr(a.arg(1), (d + 1), xs)
return compose(arg1_pp, indent(2, compose(to_format('['), arg2_pp, to_format(']'))))
def pp_unary_param(self, a, d, xs):
p = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
arg = self.pp_expr(a.arg(0), (d + 1), xs)
return seq1(self.pp_name(a), [to_format(p), arg])
def pp_extract(self, a, d, xs):
high = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
low = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 1)
arg = self.pp_expr(a.arg(0), (d + 1), xs)
return seq1(self.pp_name(a), [to_format(high), to_format(low), arg])
def pp_loop(self, a, d, xs):
low = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
arg = self.pp_expr(a.arg(0), (d + 1), xs)
if (Z3_get_decl_num_parameters(a.ctx_ref(), a.decl().ast) > 1):
high = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 1)
return seq1('Loop', [arg, to_format(low), to_format(high)])
return seq1('Loop', [arg, to_format(low)])
def pp_set(self, id, a):
return seq1(id, [self.pp_sort(a.sort())])
def pp_char(self, a):
n = a.params()[0]
return to_format(str(n))
def pp_pattern(self, a, d, xs):
if (a.num_args() == 1):
return self.pp_expr(a.arg(0), d, xs)
else:
return seq1('MultiPattern', [self.pp_expr(arg, (d + 1), xs) for arg in a.children()])
def pp_is(self, a, d, xs):
f = a.params()[0]
return self.pp_fdecl(f, a, d, xs)
def pp_map(self, a, d, xs):
f = z3.get_map_func(a)
return self.pp_fdecl(f, a, d, xs)
def pp_fdecl(self, f, a, d, xs):
r = []
sz = 0
r.append(to_format(f.name()))
for child in a.children():
r.append(self.pp_expr(child, (d + 1), xs))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
return seq1(self.pp_name(a), r)
def pp_K(self, a, d, xs):
return seq1(self.pp_name(a), [self.pp_sort(a.domain()), self.pp_expr(a.arg(0), (d + 1), xs)])
def pp_atmost(self, a, d, f, xs):
k = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
return seq1(self.pp_name(a), [seq3([self.pp_expr(ch, (d + 1), xs) for ch in a.children()]), to_format(k)])
def pp_pbcmp(self, a, d, f, xs):
chs = a.children()
rchs = range(len(chs))
k = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
ks = [Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, (i + 1)) for i in rchs]
ls = [seq3([self.pp_expr(chs[i], (d + 1), xs), to_format(ks[i])]) for i in rchs]
return seq1(self.pp_name(a), [seq3(ls), to_format(k)])
def pp_app(self, a, d, xs):
if z3.is_int_value(a):
return self.pp_int(a)
elif z3.is_rational_value(a):
return self.pp_rational(a)
elif z3.is_algebraic_value(a):
return self.pp_algebraic(a)
elif z3.is_bv_value(a):
return self.pp_bv(a)
elif z3.is_finite_domain_value(a):
return self.pp_fd(a)
elif z3.is_fprm_value(a):
return self.pp_fprm_value(a)
elif z3.is_fp_value(a):
return self.pp_fp_value(a)
elif z3.is_fp(a):
return self.pp_fp(a, d, xs)
elif z3.is_string_value(a):
return self.pp_string(a)
elif z3.is_const(a):
return self.pp_const(a)
else:
f = a.decl()
k = f.kind()
if (k == Z3_OP_POWER):
return self.pp_power(a, d, xs)
elif (k == Z3_OP_DISTINCT):
return self.pp_distinct(a, d, xs)
elif (k == Z3_OP_SELECT):
return self.pp_select(a, d, xs)
elif ((k == Z3_OP_SIGN_EXT) or (k == Z3_OP_ZERO_EXT) or (k == Z3_OP_REPEAT)):
return self.pp_unary_param(a, d, xs)
elif (k == Z3_OP_EXTRACT):
return self.pp_extract(a, d, xs)
elif (k == Z3_OP_RE_LOOP):
return self.pp_loop(a, d, xs)
elif (k == Z3_OP_DT_IS):
return self.pp_is(a, d, xs)
elif (k == Z3_OP_ARRAY_MAP):
return self.pp_map(a, d, xs)
elif (k == Z3_OP_CONST_ARRAY):
return self.pp_K(a, d, xs)
elif (k == Z3_OP_PB_AT_MOST):
return self.pp_atmost(a, d, f, xs)
elif (k == Z3_OP_PB_LE):
return self.pp_pbcmp(a, d, f, xs)
elif (k == Z3_OP_PB_GE):
return self.pp_pbcmp(a, d, f, xs)
elif (k == Z3_OP_PB_EQ):
return self.pp_pbcmp(a, d, f, xs)
elif z3.is_pattern(a):
return self.pp_pattern(a, d, xs)
elif self.is_infix(k):
return self.pp_infix(a, d, xs)
elif self.is_unary(k):
return self.pp_unary(a, d, xs)
else:
return self.pp_prefix(a, d, xs)
def pp_var(self, a, d, xs):
idx = z3.get_var_index(a)
sz = len(xs)
if (idx >= sz):
return seq1('Var', (to_format(idx),))
else:
return to_format(xs[((sz - idx) - 1)])
def pp_quantifier(self, a, d, xs):
ys = [to_format(a.var_name(i)) for i in range(a.num_vars())]
new_xs = (xs + ys)
body_pp = self.pp_expr(a.body(), (d + 1), new_xs)
if (len(ys) == 1):
ys_pp = ys[0]
else:
ys_pp = seq3(ys, '[', ']')
if a.is_forall():
header = 'ForAll'
elif a.is_exists():
header = 'Exists'
else:
header = 'Lambda'
return seq1(header, (ys_pp, body_pp))
def pp_expr(self, a, d, xs):
self.visited = (self.visited + 1)
if ((d > self.max_depth) or (self.visited > self.max_visited)):
return self.pp_ellipses()
if z3.is_app(a):
return self.pp_app(a, d, xs)
elif z3.is_quantifier(a):
return self.pp_quantifier(a, d, xs)
elif z3.is_var(a):
return self.pp_var(a, d, xs)
else:
return to_format(self.pp_unknown())
def pp_decl(self, f):
k = f.kind()
if ((k == Z3_OP_DT_IS) or (k == Z3_OP_ARRAY_MAP)):
g = f.params()[0]
r = [to_format(g.name())]
return seq1(self.pp_name(f), r)
return self.pp_name(f)
def pp_seq_core(self, f, a, d, xs):
self.visited = (self.visited + 1)
if ((d > self.max_depth) or (self.visited > self.max_visited)):
return self.pp_ellipses()
r = []
sz = 0
for elem in a:
r.append(f(elem, (d + 1), xs))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
return seq3(r, '[', ']')
def pp_seq(self, a, d, xs):
return self.pp_seq_core(self.pp_expr, a, d, xs)
def pp_seq_seq(self, a, d, xs):
return self.pp_seq_core(self.pp_seq, a, d, xs)
def pp_model(self, m):
r = []
sz = 0
for d in m:
i = m[d]
if isinstance(i, z3.FuncInterp):
i_pp = self.pp_func_interp(i)
else:
i_pp = self.pp_expr(i, 0, [])
name = self.pp_name(d)
r.append(compose(name, to_format(' = '), indent((_len(name) + 3), i_pp)))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
return seq3(r, '[', ']')
def pp_func_entry(self, e):
num = e.num_args()
if (num > 1):
args = []
for i in range(num):
args.append(self.pp_expr(e.arg_value(i), 0, []))
args_pp = group(seq3(args))
else:
args_pp = self.pp_expr(e.arg_value(0), 0, [])
value_pp = self.pp_expr(e.value(), 0, [])
return group(seq((args_pp, value_pp), self.pp_arrow()))
def pp_func_interp(self, f):
r = []
sz = 0
num = f.num_entries()
for i in range(num):
r.append(self.pp_func_entry(f.entry(i)))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
if (sz <= self.max_args):
else_val = f.else_value()
if (else_val is None):
else_pp = to_format('#unspecified')
else:
else_pp = self.pp_expr(else_val, 0, [])
r.append(group(seq((to_format('else'), else_pp), self.pp_arrow())))
return seq3(r, '[', ']')
def pp_list(self, a):
r = []
sz = 0
for elem in a:
if _support_pp(elem):
r.append(self.main(elem))
else:
r.append(to_format(str(elem)))
sz = (sz + 1)
if (sz > self.max_args):
r.append(self.pp_ellipses())
break
if isinstance(a, tuple):
return seq3(r)
else:
return seq3(r, '[', ']')
def main(self, a):
if z3.is_expr(a):
return self.pp_expr(a, 0, [])
elif z3.is_sort(a):
return self.pp_sort(a)
elif z3.is_func_decl(a):
return self.pp_decl(a)
elif (isinstance(a, z3.Goal) or isinstance(a, z3.AstVector)):
return self.pp_seq(a, 0, [])
elif isinstance(a, z3.Solver):
return self.pp_seq(a.assertions(), 0, [])
elif isinstance(a, z3.Fixedpoint):
return a.sexpr()
elif isinstance(a, z3.Optimize):
return a.sexpr()
elif isinstance(a, z3.ApplyResult):
return self.pp_seq_seq(a, 0, [])
elif isinstance(a, z3.ModelRef):
return self.pp_model(a)
elif isinstance(a, z3.FuncInterp):
return self.pp_func_interp(a)
elif (isinstance(a, list) or isinstance(a, tuple)):
return self.pp_list(a)
else:
return to_format(self.pp_unknown())
def __call__(self, a):
self.visited = 0
return self.main(a) |
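# Hedged usage sketch for the Formatter above (it mirrors z3's
# z3printer.Formatter); assumes the z3-solver package plus the module-level
# helpers the class references (to_format, seq1, compose, ...) are available.
import z3
f = Formatter()
fmt = f(z3.Int('x') + z3.Int('y'))  # a format object describing the layout of x + y |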
class TransformerDecoderLayer(Module):
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False, norm_first=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, **factory_kwargs)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, **factory_kwargs)
self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
def __setstate__(self, state):
if ('activation' not in state):
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None) -> Tensor:
x = tgt
if self.norm_first:
x = (x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask))
x = (x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask))
x = (x + self._ff_block(self.norm3(x)))
else:
x = self.norm1((x + self._sa_block(x, tgt_mask, tgt_key_padding_mask)))
x = self.norm2((x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask)))
x = self.norm3((x + self._ff_block(x)))
return x
def _sa_block(self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]
return self.dropout1(x)
def _mha_block(self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.multihead_attn(x, mem, mem, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]
return self.dropout2(x)
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout3(x) |
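# Hedged usage sketch for TransformerDecoderLayer above (it mirrors torch.nn's
# layer; assumes Module, MultiheadAttention, Linear, Dropout, LayerNorm, F,
# Tensor, Optional are imported as in torch.nn.modules.transformer). With
# batch_first=False, tgt is (target_len, batch, d_model) and memory is
# (source_len, batch, d_model); the output keeps tgt's shape.
import torch
layer = TransformerDecoderLayer(d_model=16, nhead=4, dim_feedforward=32)
tgt = torch.randn(5, 2, 16)
memory = torch.randn(7, 2, 16)
assert layer(tgt, memory).shape == (5, 2, 16) |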
def legacy_get_enum(size_average, reduce, emit_warning=True):
return get_enum(legacy_get_string(size_average, reduce, emit_warning)) |
def init_trial_path(args, is_save=True):
prename = ((((((args.dataset + '_') + str(args.test_dataset)) + '_') + str(args.n_shot_test)) + '_') + args.enc_gnn)
result_path = os.path.join(args.result_path, prename)
os.makedirs(result_path, exist_ok=True)
trial_id = 0
path_exists = True
while path_exists:
trial_id += 1
path_to_results = (result_path + '/{:d}'.format(trial_id))
path_exists = os.path.exists(path_to_results)
args.trial_path = path_to_results
os.makedirs(args.trial_path)
if is_save:
save_args(args)
return args |
def loss_dcgan_dis(netD, netG, x_real, z_rand, label):
with torch.no_grad():
x_fake = netG(z_rand, label).detach()
d_real = netD(x_real, label)
d_fake = netD(x_fake, label)
loss_real = F.binary_cross_entropy_with_logits(d_real, torch.ones_like(d_real))
loss_fake = F.binary_cross_entropy_with_logits(d_fake, torch.zeros_like(d_fake))
return ((loss_real + loss_fake).mean(), d_real.mean(), d_fake.mean()) |
def test(epoch, ternary, rel, norel, split='Test'):
model.eval()
if (not (len(rel[0]) == len(norel[0]))):
print('Not equal length for relation dataset and non-relation dataset.')
return
ternary = cvt_data_axis(ternary)
rel = cvt_data_axis(rel)
norel = cvt_data_axis(norel)
accuracy_ternary = []
accuracy_rels = []
accuracy_norels = []
loss_ternary = []
loss_binary = []
loss_unary = []
for batch_idx in range((len(rel[0]) // bs)):
tensor_data(ternary, batch_idx)
(acc_ter, l_ter) = model.test_(input_img, input_qst, label)
accuracy_ternary.append(acc_ter.item())
loss_ternary.append(l_ter.item())
tensor_data(rel, batch_idx)
(acc_bin, l_bin) = model.test_(input_img, input_qst, label)
accuracy_rels.append(acc_bin.item())
loss_binary.append(l_bin.item())
tensor_data(norel, batch_idx)
(acc_un, l_un) = model.test_(input_img, input_qst, label)
accuracy_norels.append(acc_un.item())
loss_unary.append(l_un.item())
accuracy_ternary = (sum(accuracy_ternary) / len(accuracy_ternary))
accuracy_rel = (sum(accuracy_rels) / len(accuracy_rels))
accuracy_norel = (sum(accuracy_norels) / len(accuracy_norels))
print('{} set: Ternary accuracy: {:.0f}% Binary accuracy: {:.0f}% | Unary accuracy: {:.0f}%'.format(split, accuracy_ternary, accuracy_rel, accuracy_norel))
summary_writer.add_scalars(f'Accuracy/{split}', {'ternary': accuracy_ternary, 'binary': accuracy_rel, 'unary': accuracy_norel}, epoch)
loss_ternary = (sum(loss_ternary) / len(loss_ternary))
loss_binary = (sum(loss_binary) / len(loss_binary))
loss_unary = (sum(loss_unary) / len(loss_unary))
summary_writer.add_scalars('Loss/test', {'ternary': loss_ternary, 'binary': loss_binary, 'unary': loss_unary}, epoch)
return (accuracy_ternary, accuracy_rel, accuracy_norel) |
def check_docker():
if (not check_cmd(['docker', 'version'])):
if (not on_linux):
error("Docker not found.\nIf you are using Docker Toolbox, make sure you are running 'satex'\nwithin the 'Docker quickstart Terminal'.")
else:
error('Docker not found.')
docker_argv = docker_call()
return docker_argv |
def slow(test_case):
if (not _run_slow_tests):
test_case = unittest.skip('test is slow')(test_case)
return test_case |
def download_image(row):
fname = _file_name(row)
if os.path.isfile(fname):
row['status'] = 200
row['file'] = fname
row['mimetype'] = magic.from_file(row['file'], mime=True)
row['size'] = os.stat(row['file']).st_size
return row
try:
response = requests.get(row['url'], stream=False, timeout=10, allow_redirects=True, headers=headers)
row['status'] = response.status_code
except Exception as e:
row['status'] = 408
return row
if response.ok:
try:
with open(fname, 'wb') as out_file:
response.raw.decode_content = True
out_file.write(response.content)
row['file'] = fname
row['mimetype'] = magic.from_file(fname, mime=True)
row['size'] = os.stat(fname).st_size
except:
row['status'] = 408
return row
row['file'] = fname
return row |
class DisDocument(Document):
def __init__(self, dpath, epath):
Document.__init__(self, dpath)
self.datatype = 'dis'
self.eduPath = epath
def read(self):
basename = os.path.basename(self.path)
for e in ['.out', '.dis', '.txt', '.edus']:
basename = basename.replace(e, '')
if (basename in file_mapping):
self.outbasename = file_mapping[basename]
(tree, self.eduIds) = buildTree(open(self.path).read())
tree = binarizeTreeRight(tree)
doc = readEduDoc(self.eduPath, self)
tree = backprop(tree, self)
str_tree = parse(tree)
self.tree = Tree.fromstring(str_tree)
def writeEdu(self, outpath, docno=(- 1)):
if (self.outbasename != os.path.basename(self.path.split('.')[0])):
shutil.copy(self.eduPath, os.path.join(outpath, (self.outbasename.replace('.out', '').replace('.dis', '') + '.edus')))
else:
shutil.copy(self.eduPath.replace('.out', '').replace('.dis', ''), outpath) |
def test_alpha_in_predict() -> None:
mapie_reg = MapieQuantileRegressor()
mapie_reg.fit(X, y)
with pytest.warns(UserWarning, match='WARNING: ensemble is not util*'):
mapie_reg.predict(X, ensemble=True) |
class mnist_model(nn.Module):
def __init__(self):
super(mnist_model, self).__init__()
self.layer1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=0)
self.layer2 = nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=0)
self.layer3 = nn.Linear(256, 100, bias=True)
self.layer4 = nn.Linear(100, 10, bias=True)
self.act = nn.ReLU()
self.pool = nn.MaxPool2d((2, 2))
def forward(self, x):
out = self.act(self.layer1(x))
out = self.pool(out)
out = self.act(self.layer2(out))
out = self.pool(out)
out = out.view((- 1), 256)
out = self.act(self.layer3(out))
out = self.act(self.layer4(out))
return out
def output(self, x):
out1 = self.act(self.layer1(x))
out1 = self.pool(out1)
out2 = self.act(self.layer2(out1))
out2 = self.pool(out2)
out2 = out2.view((- 1), 256)
out3 = self.act(self.layer3(out2))
out4 = self.act(self.layer4(out3))
return (out1, out2, out3, out4) |
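# Hedged shape check for mnist_model above: a 28x28 MNIST image shrinks
# conv5 -> pool2 -> conv5 -> pool2 down to 16*4*4 = 256 features before the
# two linear layers.
import torch
net = mnist_model()
assert net(torch.randn(8, 1, 28, 28)).shape == (8, 10) |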
@register_model_architecture(model_name='s2t_transformer', arch_name='s2t_transformer')
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, 'encoder_freezing_updates', 0)
args.conv_kernel_sizes = getattr(args, 'conv_kernel_sizes', '5,5')
args.conv_channels = getattr(args, 'conv_channels', 1024)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', args.dropout)
args.activation_dropout = getattr(args, 'activation_dropout', args.dropout)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.decoder_layerdrop = getattr(args, 'decoder_layerdrop', 0.0)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
args.quant_noise_pq = getattr(args, 'quant_noise_pq', 0) |
class L2Loss(nn.Module):
def __init__(self, reduction='mean', loss_weight=1.0):
super(L2Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target):
loss = (self.loss_weight * l2_loss(pred, target, reduction=self.reduction))
return loss |
def test_dpt_head():
with pytest.raises(AssertionError):
head = DPTHead(in_channels=[768, 768, 768, 768], channels=4, num_classes=19, in_index=[0, 1, 2, 3])
head = DPTHead(in_channels=[768, 768, 768, 768], channels=4, num_classes=19, in_index=[0, 1, 2, 3], input_transform='multiple_select')
inputs = [[torch.randn(4, 768, 2, 2), torch.randn(4, 768)] for _ in range(4)]
output = head(inputs)
assert (output.shape == torch.Size((4, 19, 16, 16)))
head = DPTHead(in_channels=[768, 768, 768, 768], channels=4, num_classes=19, in_index=[0, 1, 2, 3], input_transform='multiple_select', readout_type='add')
output = head(inputs)
assert (output.shape == torch.Size((4, 19, 16, 16)))
head = DPTHead(in_channels=[768, 768, 768, 768], channels=4, num_classes=19, in_index=[0, 1, 2, 3], input_transform='multiple_select', readout_type='project')
output = head(inputs)
assert (output.shape == torch.Size((4, 19, 16, 16))) |
def create_mpi_script(driver_path, args, hostname, gpus, resource_info, machine_id, partitions, search, port=22):
cmd = ('ssh -p %d %s "mkdir -p %s"' % (port, hostname, REMOTE_PARALLAX_ROOT))
parallax_log.warning(colored(('\n$ %s' % cmd), 'red'))
proc = subprocess.Popen(args=cmd, shell=True)
proc.wait()
cmd_run = ('python %s %s' % (driver_path, ' '.join(args)))
try:
parallax_log_level = os.environ['PARALLAX_LOG_LEVEL']
except:
parallax_log_level = logging.INFO
env = {'CUDA_VISIBLE_DEVICES': ','.join((str(gpuid) for gpuid in gpus)), 'PARALLAX_LOG_LEVEL': parallax_log_level, PARALLAX_MACHINE_ID: machine_id, PARALLAX_HOSTNAME: hostname, 'PARALLAX_RESOURCE_INFO': resource_info, PARALLAX_SEARCH: search}
if partitions:
env[PARALLAX_PARTITIONS] = partitions
cmd_env = ' '.join(map((lambda k: ('export %s=%s;' % (k[0], k[1]))), env.items()))
try:
cmd_venv = (' source %s/bin/activate; ' % os.environ['VIRTUAL_ENV'])
full_cmd = ' '.join([cmd_env, cmd_venv, cmd_run])
except:
full_cmd = ' '.join([cmd_env, cmd_run])
mpi_script = ('bash -c "%s"' % full_cmd)
remote_cmd = ("echo '%s' | " % mpi_script)
remote_cmd += ('ssh -p %d %s' % (port, hostname))
remote_cmd += (" 'cat > %s' && chmod 777 %s" % (REMOTE_MPI_SCRIPT_PATH, REMOTE_MPI_SCRIPT_PATH))
print(colored(('\n$ %s' % remote_cmd), 'red'))
proc = subprocess.Popen(args=remote_cmd, shell=True)
proc.wait() |