code stringlengths 101 5.91M |
|---|
_model
# NOTE(review): the bare "_model" above is decorator residue — presumably a
# stripped "@register_model" line; confirm against the original file.
def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs):
    """Build the tf_efficientnetv2_m variant tagged 'in21k'.

    Forces TensorFlow-compatible settings before delegating to the generator:
    the TF default BatchNorm epsilon and TF-style 'same' padding.

    Args:
        pretrained: forwarded to the model generator.
        **kwargs: extra model-construction options (bn_eps/pad_type are overridden).

    Returns:
        The constructed model.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT  # TF uses a different BN eps than the PyTorch default
    kwargs['pad_type'] = 'same'  # TF-style 'same' padding
    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs)
    return model
class RepresentationWarpEncoder(nn.Module):
    """BigGAN-style convolutional encoder producing a 2*r_channels-channel map.

    The input is downsampled through ``len(ch_mult)`` resolution stages of
    residual blocks (attention is inserted at the resolutions listed in
    ``attn_resolutions``), passed through a res-attn-res middle stack, then
    normalized, activated and projected to ``2 * r_channels`` channels.

    Bug fix: ``time_conditional`` and ``class_conditional`` are now
    ``@property`` accessors. Previously they were plain methods, so
    ``if self.time_conditional:`` in ``forward`` tested a bound method —
    always truthy — which forced the time/class assertions even for
    unconditional models.
    """

    def __init__(self, *, ch, ch_mult, num_res_blocks, attn_resolutions, r_channels, dropout=0.0, in_channels, resolution, act='silu', num_heads=1, num_ch_per_head=None, embedding_type='fourier', fourier_scale=16.0, use_adaptive_group_norm=False, scale_by_sqrt2=False, num_classes=None, time_conditional=False):
        super().__init__()
        self.act = get_act(act)
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.num_classes = num_classes
        self.embedding_type = embedding_type
        self._time_conditional = time_conditional
        if time_conditional:
            # Time-embedding MLP: (Fourier | positional) features -> 4*ch -> 4*ch.
            self.time_conditioning = nn.ModuleList()
            if self.embedding_type.lower() == 'fourier':
                self.time_conditioning.append(GaussianFourierProjection(embedding_size=ch, scale=fourier_scale))
                embed_dim = 2 * ch  # Fourier projection emits sin/cos pairs
            elif self.embedding_type.lower() == 'positional':
                self.time_conditioning.append(PositionalSinusoidalEmbedding(embedding_dim=ch))
                embed_dim = ch
            else:
                raise ValueError(f"Invalid embedding_type. Must be one of: 'fourier', 'positional'. Was: '{self.embedding_type}'")
            self.time_conditioning.append(nn.Linear(embed_dim, 4 * ch))
            self.time_conditioning.append(self.act)
            self.time_conditioning.append(nn.Linear(4 * ch, 4 * ch))
            self.time_conditioning = nn.Sequential(*self.time_conditioning)
        self._class_conditional = self.num_classes is not None
        if self._class_conditional:
            # Class labels are embedded into the same 4*ch conditioning space.
            self.label_emb = nn.Embedding(self.num_classes, 4 * ch)
        _AttnBlock = functools.partial(AttentionBlock, num_heads=num_heads, num_ch_per_head=num_ch_per_head, scale_by_sqrt2=scale_by_sqrt2)
        _ResnetBlock = functools.partial(ResnetBlockBigGAN, act=self.act, emb_dim=4 * ch, dropout=dropout, use_adaptive_group_norm=use_adaptive_group_norm, scale_by_sqrt2=scale_by_sqrt2)
        _Downsample = functools.partial(_ResnetBlock, down=True)
        self.conv_in = nn.Conv2d(in_channels, ch, 3, padding=1)
        self.down = nn.ModuleList()
        current_res = resolution
        in_ch = ch
        h_channels = [in_ch]
        for level in range(self.num_resolutions):
            stage = nn.Module()
            stage.main = nn.ModuleList()
            stage.uses_attn = current_res in attn_resolutions
            out_ch = ch * ch_mult[level]
            for _ in range(self.num_res_blocks):
                stage.main.append(_ResnetBlock(in_ch=in_ch, out_ch=out_ch))
                if stage.uses_attn:
                    stage.main.append(_AttnBlock(channels=out_ch))
                h_channels.append(out_ch)
                in_ch = out_ch
            if level != (self.num_resolutions - 1):
                # All stages except the last end with a strided residual downsample.
                stage.downsample = _Downsample(in_ch=in_ch)
                current_res = current_res // 2
                h_channels.append(in_ch)
            self.down.append(stage)
        self.mid = nn.ModuleList([_ResnetBlock(in_ch=in_ch), _AttnBlock(channels=in_ch), _ResnetBlock(in_ch=in_ch)])
        self.norm_out = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-06)
        # Output carries 2 * r_channels channels (e.g. a mean/log-var pair per channel).
        self.conv_out = torch.nn.Conv2d(in_ch, 2 * r_channels, kernel_size=3, stride=1, padding=1)

    @property
    def time_conditional(self):
        """Whether forward() requires a ``time_cond`` tensor."""
        return self._time_conditional

    @property
    def class_conditional(self):
        """Whether forward() requires a class-label tensor ``y``."""
        return self._class_conditional

    def forward(self, x, time_cond=None, y=None):
        """Encode ``x``; ``time_cond``/``y`` are 1-D per-sample conditioners.

        Args:
            x: input batch (N, in_channels, H, W).
            time_cond: shape (N,) — required iff ``self.time_conditional``.
            y: integer class labels, shape (N,) — required iff ``self.class_conditional``.

        Returns:
            Feature map with ``2 * r_channels`` channels.
        """
        emb = None
        if self.time_conditional:
            assert time_cond is not None
            assert time_cond.shape == (x.shape[0],)
            emb = self.time_conditioning(time_cond)
        if self.class_conditional:
            assert y is not None, 'Missing class label for class-cond. model'
            assert y.shape == (x.shape[0],)
            # Class embedding is added to (or replaces) the time embedding.
            if emb is not None:
                emb = emb + self.label_emb(y)
            else:
                emb = self.label_emb(y)
        h = self.conv_in(x)
        for stage in self.down:
            for block in stage.main:
                # Attention blocks take no conditioning embedding.
                if stage.uses_attn and isinstance(block, AttentionBlock):
                    h = block(h)
                else:
                    h = block(h, emb)
            if hasattr(stage, 'downsample'):
                h = stage.downsample(h, emb)
        for block in self.mid:
            if isinstance(block, AttentionBlock):
                h = block(h)
            else:
                h = block(h, emb)
        h = self.norm_out(h)
        h = self.act(h)
        h = self.conv_out(h)
        return h
def test_mildnonaxi_oortA_grid():
    """Oort A of an evolved DF in a mildly non-axisymmetric potential matches the initial DF."""
    base_df = dehnendf(beta=0.0)
    pot = [LogarithmicHaloPotential(normalize=1.0), EllipticalDiskPotential(twophio=0.001)]
    edf = evolveddiskdf(base_df, pot=pot, to=-10.0)
    # First evaluation builds the value/derivative grids and hands them back.
    oa, grid, dgridR, dgridphi = edf.oortA(0.9, phi=0.2, integrate_method='rk6_c', grid=True, derivRGrid=True, derivphiGrid=True, returnGrids=True, gridpoints=_GRIDPOINTS, derivGridpoints=_GRIDPOINTS)
    ioa = base_df.oortA(0.9)
    assert numpy.fabs(oa - ioa) < 0.005, 'oortA of evolveddiskdf for axisymmetric potential is not equal to that of initial DF'
    # Second evaluation reuses the pre-computed grids and must agree.
    oa = edf.oortA(0.9, phi=0.2, integrate_method='rk6_c', grid=grid, derivRGrid=dgridR, derivphiGrid=dgridphi, gridpoints=_GRIDPOINTS, derivGridpoints=_GRIDPOINTS)
    assert numpy.fabs(oa - ioa) < 0.005, 'oortA of evolveddiskdf for axisymmetric potential is not equal to that of initial DF when calculated with pre-computed grid'
    return None
class RandomAffine():
    """Apply a random HSV jitter followed by a random affine warp to a PIL image."""

    def __call__(self, image):
        arr = numpy.asarray(image)
        # NOTE(review): random_hsv's return value is discarded — it presumably
        # modifies the array in place; confirm.
        random_hsv(arr)
        warped = random_affine(arr)
        return Image.fromarray(warped)
class OrderedSet(collections.abc.MutableSet):
    """A set that remembers insertion order (the classic linked-list recipe).

    Backed by a circular doubly linked list of ``[key, prev, next]`` cells plus
    a dict mapping each key to its cell, giving O(1) add/discard/contains.

    Fix: subclass ``collections.abc.MutableSet`` — the ``collections.MutableSet``
    alias was deprecated since Python 3.3 and removed in 3.10 (ensure
    ``collections.abc`` is imported at the top of the file).
    """

    def __init__(self, iterable=None):
        # Sentinel cell of the circular list: [None, prev, next].
        self.end = end = []
        end += [None, end, end]
        self.map = {}  # key -> [key, prev, next] cell
        if iterable is not None:
            self |= iterable  # MutableSet.__ior__ adds each element via add()

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        """Append *key* at the end of the order if not already present."""
        if key not in self.map:
            end = self.end
            last = end[1]
            last[2] = end[1] = self.map[key] = [key, last, end]

    def discard(self, key):
        """Remove *key* if present by unlinking its cell."""
        if key in self.map:
            key, prev_cell, next_cell = self.map.pop(key)
            prev_cell[2] = next_cell
            next_cell[1] = prev_cell

    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first, if ``last`` is false) element.

        Raises:
            KeyError: if the set is empty.
        """
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing against another OrderedSet.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
def load_model_(model_path):
    """Build the 3-D U-Net and load its weights via the project's _load helper."""
    # NOTE(review): the checkpoint name below looks garbled (loss/dice digits
    # missing); confirm against the original file.
    fallback_ckpt = 'model_/checkpoint_epoch_168_val_loss_0._dice_0..pth'
    net = unet3d.UNet3D(in_channels=4, out_channels=4, final_sigmoid=False, f_maps=32, layer_order='crg', num_levels=4, num_groups=4, conv_padding=1)
    return _load(net, model_path, fallback_ckpt)
def test_reset_scorer() -> None:
    """register_scorer() makes a name resolvable; reset_scorer_register() undoes it."""

    def _expect_unregistered():
        # 'useless' must not resolve before registration / after the reset.
        with pytest.raises(ValueError, match='useless is not a valid scorer '):
            get_scorer('useless')

    _expect_unregistered()
    register_scorer('useless', make_scorer(_return_1))
    get_scorer('useless')
    reset_scorer_register()
    _expect_unregistered()
class TestKLDiv(unittest.TestCase):
    """Checks the project's KL-divergence and entropy losses against PyTorch references."""

    def setUp(self) -> None:
        # (batch, classes, H, W) logits; predictions are softmax over dim 1.
        self.shape = (10, 5, 224, 224)
        self.logit = torch.randn(*self.shape, requires_grad=True)
        self.pred = F.softmax(self.logit, 1)
        # Integer target with the class dimension removed, then its one-hot form.
        self.target = torch.randint(low=0, high=self.shape[1], size=[self.shape[i] for i in range(self.shape.__len__()) if (i != 1)])
        self.target_oh = class2one_hot(self.target, C=self.shape[1]).float()

    def _test_kl_equivalent(self, reduction='mean'):
        """Project KL_div must match nn.KLDivLoss for the given reduction."""
        kl_criterion = nn.KLDivLoss(reduction=reduction)
        kl_loss = kl_criterion(self.pred.log(), target=self.target_oh)
        _kl_loss = loss.KL_div(reduction=reduction)(self.pred, self.target_oh)
        # Under 'mean' the two conventions differ by exactly the class count.
        assert torch.isclose(kl_loss, ((_kl_loss / self.shape[1]) if (reduction == 'mean') else _kl_loss))

    def test_kl_equivalent(self):
        # Exercise both supported reductions.
        for reduction in ('sum', 'mean'):
            self._test_kl_equivalent(reduction=reduction)

    def test_entropy(self):
        """Entropy: rejects logits, is maximal for uniform, zero for one-hot."""
        random_entropy = loss.Entropy()(self.pred)
        # Entropy requires normalized probabilities — raw logits must be rejected.
        with self.assertRaises(AssertionError):
            loss.Entropy()(self.logit)
        # Uniform distribution (1/C everywhere) attains the maximum entropy.
        max_entropy = loss.Entropy()(torch.zeros_like(self.pred).fill_((1 / self.shape[1])))
        assert (random_entropy <= max_entropy)
        # Degenerate (one-hot) rows have zero entropy.
        zero_entropy = loss.Entropy()(torch.Tensor([[1, 0], [0, 1]]))
        assert (zero_entropy == 0)
def neuralchat_client_execute():
    """Resolve and run the CLI sub-command selected by ``sys.argv``.

    Walks the nested ``neuralchat_client_commands`` dict one argv token at a
    time; the deepest matching node's ``'_command'`` entry is either a
    callable or a dotted ``"pkg.module.Class"`` path that is imported lazily
    (and cached back into the dict).

    Returns:
        0 if the command's ``execute()`` reports success, 1 otherwise.
    """
    com = neuralchat_client_commands
    idx = 0
    for _argv in (['neuralchat_client'] + sys.argv[1:]):
        if (_argv not in com):
            break
        idx += 1
        com = com[_argv]
    if (not callable(com['_command'])):
        # Lazily import "pkg.module.Class" with importlib instead of the old
        # exec('from … import …') + locals() trick, which relied on a fragile
        # CPython behavior and executed string data as code.
        import importlib
        (module_name, cls_name) = com['_command'].rsplit('.', 1)
        com['_command'] = getattr(importlib.import_module(module_name), cls_name)
    status = (0 if com['_command']().execute(sys.argv[idx:]) else 1)
    return status
def mdetr_efficientnetB3(pretrained=False, return_postprocessor=False):
    """Build an MDETR model with a timm EfficientNet-B3 (NoisyStudent) backbone.

    Args:
        pretrained: if True, download and load pretrained weights.
        return_postprocessor: if True, also return a PostProcess instance.

    Returns:
        The model, or ``(model, PostProcess())`` when ``return_postprocessor``.
    """
    model = _make_detr('timm_tf_efficientnet_b3_ns')
    if pretrained:
        # FIXME: the checkpoint URL was scrubbed from this source (the original
        # line was a syntax error: url=' map_location='cpu'); restore the real
        # URL before using pretrained=True.
        _CHECKPOINT_URL = ''
        checkpoint = torch.hub.load_state_dict_from_url(url=_CHECKPOINT_URL, map_location='cpu', check_hash=True)
        model.load_state_dict(checkpoint['model'])
    if return_postprocessor:
        return (model, PostProcess())
    return model
def ODETimeSplitter(num_split, K):
    """Return the num_split - 1 interior points that split [0, K] evenly.

    The endpoints 0 and K themselves are excluded.
    """
    step = K / num_split
    points = []
    for idx in range(1, num_split):
        points.append(idx * step)
    return points
_model
# NOTE(review): the bare "_model" above is decorator residue — presumably a
# stripped "@register_model" line; confirm against the original file.
def semnasnet_075(pretrained=False, **kwargs):
    """MNASNet-A1 (with squeeze-excite) at a 0.75 channel multiplier.

    Args:
        pretrained: forwarded to the model generator.
        **kwargs: extra model-construction options.

    Returns:
        The constructed model.
    """
    model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)
    return model
class PreResUnit(nn.Module):
    """Pre-activation ResNet unit: a residual body plus an identity shortcut.

    The body returns both its output and the pre-activation tensor; when the
    shapes change (channels or stride), the shortcut is a 1x1 convolution of
    that pre-activation instead of the raw input.
    """

    def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride):
        super(PreResUnit, self).__init__()
        # The skip path needs a projection whenever shape or stride changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = PreResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride)
        else:
            self.body = PreResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, stride=stride)

    def forward(self, x):
        shortcut = x
        x, pre_activation = self.body(x)
        if self.resize_identity:
            # Project the pre-activation, not the raw input (pre-act ResNet).
            shortcut = self.identity_conv(pre_activation)
        return x + shortcut
def _replace_end(string, end, new_end):
if (string == end):
return new_end
if string.endswith(('.' + end)):
return ((string[:(- len(('.' + end)))] + '.') + new_end)
return string |
_cache(maxsize=100)
# NOTE(review): the line above is a mangled decorator — presumably
# "@functools.lru_cache(maxsize=100)"; confirm against the original file.
def get_tiny_tokenizer_from_checkpoint(checkpoint):
    """Return a small-vocabulary tokenizer for *checkpoint*.

    Tokenizers whose vocabulary is already under 300 entries are returned
    unchanged; larger ones are re-trained on a tiny ASCII corpus so tests
    stay fast.
    """
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    if (tokenizer.vocab_size < 300):
        return tokenizer
    logger.info('Training new from iterator ...')
    # Training corpus: every ASCII letter, digit and the space character.
    vocabulary = ((string.ascii_letters + string.digits) + ' ')
    tokenizer = tokenizer.train_new_from_iterator(vocabulary, vocab_size=len(vocabulary), show_progress=False)
    logger.info('Trained.')
    return tokenizer
class BirdsRefTask(RefTask):
    """Reference-game task over CUB bird images.

    Images are represented by precomputed feature vectors; captions are
    reduced to a normalized indicator vector over a 50-bigram lexicon.

    NOTE(review): this class uses Python-2-only constructs
    (``features.values()[0]``, ``reader.next()``) and opens a pickle in text
    mode; it will not run unmodified under Python 3.
    """

    def __init__(self):
        super(BirdsRefTask, self).__init__(N_FEATURES)
        # NOTE(review): text-mode open for a pickle file — Python 3 needs 'rb'.
        with open('data/birds/labels.p') as feature_f:
            features = pickle.load(feature_f)
        self.random = np.random.RandomState(0)
        # Py2-only: dict.values() is indexable here.
        (n_raw_features,) = features.values()[0].shape
        # NOTE(review): `projector` is built but never applied (proj = v below);
        # the random projection appears to be disabled.
        projector = self.random.randn(N_FEATURES, n_raw_features)
        proj_features = {}
        m1 = np.zeros(N_FEATURES)
        m2 = np.zeros(N_FEATURES)
        for (k, v) in features.items():
            proj = v
            m1 += proj
            m2 += (proj ** 2)
            proj_features[k] = proj
        m1 /= len(features)
        m2 /= len(features)
        # NOTE(review): `std` is computed but unused — normalization disabled?
        std = np.sqrt((m2 - (m1 ** 2)))
        self.features = proj_features
        self.keys = sorted(self.features.keys())
        # Train/val/test splits: one image key per line.
        self.folds = {}
        for fold in ['train', 'val', 'test']:
            with open(('data/birds/%s.txt' % fold)) as fold_f:
                self.folds[fold] = [line.strip() for line in fold_f]
        # Crowd-sourced captions keyed by the last two URL path components.
        anns = {}
        with open('data/birds/cub_0917_5cap.tsv') as ann_f:
            reader = csv.reader(ann_f, delimiter='\t')
            header = reader.next()  # Py2-only; Python 3 would use next(reader)
            i_url = header.index('Input.image_url')
            i_desc = header.index('Answer.Description')
            for line in reader:
                url = line[i_url]
                desc = line[i_desc]
                key = '/'.join(url.split('/')[(- 2):])
                # Lowercase and strip basic punctuation before tokenizing.
                desc = desc.lower().replace('.', '').replace(',', '')
                anns[key] = tuple(desc.split())
        # Count bigrams across all captions; keep the 50 most frequent that
        # contain no stop words — these form the lexicon.
        counts = defaultdict((lambda : 0))
        for desc in anns.values():
            for i in range((len(desc) - 1)):
                bigram = desc[i:(i + 2)]
                counts[bigram] += 1
        freq_terms = sorted(counts.items(), key=(lambda p: (- p[1])))
        freq_terms = [f for f in freq_terms if (not any(((w in STOP) for w in f[0])))]
        freq_terms = [f[0] for f in freq_terms]
        freq_terms = freq_terms[:50]
        # Vocabulary: index 0 is the empty/padding symbol '_', index 1 is UNK.
        self.vocab = {'_': 0, 'UNK': 1}
        self.reverse_vocab = {0: '_', 1: 'UNK'}
        self.lexicon = [[0]]
        for term in freq_terms:
            for word in term:
                if (word in self.vocab):
                    continue
                index = len(self.vocab)
                self.vocab[word] = index
                self.reverse_vocab[index] = word
            self.lexicon.append([self.vocab[w] for w in term])
        # Build each image's normalized indicator over lexicon entries; images
        # matching no lexicon bigram are dropped from anns and from the folds.
        discarded = []
        self.reps = {}
        for (k, desc) in anns.items():
            rep = np.zeros(len(self.lexicon))
            out = []
            for (i_l, l) in enumerate(self.lexicon):
                if all(((self.reverse_vocab[w] in desc) for w in l)):
                    rep[i_l] = 1
                    out += l
            if (len(out) == 0):
                discarded.append(k)
                continue
            assert rep.any()
            rep /= np.sum(rep)
            self.reps[k] = rep
        for k in discarded:
            del anns[k]
            for fold in self.folds.values():
                if (k in fold):
                    fold.remove(k)
        # Representation for "no description": all mass on the empty symbol.
        self.empty_desc = np.zeros(len(self.lexicon))
        self.empty_desc[0] = 1

    def reset_test(self):
        """Re-seed the RNG so test-time sampling is reproducible."""
        self.random = np.random.RandomState(0)

    def get_pair(self, fold):
        """Sample two images from *fold*; returns (f1, f2, rep-of-first, k1, k2)."""
        fold_keys = self.folds[fold]
        (i1, i2) = self.random.randint(len(fold_keys), size=2)
        (k1, k2) = (fold_keys[i1], fold_keys[i2])
        (f1, f2) = (self.features[k1], self.features[k2])
        rep = self.reps[k1]
        return (f1, f2, rep, k1, k2)

    def visualize(self, state, agent):
        """Render the pair as two <img> tags, flipped for agent 0 when target == 1."""
        # NOTE(review): the URL template literal below was truncated/scrubbed in
        # this source dump (unterminated string); restore it from the original.
        url_template = '
        url1 = (url_template % state.left_data)
        url2 = (url_template % state.right_data)
        if ((agent == 0) and (state.target == 1)):
            (url1, url2) = (url2, url1)
        html_template = "<img src='%s'>"
        return ((html_template % url1) + (html_template % url2))

    def turk_visualize(self, state, agent, loc):
        """Return (left, right) data in the order seen by *agent*."""
        left = state.left_data
        right = state.right_data
        if ((agent == 0) and (state.target == 1)):
            (left, right) = (right, left)
        return (left, right)

    def pp(self, indices):
        """Pretty-print a sequence of vocabulary indices as words."""
        return ' '.join([self.reverse_vocab[i] for i in indices])
def load_dict_classifiers(args, classifiers_name=DEFAULT_LIST_CLASSIFIERS):
    """Load the named classifiers and split them into train/test dicts.

    When ``args.ensemble_adv_trained`` is set, the test dict is replaced by
    the ensemble-adversarially-trained models.
    """
    classifiers = load_list_classifiers(args, classifiers_name=classifiers_name)
    l_train, l_test = split_classif_list(args, classifiers)
    print(f'''
Training on {l_train.keys()}
''')
    print(f'''
Testing on {l_test.keys()}
''')
    if args.ensemble_adv_trained:
        l_test = load_ens_adv_model(args)
    return (l_train, l_test)
def training_user_task(model, sess):
    """Train the user-task model, periodically evaluating and checkpointing.

    Regenerates the training/validation batch sets each epoch; every
    ``setting.verbose`` epochs it computes the training loss and a validation
    cosine metric, and saves a checkpoint when a new best is reached.

    Args:
        model: the TF model whose ops are run.
        sess: an active tf.Session.
    """
    # NOTE(review): best_loss starts at 0 and the save condition is
    # `cosine < best_loss`, which never fires for non-negative cosine values —
    # verify whether the intent was `cosine > best_loss` (track the maximum).
    best_loss = 0
    saver = tf.train.Saver()
    ts_u = data.generate_user_dict_train(setting.oracle_training_file_user_task)
    vs_u = data.generate_user_dict_valid(setting.oracle_valid_file_user_task)
    train_batches = data.generate_meta_train_user_set(ts_u)
    valid_batches = data.generate_meta_valid_user_set(vs_u)
    # train_batches[3] / valid_batches[3] hold the total example counts.
    num_batch = (int(train_batches[3]) // setting.batch_size_user)
    batch_index = range(num_batch)
    valid_num_batch = (int(valid_batches[3]) // setting.batch_size_user)
    valid_batch_index = range(valid_num_batch)
    for epoch_count in range(setting.user_epoch):
        train_begin = time()
        training_batch_user_task(batch_index, model, sess, train_batches, True)
        train_time = (time() - train_begin)
        if ((epoch_count % setting.verbose) == 0):
            # Periodic diagnostics: training loss and validation cosine metric.
            loss_begin = time()
            train_loss = training_loss_user_task(batch_index, model, sess, train_batches, True)
            loss_time = (time() - loss_begin)
            eval_begin = time()
            cosine = evaluate_user_task(valid_batch_index, model, sess, valid_batches, True)
            eval_time = (time() - eval_begin)
            print(('epoch %d, train time is %.4f, loss time is %.4f, eval_time is %.4f, train_loss is %.4f, test cosine value is %.4f' % (epoch_count, train_time, loss_time, eval_time, train_loss, cosine)))
            if (cosine < best_loss):
                best_loss = cosine
                saver.save(sess, setting.checkpoint_path_user_task, global_step=epoch_count)
        # Re-sample the batch sets for the next epoch.
        ts_u = data.generate_user_dict_train(setting.oracle_training_file_user_task)
        vs_u = data.generate_user_dict_valid(setting.oracle_valid_file_user_task)
        train_batches = data.generate_meta_train_user_set(ts_u)
        valid_batches = data.generate_meta_valid_user_set(vs_u)
class TestTransitDepthCalculator(unittest.TestCase):
    """End-to-end checks of TransitDepthCalculator against reference spectra,
    analytic haze solutions, k-coefficient consistency, and input validation."""

    # NOTE(review): several numeric literals in this class appear scrubbed
    # (`Rp = .0`, `Rs = .0`, `P_surface = .0` all evaluate to 0.0, which is
    # physically implausible for radii/pressures) — restore from the original.

    def get_frac_dev(self, logZ, CO_ratio, custom_abundances):
        """Compute fractional deviation from the stored hot-Jupiter reference spectrum."""
        Rp = .0
        Mp = 2e+27
        Rs = .0
        T = 1200
        depth_calculator = TransitDepthCalculator()
        (wavelengths, transit_depths) = depth_calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=CO_ratio, custom_abundances=custom_abundances, cloudtop_pressure=10000.0)
        (ref_wavelengths, ref_depths) = np.loadtxt('tests/testing_data/hot_jupiter_spectra.dat', unpack=True, skiprows=2)
        # Reference file stores depths in percent.
        ref_depths /= 100
        frac_dev = (np.abs((ref_depths - transit_depths)) / ref_depths)
        return frac_dev

    def test_custom_file(self):
        """Depths from a custom abundance file match the reference within tolerance."""
        custom_abundances = AbundanceGetter.from_file('tests/testing_data/abund_1Xsolar_cond.dat')
        # Species absent from the reference opacity set are removed first.
        for key in ['HCl', 'HF', 'MgH', 'SH', 'SiH', 'C2H2', 'C2H4', 'C2H6', 'H2CO', 'OCS']:
            del custom_abundances[key]
        frac_dev = self.get_frac_dev(None, None, custom_abundances)
        self.assertLess(np.percentile(frac_dev, 95), 0.03)
        self.assertLess(np.max(frac_dev), 0.07)

    def test_ggchem(self):
        """Equilibrium-chemistry (GGchem) abundances reproduce the reference spectrum."""
        frac_dev = self.get_frac_dev(0, 0.53, None)
        self.assertLess(np.percentile(frac_dev, 95), 0.03)
        self.assertLess(np.max(frac_dev), 0.1)

    def test_unbound_atmosphere(self):
        """A planet too light to bind its atmosphere raises AtmosphereError."""
        Rp = 6378000.0
        Mp = 5.97e+20
        Rs = .0
        T = 300
        depth_calculator = TransitDepthCalculator()
        with self.assertRaises(AtmosphereError):
            (wavelengths, transit_depths) = depth_calculator.compute_depths(Rs, Mp, Rp, T, logZ=0.2, CO_ratio=1.1, T_star=6100)

    def test_bin_wavelengths(self):
        """change_wavelength_bins makes compute_depths return one value per bin."""
        Rp = .0
        Mp = 7.49e+26
        Rs = .0
        T = 1200
        depth_calculator = TransitDepthCalculator()
        bins = np.array([[0.4, 0.6], [1, 1.1], [1.2, 1.4], [3.2, 4], [5, 6]])
        bins *= 1e-06  # microns -> meters
        depth_calculator.change_wavelength_bins(bins)
        (wavelengths, transit_depths) = depth_calculator.compute_depths(Rs, Mp, Rp, T, logZ=0.2, CO_ratio=1.1, T_star=6100)
        self.assertEqual(len(wavelengths), len(bins))
        self.assertEqual(len(transit_depths), len(bins))
        # A different stellar temperature must not change the binning.
        (wavelengths, transit_depths) = depth_calculator.compute_depths(Rs, Mp, Rp, T, logZ=0.2, CO_ratio=1.1, T_star=12000)
        self.assertEqual(len(wavelengths), len(bins))
        self.assertEqual(len(transit_depths), len(bins))

    def test_power_law_haze(self):
        """Pure-H2 Rayleigh-like haze matches the analytic isothermal solution."""
        Rs = R_sun
        Mp = M_jup
        Rp = R_jup
        T = 1200
        # Zero out everything except H2 so only scattering contributes.
        abundances = AbundanceGetter().get(0, 0.53)
        for key in abundances:
            abundances[key] *= 0
        abundances['H2'] += 1
        depth_calculator = TransitDepthCalculator()
        (wavelengths, transit_depths, info_dict) = depth_calculator.compute_depths(Rs, Mp, Rp, T, logZ=None, CO_ratio=None, cloudtop_pressure=np.inf, custom_abundances=abundances, add_gas_absorption=False, add_collisional_absorption=False, full_output=True)
        g = ((G * Mp) / (Rp ** 2))
        H = ((k_B * T) / ((2 * AMU) * g))  # scale height for H2 (mean mol. weight 2)
        gamma = 0.57721  # Euler-Mascheroni constant
        polarizability = 8.059e-31
        # Rayleigh cross section ~ lambda^-4.
        sigma = ((((128.0 * (np.pi ** 5)) / 3) * (polarizability ** 2)) / (depth_calculator.atm.lambda_grid ** 4))
        kappa = (sigma / (2 * AMU))
        P_surface = .0
        R_surface = info_dict['radii'][(- 1)]
        tau_surface = (((P_surface / g) * np.sqrt((((2 * np.pi) * R_surface) / H))) * kappa)
        analytic_R = (R_surface + (H * ((gamma + np.log(tau_surface)) + scipy.special.expn(1, tau_surface))))
        analytic_depths = ((analytic_R ** 2) / (Rs ** 2))
        ratios = (analytic_depths / transit_depths)
        relative_diffs = np.abs((ratios - 1))
        self.assertTrue(np.all((relative_diffs < 0.001)))

    def test_k_coeffs_unbinned(self):
        """Unbinned k-table depths track N-point-smoothed cross-section depths."""
        xsec_calc = TransitDepthCalculator(method='xsec')
        ktab_calc = TransitDepthCalculator(method='ktables')
        (xsec_wavelengths, xsec_depths) = xsec_calc.compute_depths(R_sun, M_jup, R_jup, 1000)
        N = 10
        # Boxcar-smooth and decimate the xsec spectrum to the ktable resolution.
        smoothed_xsec_wavelengths = uniform_filter(xsec_wavelengths, N)[::N]
        smoothed_xsec_depths = uniform_filter(xsec_depths, N)[::N]
        (ktab_wavelengths, ktab_depths) = ktab_calc.compute_depths(R_sun, M_jup, R_jup, 1000)
        diffs = np.abs((ktab_depths - smoothed_xsec_depths[:(- 1)]))
        self.assertTrue((np.median(diffs) < 2e-05))
        self.assertTrue((np.percentile(diffs, 95) < 5e-05))
        self.assertTrue((np.max(diffs) < 0.00015))

    def test_k_coeffs_binned(self):
        """Binned depths agree between the xsec and ktable opacity methods."""
        wavelengths = np.exp(np.arange(np.log(3.1e-07), np.log(2.9e-05), (1.0 / 20)))
        wavelength_bins = np.array([wavelengths[0:(- 1)], wavelengths[1:]]).T
        xsec_calc = TransitDepthCalculator(method='xsec')
        xsec_calc.change_wavelength_bins(wavelength_bins)
        ktab_calc = TransitDepthCalculator(method='ktables')
        ktab_calc.change_wavelength_bins(wavelength_bins)
        (wavelengths, xsec_depths) = xsec_calc.compute_depths(R_sun, M_jup, R_jup, 300, logZ=1, CO_ratio=1.5)
        (wavelengths, ktab_depths) = ktab_calc.compute_depths(R_sun, M_jup, R_jup, 300, logZ=1, CO_ratio=1.5)
        diffs = np.abs((ktab_depths - xsec_depths))
        self.assertTrue((np.median(diffs) < 1e-05))
        self.assertTrue((np.percentile(diffs, 95) < 2e-05))
        self.assertTrue((np.max(diffs) < 3e-05))

    def test_bounds_checking(self):
        """Out-of-range T/logZ/CO_ratio/cloudtop_pressure inputs are rejected."""
        Rp = .0
        Mp = 7.49e+26
        Rs = .0
        T = 1200
        logZ = 0
        CO_ratio = 1.1
        calculator = TransitDepthCalculator()
        # Temperature outside the tabulated range -> AtmosphereError.
        with self.assertRaises(AtmosphereError):
            calculator.compute_depths(Rs, Mp, Rp, 199, logZ=logZ, CO_ratio=CO_ratio)
        with self.assertRaises(AtmosphereError):
            calculator.compute_depths(Rs, Mp, Rp, 3001, logZ=logZ, CO_ratio=CO_ratio)
        # Invalid abundance/cloud parameters -> ValueError.
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=(- 1.1), CO_ratio=CO_ratio)
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=3.1, CO_ratio=CO_ratio)
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=0.01)
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=11)
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=CO_ratio, cloudtop_pressure=0.0001)
        with self.assertRaises(ValueError):
            calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=CO_ratio, cloudtop_pressure=.0)
        # An infinitely deep cloud deck is allowed (clear atmosphere).
        calculator.compute_depths(Rs, Mp, Rp, T, logZ=logZ, CO_ratio=CO_ratio, cloudtop_pressure=np.inf)
def preprocess_data(args):
    """Load and normalize the dataset named by ``args['task_name']``.

    Three phases:
      1. load the raw dataset (HF hub path, local TSV/CSV, or saved dataset);
      2. rename/convert columns and labels into a common schema
         (``sentence``/``sentence1``/``sentence2`` + integer ``label``; label
         polarity is flipped when the in-distribution task ``args['ID_name']``
         uses the opposite convention);
      3. pick/concatenate the relevant splits into a single flat Dataset.

    Args:
        args: dict with at least 'task_name' and (for some tasks) 'ID_name'.

    Returns:
        A single datasets.Dataset with a unified schema.
    """
    # --- Phase 1: load the raw data -------------------------------------
    if (args['task_name'] == 'paws_labeled_final_wiki'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/paws', 'labeled_final', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'paws_labeled_swap_wiki'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/paws', 'labeled_swap', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'paws_unlabeled_final_wiki'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/paws', 'unlabeled_final', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'paws_qqp'):
        # PAWS-QQP ships as TSVs; merge train + dev/test into one Dataset.
        data_0 = pd.read_csv('/yanglinyi/new_codes_cluster/paws/paws_qqp/output/train.tsv', sep='\t')
        data_1 = pd.read_csv('/yanglinyi/new_codes_cluster/paws/paws_qqp/output/dev_and_test.tsv', sep='\t')
        data = pd.concat([data_0, data_1], ignore_index=True)
        data = da.Dataset.from_pandas(data)
    elif (args['task_name'] == 'qqp'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'qqp', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'mrpc'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'mrpc', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'rte'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'rte', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'mnli_mismatched'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'mnli', split='validation_mismatched', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'sick_num5'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/sick', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'sick_num3'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/sick', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'hans'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/hans', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'qnli_validation'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'qnli', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
        data = data.rename_column('question', 'sentence1')
        data = data.rename_column('sentence', 'sentence2')
    elif (args['task_name'] == 'cola_validation'):
        data = load_dataset('/yanglinyi/new_codes_cluster/datasets/glue', 'cola', cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    elif (args['task_name'] == 'cola_ood'):
        from datasets import load_from_disk
        data = load_from_disk('/yanglinyi/new_codes_cluster/cola_data/cola_ood')
    elif (args['task_name'] == 'qnli_ood'):
        from datasets import load_from_disk
        data = load_from_disk('/yanglinyi/new_codes_cluster/qnli_data/qnli_ood')
    elif (args['task_name'] == 'scitail'):
        data = pd.read_csv('/yanglinyi/new_codes_cluster/datasets/scitail/merged_scitail.tsv', sep='\t')
        data = da.Dataset.from_pandas(data)
    elif (args['task_name'] == 'flipkart'):
        # NOTE(review): error_bad_lines is deprecated in newer pandas
        # (use on_bad_lines='skip'); confirm the pinned pandas version.
        data = pd.read_csv('/yanglinyi/new_codes_cluster/datasets/flipkart/flipkart_updated.tsv', sep='\t', error_bad_lines=False)
        data = da.Dataset.from_pandas(data)
    elif (args['task_name'] == 'twitter'):
        data_train = pd.read_csv('/yanglinyi/new_codes_cluster/datasets/SemEval-PIT2015-github/train.tsv', sep='\t', error_bad_lines=False)
        data_dev = pd.read_csv('/yanglinyi/new_codes_cluster/datasets/SemEval-PIT2015-github/dev.tsv', sep='\t', error_bad_lines=False)
        data_test = pd.read_csv('/yanglinyi/new_codes_cluster/datasets/SemEval-PIT2015-github/test.tsv', sep='\t', error_bad_lines=False)
        data = pd.concat([data_train, data_dev, data_test], ignore_index=True)
        data = da.Dataset.from_pandas(data)
    else:
        # Fallback: treat the task name as a local dataset directory.
        data = load_dataset(('/yanglinyi/new_codes_cluster/datasets/' + args['task_name']), cache_dir='/yanglinyi/new_codes_cluster/huggingface/datasets')
    # --- Phase 2: normalize columns and labels --------------------------
    if (args['task_name'] == 'sick_num5'):
        # Map relatedness [1, 5] onto integer labels starting at 0.
        data = data.map((lambda examples: {'relatedness_score': round((examples['relatedness_score'] - 1.0))}))
        data = data.rename_column('sentence_A', 'sentence1')
        data = data.rename_column('sentence_B', 'sentence2')
        data = data.remove_columns(['id', 'label', 'entailment_AB', 'entailment_BA', 'sentence_A_original', 'sentence_B_original', 'sentence_A_dataset', 'sentence_B_dataset'])
        data = data.rename_column('relatedness_score', 'label')
    if (args['task_name'] == 'sick_num3'):
        data = data.rename_column('sentence_A', 'sentence1')
        data = data.rename_column('sentence_B', 'sentence2')
        data = data.remove_columns(['id', 'relatedness_score', 'entailment_AB', 'entailment_BA', 'sentence_A_original', 'sentence_B_original', 'sentence_A_dataset', 'sentence_B_dataset'])
    if (args['task_name'] == 'imdb'):
        data = data.rename_column('text', 'sentence')
    elif (args['task_name'] == 'yelp_polarity'):
        data = data.rename_column('text', 'sentence')
    elif (args['task_name'] == 'amazon_polarity'):
        data = data.rename_column('content', 'sentence')
        data = data.remove_columns('title')
    elif (args['task_name'] == 'qqp'):
        # Flip label polarity when the ID task's convention is inverted.
        if (args['ID_name'] == 'rte'):
            data = data.map((lambda examples: {'label': ((- examples['label']) + 1.0)}))
        data = data.rename_column('question1', 'sentence1')
        data = data.rename_column('question2', 'sentence2')
    elif (args['task_name'] == 'rte'):
        if (args['ID_name'] != 'rte'):
            data = data.map((lambda examples: {'label': ((- examples['label']) + 1.0)}))
    elif (args['task_name'] == 'mnli_mismatched'):
        data = data.rename_column('hypothesis', 'sentence2')
        data = data.rename_column('premise', 'sentence1')
    elif (args['task_name'] == 'snli'):
        data = data.rename_column('hypothesis', 'sentence2')
        data = data.rename_column('premise', 'sentence1')
    elif (args['task_name'] == 'mrpc'):
        if (args['ID_name'] == 'rte'):
            data = data.map((lambda examples: {'label': ((- examples['label']) + 1.0)}))
    elif (args['task_name'] == 'hans'):
        if (args['ID_name'] != 'rte'):
            data = data.map((lambda examples: {'label': ((- examples['label']) + 1.0)}))
        data = data.rename_column('premise', 'sentence1')
        data = data.rename_column('hypothesis', 'sentence2')
        data = data.remove_columns(['parse_premise', 'parse_hypothesis', 'binary_parse_premise', 'binary_parse_hypothesis', 'heuristic', 'subcase', 'template'])
    elif ('paws' in args['task_name']):
        if (args['ID_name'] == 'rte'):
            data = data.map((lambda examples: {'label': ((- examples['label']) + 1.0)}))
    elif (args['task_name'] == 'qnli_ood'):
        data = data.rename_column('question', 'sentence1')
        data = data.rename_column('sentence', 'sentence2')
        def convert_qnli_dataset(example):
            # String labels -> 0 (entailment) / 1 (not_entailment).
            if (example['label'] == 'entailment'):
                return 0
            elif (example['label'] == 'not_entailment'):
                return 1
            else:
                raise ValueError('label not legal')
        data = data.map((lambda examples: {'label': convert_qnli_dataset(examples)}))
    elif (args['task_name'] == 'scitail'):
        def convert_scitail_dataset(example):
            # String labels -> 0 (entails) / 1 (neutral).
            if (example['label'] == 'entails'):
                return 0
            elif (example['label'] == 'neutral'):
                return 1
            else:
                raise ValueError('label not legal')
        data = data.map((lambda examples: {'label': convert_scitail_dataset(examples)}))
        data = data.rename_column('premise', 'sentence1')
        data = data.rename_column('hypothesis', 'sentence2')
    elif (args['task_name'] == 'flipkart'):
        def convert_flipkart_dataset(example):
            # Sentiment strings -> binary; neutral is folded into negative (0).
            if (example['label'] == 'negative'):
                return 0
            elif (example['label'] == 'neutral'):
                return 0
            elif (example['label'] == 'positive'):
                return 1
            else:
                raise ValueError('label not legal')
        data = data.rename_column('Summary', 'sentence')
        data = data.rename_column('Sentiment', 'label')
        data = data.map((lambda examples: {'label': convert_flipkart_dataset(examples)}))
    elif (args['task_name'] == 'twitter'):
        data = data.rename_column('Sent_1', 'sentence1')
        data = data.rename_column('Sent_2', 'sentence2')
        data = data.rename_column('Label', 'label')
    # --- Phase 3: select/concatenate splits -----------------------------
    if ((args['task_name'] == 'imdb') or (args['task_name'] == 'yelp_polarity') or (args['task_name'] == 'amazon_polarity')):
        data = concatenate_datasets([data['train'], data['test']])
    elif (args['task_name'] == 'paws_labeled_final_wiki'):
        data = concatenate_datasets([data['train'], data['test'], data['validation']])
    elif (args['task_name'] == 'paws_labeled_swap_wiki'):
        data = data['train']
    elif (args['task_name'] == 'paws_unlabeled_final_wiki'):
        data = concatenate_datasets([data['train'], data['validation']])
    elif (args['task_name'] == 'paws_qqp'):
        data = data  # already a flat Dataset
    elif (args['task_name'] == 'sick_num5'):
        data = concatenate_datasets([data['train'], data['test'], data['validation']])
    elif (args['task_name'] == 'sick_num3'):
        data = concatenate_datasets([data['train'], data['test'], data['validation']])
    elif (args['task_name'] == 'qqp'):
        data = concatenate_datasets([data['train'], data['validation']])
    elif (args['task_name'] == 'rte'):
        data = concatenate_datasets([data['train'], data['validation']])
    elif (args['task_name'] == 'mrpc'):
        data = concatenate_datasets([data['train'], data['validation']])
    elif (args['task_name'] == 'mnli_mismatched'):
        data = data  # loaded with an explicit split already
    elif (args['task_name'] == 'snli'):
        data = concatenate_datasets([data['train'], data['test'], data['validation']])
    elif (args['task_name'] == 'hans'):
        data = concatenate_datasets([data['train'], data['validation']])
    elif (args['task_name'] == 'qnli_validation'):
        data = data['validation']
    elif (args['task_name'] == 'cola_validation'):
        data = data['validation']
    elif (args['task_name'] == 'cola_ood'):
        data = data  # load_from_disk already returns a flat Dataset
    elif (args['task_name'] == 'qnli_ood'):
        data = data  # load_from_disk already returns a flat Dataset
    return data
_group.command('add')
('env')
('endpoint')
# NOTE(review): the three lines above are mangled click decorators —
# presumably "@_group.command('add')", "@click.argument('env')" and
# "@click.argument('endpoint')"; confirm against the original file.
def add_environment(env, endpoint):
    """Register an environment/endpoint pair, echoing what was added.

    Args:
        env: environment name.
        endpoint: URL/address associated with the environment.
    """
    click.echo(f'Adding environment:')
    click.echo(f' ENVIRONMENT: {env}')
    click.echo(f' ENDPOINT: {endpoint}')
    add_env(env, endpoint)
def gen_layer(C_in, C_out, model_index):
    """Decode a flat architecture index into an upsampling layer.

    ``model_index`` encodes four choices (upsample primitive, conv op,
    kernel size, activation) in a mixed-radix scheme over the ``genotypes``
    tables.  The index space is partitioned into three ranges:
    0-251 (primitive-first layers), 252-311 (DepthToSpace with conv applied
    before the primitive, ``swap=True``) and 312-323 (transposed-convolution
    layers).  NOTE(review): the hard-coded range bounds and the special
    ``conv_index = 5`` / ``prim_index = 4`` cases assume the current sizes of
    ``genotypes.ACTIVATION`` / ``KERNEL_SIZE`` / ``UPSAMPLE_PRIMITIVE`` /
    ``UPSAMPLE_CONV`` -- confirm before editing those tables.

    Returns:
        nn.Sequential of (primitive, conv) or (conv, primitive) when swapped.
    """
    swap = False
    if ((model_index >= 0) and (model_index <= 251)):
        # Outer digit selects the primitive; remainder encodes conv/kernel/act.
        prim_index = (model_index // 63)
        model_index = (model_index % 63)
        conv_index = (((model_index // len(genotypes.ACTIVATION)) // len(genotypes.KERNEL_SIZE)) % len(genotypes.UPSAMPLE_PRIMITIVE))
        kernel_index = ((model_index // len(genotypes.ACTIVATION)) % len(genotypes.KERNEL_SIZE))
        act_index = (model_index % len(genotypes.ACTIVATION))
        # Last three residues map to a dedicated conv op (index 5).
        if ((model_index >= 60) and (model_index <= 62)):
            conv_index = 5
    ' DepthToSpace - Second '
    if ((model_index >= 252) and (model_index <= 311)):
        # Conv runs before the pixel-shuffle primitive in this range.
        swap = True
        prim_index = ((model_index - 63) // 63)
        model_index = (model_index % 63)
        conv_index = (((model_index // len(genotypes.ACTIVATION)) // len(genotypes.KERNEL_SIZE)) % len(genotypes.UPSAMPLE_PRIMITIVE))
        kernel_index = ((model_index // len(genotypes.ACTIVATION)) % len(genotypes.KERNEL_SIZE))
        act_index = (model_index % len(genotypes.ACTIVATION))
    ' Transposed Convolution '
    if ((model_index >= 312) and (model_index <= 323)):
        # Fixed primitive/conv pair; only kernel size and activation vary.
        prim_index = 4
        conv_index = 5
        kernel_index = ((model_index // len(genotypes.ACTIVATION)) % len(genotypes.KERNEL_SIZE))
        act_index = (model_index % len(genotypes.ACTIVATION))
    prim_op = genotypes.UPSAMPLE_PRIMITIVE[prim_index]
    conv_op = genotypes.UPSAMPLE_CONV[conv_index]
    kernel_size = genotypes.KERNEL_SIZE[kernel_index]
    act_op = genotypes.ACTIVATION[act_index]
    if (prim_op == 'pixel_shuffle'):
        if (not swap):
            # Pixel shuffle divides channels by 4, so the following conv
            # receives C_in / 4 channels.
            prim_op_layer = operations.UPSAMPLE_PRIMITIVE_OPS[prim_op](C_in=C_in, C_out=C_out, kernel_size=kernel_size, act_op=act_op)
            conv_op_layer = operations.UPSAMPLE_CONV_OPS[conv_op](C_in=int((C_in / 4)), C_out=C_out, kernel_size=kernel_size, act_op=act_op)
            return nn.Sequential(prim_op_layer, conv_op_layer)
        else:
            # Swapped order: conv expands to 4x channels which the shuffle
            # then folds into spatial resolution.
            conv_op_layer = operations.UPSAMPLE_CONV_OPS[conv_op](C_in=C_in, C_out=int((C_out * 4)), kernel_size=kernel_size, act_op=act_op)
            prim_op_layer = operations.UPSAMPLE_PRIMITIVE_OPS[prim_op](C_in=C_in, C_out=C_out, kernel_size=kernel_size, act_op=act_op)
            return nn.Sequential(conv_op_layer, prim_op_layer)
    else:
        prim_op_layer = operations.UPSAMPLE_PRIMITIVE_OPS[prim_op](C_in=C_in, C_out=C_out, kernel_size=kernel_size, act_op=act_op)
        conv_op_layer = operations.UPSAMPLE_CONV_OPS[conv_op](C_in=C_in, C_out=C_out, kernel_size=kernel_size, act_op=act_op)
        return nn.Sequential(prim_op_layer, conv_op_layer)
class EncoderDisp(nn.Module):
    """Disparity head: a bottleneck module followed by a sigmoid-bounded
    projection to the requested number of output channels."""

    def __init__(self, bott_channels, out_channels, bottleneck):
        super().__init__()
        self.bottleneck = bottleneck
        # 3x3 conv, stride 1, reflect padding 1; the sigmoid keeps the
        # predicted disparity in (0, 1).
        projection = nn.Conv2d(bott_channels, out_channels, 3, 1, 1, padding_mode='reflect')
        self.disp = nn.Sequential(projection, nn.Sigmoid())

    def forward(self, inputs):
        """Run the bottleneck, then produce the disparity map."""
        return self.disp(self.bottleneck(inputs))
def clipped_audio(x, num_frames=None):
    """Randomly crop ``x`` along its first axis to ``num_frames`` frames.

    Args:
        x: array with a leading time/frame axis.
        num_frames: target length; defaults to ``c.NUM_FRAMES``.  The lookup
            is deferred to call time (the old ``num_frames=c.NUM_FRAMES``
            default was evaluated at import time, freezing the config value
            and making the module unimportable when ``c`` was not yet set up).

    Returns:
        A uniformly random contiguous slice of length ``num_frames`` when
        ``x`` is longer than that; otherwise ``x`` unchanged.
    """
    if num_frames is None:
        num_frames = c.NUM_FRAMES
    if x.shape[0] > num_frames:
        # np.random.randint's upper bound is exclusive, so the slice always
        # stays inside x.
        bias = np.random.randint(0, x.shape[0] - num_frames)
        return x[bias:num_frames + bias]
    return x
class ElasticTransformPseudo2D(DualTransform):
    """Albumentations-style dual transform applying a pseudo-2D elastic
    deformation to both image and mask.

    Args:
        alpha: deformation intensity.
        sigma: Gaussian smoothing sigma for the displacement fields.
        alpha_affine: magnitude of the random affine component.
        approximate: if True, use the faster approximate displacement
            smoothing in the functional implementation.
        always_apply, p: standard albumentations probability controls.
    """

    def __init__(self, alpha=1000, sigma=50, alpha_affine=1, approximate=False, always_apply=False, p=0.5):
        super().__init__(always_apply, p)
        self.alpha = alpha
        self.sigma = sigma
        self.alpha_affine = alpha_affine
        self.approximate = approximate

    def apply(self, img, random_state=None):
        # Bug fix: forward self.approximate instead of a hard-coded False so
        # the constructor argument actually takes effect.
        return F.elastic_transform_pseudo2D(img, self.alpha, self.sigma, self.alpha_affine, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, value=None, random_state=random_state, approximate=self.approximate)

    def apply_to_mask(self, img, random_state=None):
        # Masks use nearest-neighbour interpolation to keep labels discrete.
        return F.elastic_transform_pseudo2D(img, self.alpha, self.sigma, self.alpha_affine, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101, value=None, random_state=random_state, approximate=self.approximate)

    def get_params(self, **data):
        # One shared seed per call so image and mask get the same deformation.
        return {'random_state': random.randint(0, 10000)}
class ResNet(object):
    """PaddlePaddle (fluid) ResNet backbone builder.

    Builds ResNet-{18,34,50,101,152,200} feature extractors with optional
    DCNv2, SAC, GCB, ECA, non-local blocks and recursive feature pyramid
    (RFP) inputs, returning an OrderedDict of the requested stage outputs.

    Args:
        depth (int): one of 18/34/50/101/152/200.
        freeze_at (int): stages <= this index have gradients stopped (0-4).
        norm_type (str): 'bn', 'sync_bn' or 'affine_channel'.
        freeze_norm (bool): freeze normalization scale/offset parameters.
        norm_decay (float): L2 decay on normalization parameters.
        variant (str): ResNet variant 'a'/'b'/'c'/'d' (stem/shortcut layout).
        feature_maps (list[int]): stages whose outputs are returned (2-5).
        dcn_v2_stages (list[int]): stages using deformable conv v2.
        weight_prefix_name (str): prefix prepended to all parameter names.
        nonlocal_stages (list[int]): stages with non-local blocks (depth>=50).
        gcb_stages / gcb_params: global-context block configuration.
        sac_stages / sac_params: switchable atrous conv configuration.
        rfp_stages (list[int]): stages that consume RFP features.
        lr_mult_list (list[float]): per-stage LR multipliers (length 4).
        eca_k_size (list[int]): ECA kernel sizes; ECA enabled when len == 4.
    """
    __shared__ = ['norm_type', 'freeze_norm', 'weight_prefix_name']

    def __init__(self, depth=50, freeze_at=2, norm_type='affine_channel', freeze_norm=True, norm_decay=0.0, variant='b', feature_maps=[2, 3, 4, 5], dcn_v2_stages=[], weight_prefix_name='', nonlocal_stages=[], gcb_stages=[], gcb_params=dict(), sac_stages=[], sac_params=None, rfp_stages=[], lr_mult_list=[1.0, 1.0, 1.0, 1.0], eca_k_size=[]):
        super(ResNet, self).__init__()
        if isinstance(feature_maps, Integral):
            feature_maps = [feature_maps]
        # Bug fix: the message previously contained an unformatted '{}'.
        assert (depth in [18, 34, 50, 101, 152, 200]), 'depth {} not in [18, 34, 50, 101, 152, 200]'.format(depth)
        assert (variant in ['a', 'b', 'c', 'd']), 'invalid ResNet variant'
        assert (0 <= freeze_at <= 4), 'freeze_at should be 0, 1, 2, 3 or 4'
        assert (len(feature_maps) > 0), 'need one or more feature maps'
        assert (norm_type in ['bn', 'sync_bn', 'affine_channel'])
        assert (not ((len(nonlocal_stages) > 0) and (depth < 50))), 'non-local is not supported for resnet18 or resnet34'
        assert (len(lr_mult_list) == 4), 'lr_mult_list length must be 4 but got {}'.format(len(lr_mult_list))
        self.depth = depth
        self.freeze_at = freeze_at
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        self.freeze_norm = freeze_norm
        self.variant = variant
        self._model_type = 'ResNet'
        self.feature_maps = feature_maps
        self.dcn_v2_stages = dcn_v2_stages
        # depth -> (blocks per stage, block builder)
        self.depth_cfg = {18: ([2, 2, 2, 2], self.basicblock), 34: ([3, 4, 6, 3], self.basicblock), 50: ([3, 4, 6, 3], self.bottleneck), 101: ([3, 4, 23, 3], self.bottleneck), 152: ([3, 8, 36, 3], self.bottleneck), 200: ([3, 12, 48, 3], self.bottleneck)}
        self.stage_filters = [64, 128, 256, 512]
        self._c1_out_chan_num = 64
        self.na = NameAdapter(self)
        self.prefix_name = weight_prefix_name
        self.nonlocal_stages = nonlocal_stages
        self.nonlocal_mod_cfg = {50: 2, 101: 5, 152: 8, 200: 12}
        self.gcb_stages = gcb_stages
        self.gcb_params = gcb_params
        self.sac_stages = sac_stages
        self.sac_params = sac_params
        self.rfp_stages = rfp_stages
        self.with_sac = False
        if ((self.sac_params is not None) and (len(self.sac_stages) > 0)):
            self.with_sac = True
        self.lr_mult_list = lr_mult_list
        self.with_eca = (len(eca_k_size) == 4)
        self.eca_k_size = eca_k_size
        # Current stage being built; -1 means "stem / before any stage".
        self.stage_num = (- 1)

    def _conv_offset(self, input, filter_size, stride, padding, act=None, name=None):
        """Zero-initialized conv producing DCNv2 offset+mask channels (3 per tap)."""
        out_channel = ((filter_size * filter_size) * 3)
        out = fluid.layers.conv2d(input, num_filters=out_channel, filter_size=filter_size, stride=stride, padding=padding, param_attr=ParamAttr(initializer=Constant(0.0), name=(name + '.w_0')), bias_attr=ParamAttr(initializer=Constant(0.0), name=(name + '.b_0')), act=act, name=name)
        return out

    def _conv_norm(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None, dcn_v2=False, with_sac=False):
        """Conv (plain / SAC / DCNv2) followed by the configured normalization."""
        _name = ((self.prefix_name + name) if (self.prefix_name != '') else name)
        lr_mult = 1.0
        # Bug fix: clamp the stage index into [0, 3].  Previously the max()
        # result was immediately overwritten by min(), so during the stem
        # (stage_num == -1) the index became -3 and lr_mult_list was read at
        # a negative offset, picking the wrong multiplier.
        mult_idx = max((self.stage_num - 2), 0)
        mult_idx = min(mult_idx, 3)
        lr_mult = self.lr_mult_list[mult_idx]
        if with_sac:
            conv = SAConv2d(input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=((filter_size - 1) // 2), groups=groups, act=None, bias_attr=False, name=_name, lr_mult=lr_mult, **self.sac_params)
        elif (not dcn_v2):
            conv = fluid.layers.conv2d(input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=((filter_size - 1) // 2), groups=groups, act=None, param_attr=ParamAttr(name=(_name + '_weights'), learning_rate=lr_mult), bias_attr=False, name=(_name + '.conv2d.output.1'))
        else:
            # DCNv2: predict offsets and a sigmoid-gated modulation mask.
            offset_mask = self._conv_offset(input=input, filter_size=filter_size, stride=stride, padding=((filter_size - 1) // 2), act=None, name=(_name + '_conv_offset'))
            offset_channel = ((filter_size ** 2) * 2)
            mask_channel = (filter_size ** 2)
            (offset, mask) = fluid.layers.split(input=offset_mask, num_or_sections=[offset_channel, mask_channel], dim=1)
            mask = fluid.layers.sigmoid(mask)
            conv = fluid.layers.deformable_conv(input=input, offset=offset, mask=mask, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=((filter_size - 1) // 2), groups=groups, deformable_groups=1, im2col_step=1, param_attr=ParamAttr(name=(_name + '_weights'), learning_rate=lr_mult), bias_attr=False, name=(_name + '.conv2d.output.1'))
        bn_name = self.na.fix_conv_norm_name(name)
        bn_name = ((self.prefix_name + bn_name) if (self.prefix_name != '') else bn_name)
        norm_lr = (0.0 if self.freeze_norm else lr_mult)
        norm_decay = self.norm_decay
        pattr = ParamAttr(name=(bn_name + '_scale'), learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
        battr = ParamAttr(name=(bn_name + '_offset'), learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
        if (self.norm_type in ['bn', 'sync_bn']):
            global_stats = (True if self.freeze_norm else False)
            out = fluid.layers.batch_norm(input=conv, act=act, name=(bn_name + '.output.1'), param_attr=pattr, bias_attr=battr, moving_mean_name=(bn_name + '_mean'), moving_variance_name=(bn_name + '_variance'), use_global_stats=global_stats)
            scale = fluid.framework._get_var(pattr.name)
            bias = fluid.framework._get_var(battr.name)
        elif (self.norm_type == 'affine_channel'):
            scale = fluid.layers.create_parameter(shape=[conv.shape[1]], dtype=conv.dtype, attr=pattr, default_initializer=fluid.initializer.Constant(1.0))
            bias = fluid.layers.create_parameter(shape=[conv.shape[1]], dtype=conv.dtype, attr=battr, default_initializer=fluid.initializer.Constant(0.0))
            out = fluid.layers.affine_channel(x=conv, scale=scale, bias=bias, act=act)
        if self.freeze_norm:
            scale.stop_gradient = True
            bias.stop_gradient = True
        return out

    def _shortcut(self, input, ch_out, stride, is_first, name):
        """Residual shortcut; projects with a 1x1 conv when shape changes."""
        max_pooling_in_short_cut = (self.variant == 'd')
        ch_in = input.shape[1]
        name = self.na.fix_shortcut_name(name)
        std_senet = getattr(self, 'std_senet', False)
        if ((ch_in != ch_out) or (stride != 1) or ((self.depth < 50) and is_first)):
            if std_senet:
                if is_first:
                    return self._conv_norm(input, ch_out, 1, stride, name=name)
                else:
                    return self._conv_norm(input, ch_out, 3, stride, name=name)
            if (max_pooling_in_short_cut and (not is_first)):
                # Variant 'd': avg-pool then 1x1 conv instead of strided conv.
                input = fluid.layers.pool2d(input=input, pool_size=2, pool_stride=2, pool_padding=0, ceil_mode=True, pool_type='avg')
                return self._conv_norm(input, ch_out, 1, 1, name=name)
            return self._conv_norm(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck(self, input, num_filters, stride, is_first, name, dcn_v2=False, gcb=False, gcb_name=None, sac=False, rfp=False, rfp_feat=None, eca_k_size=None):
        """1x1-3x3-1x1 bottleneck block with optional DCNv2/SAC/SE/ECA/GCB/RFP."""
        if (self.variant == 'a'):
            (stride1, stride2) = (stride, 1)
        else:
            (stride1, stride2) = (1, stride)
        groups = getattr(self, 'groups', 1)
        group_width = getattr(self, 'group_width', (- 1))
        if (groups == 1):
            expand = 4
        elif ((groups * group_width) == 256):
            expand = 1
        else:
            num_filters = (num_filters // 2)
            expand = 2
        (conv_name1, conv_name2, conv_name3, shortcut_name) = self.na.fix_bottleneck_name(name)
        std_senet = getattr(self, 'std_senet', False)
        if std_senet:
            conv_def = [[int((num_filters / 2)), 1, stride1, 'relu', 1, conv_name1], [num_filters, 3, stride2, 'relu', groups, conv_name2], [(num_filters * expand), 1, 1, None, 1, conv_name3]]
        else:
            conv_def = [[num_filters, 1, stride1, 'relu', 1, conv_name1], [num_filters, 3, stride2, 'relu', groups, conv_name2], [(num_filters * expand), 1, 1, None, 1, conv_name3]]
        residual = input
        for (i, (c, k, s, act, g, _name)) in enumerate(conv_def):
            # DCNv2 / SAC apply only to the middle 3x3 conv (i == 1).
            residual = self._conv_norm(input=residual, num_filters=c, filter_size=k, stride=s, act=act, groups=g, name=_name, dcn_v2=((i == 1) and dcn_v2), with_sac=((i == 1) and sac))
        short = self._shortcut(input, (num_filters * expand), stride, is_first=is_first, name=shortcut_name)
        if callable(getattr(self, '_squeeze_excitation', None)):
            residual = self._squeeze_excitation(input=residual, num_channels=num_filters, name=('fc' + name))
        if eca_k_size:
            residual = eca_layer(residual, eca_k_size, name=('eca_' + name))
        if gcb:
            residual = add_gc_block(residual, name=gcb_name, **self.gcb_params)
        if (rfp and (rfp_feat is not None)):
            # RFP: add a zero-initialized projection of the pyramid feature.
            out = (short + residual)
            rfp_feat = fluid.layers.conv2d(input=rfp_feat, num_filters=(num_filters * expand), filter_size=1, stride=1, padding=0, param_attr=ParamAttr(initializer=Constant(0.0), name=(name + 'rfp.w')), bias_attr=ParamAttr(initializer=Constant(0.0), name=(name + 'rfp.b')), act=None, name=(name + '.rfp.output'))
            out = fluid.layers.elementwise_add(x=out, y=rfp_feat, act='relu', name=(name + '.add.output.5'))
        else:
            out = fluid.layers.elementwise_add(x=short, y=residual, act='relu', name=(name + '.add.output.5'))
        return out

    def basicblock(self, input, num_filters, stride, is_first, name, dcn_v2=False, gcb=False, gcb_name=None, sac=False, rfp=False, rfp_feat=None, eca_k_size=None):
        """3x3-3x3 basic block (ResNet-18/34); extras are not supported here."""
        assert (dcn_v2 is False), 'Not implemented yet.'
        assert (gcb is False), 'Not implemented yet.'
        assert (sac is False), 'Not implemented yet.'
        assert (rfp is False), 'Not implemented yet.'
        conv0 = self._conv_norm(input=input, num_filters=num_filters, filter_size=3, act='relu', stride=stride, name=(name + '_branch2a'))
        conv1 = self._conv_norm(input=conv0, num_filters=num_filters, filter_size=3, act=None, name=(name + '_branch2b'))
        if eca_k_size:
            conv1 = eca_layer(conv1, eca_k_size, name=('eca_' + name))
        short = self._shortcut(input, num_filters, stride, is_first, name=(name + '_branch1'))
        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')

    def layer_warp(self, input, stage_num, rfp_feat=None):
        """Build one ResNet stage (2-5) by stacking its residual blocks."""
        assert (stage_num in [2, 3, 4, 5])
        self.stage_num = stage_num
        (stages, block_func) = self.depth_cfg[self.depth]
        count = stages[(stage_num - 2)]
        ch_out = self.stage_filters[(stage_num - 2)]
        is_first = (False if (stage_num != 2) else True)
        dcn_v2 = (True if (stage_num in self.dcn_v2_stages) else False)
        sac = (self.with_sac if (stage_num in self.sac_stages) else False)
        # Non-local blocks inserted every `nonlocal_mod` blocks; 1000 disables.
        nonlocal_mod = 1000
        if (stage_num in self.nonlocal_stages):
            nonlocal_mod = (self.nonlocal_mod_cfg[self.depth] if (stage_num == 4) else 2)
        conv = input
        for i in range(count):
            conv_name = self.na.fix_layer_warp_name(stage_num, count, i)
            if (self.depth < 50):
                is_first = (True if ((i == 0) and (stage_num == 2)) else False)
            gcb = (stage_num in self.gcb_stages)
            gcb_name = 'gcb_res{}_b{}'.format(stage_num, i)
            eca_k_size = (int(self.eca_k_size[(stage_num - 2)]) if self.with_eca else None)
            # RFP features are injected only into the first block of a stage.
            rfp = ((i == 0) and (stage_num in self.rfp_stages))
            conv = block_func(input=conv, num_filters=ch_out, stride=(2 if ((i == 0) and (stage_num != 2)) else 1), is_first=is_first, name=conv_name, dcn_v2=dcn_v2, gcb=gcb, gcb_name=gcb_name, sac=sac, rfp=rfp, rfp_feat=rfp_feat, eca_k_size=eca_k_size)
            dim_in = conv.shape[1]
            nonlocal_name = 'nonlocal_conv{}'.format(stage_num)
            if ((i % nonlocal_mod) == (nonlocal_mod - 1)):
                conv = add_space_nonlocal(conv, dim_in, dim_in, (nonlocal_name + '_{}'.format(i)), int((dim_in / 2)))
        return conv

    def c1_stage(self, input):
        """Stem: 7x7 conv (or 3x 3x3 convs for variants 'c'/'d') + max pool."""
        out_chan = self._c1_out_chan_num
        conv1_name = self.na.fix_c1_stage_name()
        if (self.variant in ['c', 'd']):
            conv_def = [[(out_chan // 2), 3, 2, 'conv1_1'], [(out_chan // 2), 3, 1, 'conv1_2'], [out_chan, 3, 1, 'conv1_3']]
        else:
            conv_def = [[out_chan, 7, 2, conv1_name]]
        for (c, k, s, _name) in conv_def:
            input = self._conv_norm(input=input, num_filters=c, filter_size=k, stride=s, act='relu', name=_name)
        output = fluid.layers.pool2d(input=input, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        return output

    def __call__(self, input, rfp_feats=None):
        """Run the backbone; returns OrderedDict of requested stage outputs."""
        assert isinstance(input, Variable)
        assert (not (set(self.feature_maps) - set([2, 3, 4, 5]))), 'feature maps {} not in [2, 3, 4, 5]'.format(self.feature_maps)
        res_endpoints = []
        res = input
        feature_maps = self.feature_maps
        severed_head = getattr(self, 'severed_head', False)
        if (not severed_head):
            res = self.c1_stage(res)
            feature_maps = range(2, (max(self.feature_maps) + 1))
        for i in feature_maps:
            rfp_feat = None
            if ((i in self.rfp_stages) and (rfp_feats is not None)):
                rfp_feat = rfp_feats[(i - self.rfp_stages[0])]
            res = self.layer_warp(res, i, rfp_feat=rfp_feat)
            if (i in self.feature_maps):
                res_endpoints.append(res)
            if (self.freeze_at >= i):
                res.stop_gradient = True
        return OrderedDict([('res{}_sum'.format(self.feature_maps[idx]), feat) for (idx, feat) in enumerate(res_endpoints)])
def weights_init_(m):
    """Xavier-initialize Linear layers: uniform weight, zero bias.

    Intended for use with ``module.apply(weights_init_)``; modules whose
    class name does not contain 'Linear' are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != (- 1):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        # Bug fix: Linear layers built with bias=False have m.bias is None,
        # which previously crashed constant_().
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0)
class ROIHeadsTest(unittest.TestCase):
    """Regression and TorchScript scriptability/tracing tests for detectron2
    ROI heads (box, mask, keypoint, rotated boxes, PointRend)."""
    def test_roi_heads(self):
        """Regression test: StandardROIHeads + RPN losses on fixed-seed random
        features must match the recorded reference values."""
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = 'ROIAlignV2'
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
        cfg.MODEL.MASK_ON = True
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        # Tiny 1x2 feature map keeps the test fast.
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {'res4': ShapeSpec(channels=num_channels, stride=16)}
        image_shape = (15, 15)
        gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        gt_instance0 = Instances(image_shape)
        gt_instance0.gt_boxes = Boxes(gt_boxes0)
        gt_instance0.gt_classes = torch.tensor([2, 1])
        gt_instance0.gt_masks = BitMasks((torch.rand(((2,) + image_shape)) > 0.5))
        gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
        gt_instance1 = Instances(image_shape)
        gt_instance1.gt_boxes = Boxes(gt_boxes1)
        gt_instance1.gt_classes = torch.tensor([1, 2])
        gt_instance1.gt_masks = BitMasks((torch.rand(((2,) + image_shape)) > 0.5))
        gt_instances = [gt_instance0, gt_instance1]
        proposal_generator = build_proposal_generator(cfg, feature_shape)
        roi_heads = StandardROIHeads(cfg, feature_shape)
        with EventStorage():
            (proposals, proposal_losses) = proposal_generator(images, features, gt_instances)
            (_, detector_losses) = roi_heads(images, features, proposals, gt_instances)
            detector_losses.update(proposal_losses)
        # Reference losses captured from a known-good run with seed 121.
        expected_losses = {'loss_cls': 4., 'loss_box_reg': 0., 'loss_mask': 0., 'loss_rpn_cls': 0., 'loss_rpn_loc': 0.}
        succ = all((torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0))) for name in detector_losses.keys()))
        self.assertTrue(succ, 'Losses has changed! New losses: {}'.format({k: v.item() for (k, v) in detector_losses.items()}))
    def test_rroi_heads(self):
        """Regression test for the rotated-box pipeline (RRPN + RROIHeads)."""
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.PROPOSAL_GENERATOR.NAME = 'RRPN'
        cfg.MODEL.ANCHOR_GENERATOR.NAME = 'RotatedAnchorGenerator'
        cfg.MODEL.ROI_HEADS.NAME = 'RROIHeads'
        cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
        cfg.MODEL.RPN.HEAD_NAME = 'StandardRPNHead'
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = 'ROIAlignRotated'
        # 5-tuple weights: rotated boxes carry an extra angle coordinate.
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {'res4': ShapeSpec(channels=num_channels, stride=16)}
        image_shape = (15, 15)
        # Boxes are (cx, cy, w, h, angle-in-degrees).
        gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32)
        gt_instance0 = Instances(image_shape)
        gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0)
        gt_instance0.gt_classes = torch.tensor([2, 1])
        gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, (- 50)]], dtype=torch.float32)
        gt_instance1 = Instances(image_shape)
        gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1)
        gt_instance1.gt_classes = torch.tensor([1, 2])
        gt_instances = [gt_instance0, gt_instance1]
        proposal_generator = build_proposal_generator(cfg, feature_shape)
        roi_heads = build_roi_heads(cfg, feature_shape)
        with EventStorage():
            (proposals, proposal_losses) = proposal_generator(images, features, gt_instances)
            (_, detector_losses) = roi_heads(images, features, proposals, gt_instances)
            detector_losses.update(proposal_losses)
        expected_losses = {'loss_cls': 4., 'loss_box_reg': 0., 'loss_rpn_cls': 0., 'loss_rpn_loc': 0.}
        succ = all((torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0))) for name in detector_losses.keys()))
        self.assertTrue(succ, 'Losses has changed! New losses: {}'.format({k: v.item() for (k, v) in detector_losses.items()}))
    def test_box_head_scriptability(self):
        """Scripted box head must produce exactly the eager output."""
        input_shape = ShapeSpec(channels=1024, height=14, width=14)
        box_features = torch.randn(4, 1024, 14, 14)
        box_head = FastRCNNConvFCHead(input_shape, conv_dims=[512, 512], fc_dims=[1024, 1024]).eval()
        script_box_head = torch.jit.script(box_head)
        origin_output = box_head(box_features)
        script_output = script_box_head(box_features)
        self.assertTrue(torch.equal(origin_output, script_output))
    def test_mask_head_scriptability(self):
        """Scripted mask head output Instances must match eager inference."""
        input_shape = ShapeSpec(channels=1024)
        mask_features = torch.randn(4, 1024, 14, 14)
        image_shapes = [(10, 10), (15, 15)]
        pred_instance0 = Instances(image_shapes[0])
        pred_classes0 = torch.tensor([1, 2, 3], dtype=torch.int64)
        pred_instance0.pred_classes = pred_classes0
        pred_instance1 = Instances(image_shapes[1])
        pred_classes1 = torch.tensor([4], dtype=torch.int64)
        pred_instance1.pred_classes = pred_classes1
        mask_head = MaskRCNNConvUpsampleHead(input_shape, num_classes=80, conv_dims=[256, 256]).eval()
        # deepcopy: the head mutates the Instances it receives in place.
        origin_outputs = mask_head(mask_features, deepcopy([pred_instance0, pred_instance1]))
        fields = {'pred_masks': torch.Tensor, 'pred_classes': torch.Tensor}
        with freeze_training_mode(mask_head), patch_instances(fields) as NewInstances:
            sciript_mask_head = torch.jit.script(mask_head)
            pred_instance0 = NewInstances.from_instances(pred_instance0)
            pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = sciript_mask_head(mask_features, [pred_instance0, pred_instance1])
        for (origin_ins, script_ins) in zip(origin_outputs, script_outputs):
            assert_instances_allclose(origin_ins, script_ins, rtol=0)
    def test_keypoint_head_scriptability(self):
        """Scripted keypoint head output Instances must match eager inference."""
        input_shape = ShapeSpec(channels=1024, height=14, width=14)
        keypoint_features = torch.randn(4, 1024, 14, 14)
        image_shapes = [(10, 10), (15, 15)]
        pred_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6], [1, 5, 2, 8]], dtype=torch.float32)
        pred_instance0 = Instances(image_shapes[0])
        pred_instance0.pred_boxes = Boxes(pred_boxes0)
        pred_boxes1 = torch.tensor([[7, 3, 10, 5]], dtype=torch.float32)
        pred_instance1 = Instances(image_shapes[1])
        pred_instance1.pred_boxes = Boxes(pred_boxes1)
        keypoint_head = KRCNNConvDeconvUpsampleHead(input_shape, num_keypoints=17, conv_dims=[512, 512]).eval()
        origin_outputs = keypoint_head(keypoint_features, deepcopy([pred_instance0, pred_instance1]))
        fields = {'pred_boxes': Boxes, 'pred_keypoints': torch.Tensor, 'pred_keypoint_heatmaps': torch.Tensor}
        with freeze_training_mode(keypoint_head), patch_instances(fields) as NewInstances:
            script_keypoint_head = torch.jit.script(keypoint_head)
            pred_instance0 = NewInstances.from_instances(pred_instance0)
            pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = script_keypoint_head(keypoint_features, [pred_instance0, pred_instance1])
        for (origin_ins, script_ins) in zip(origin_outputs, script_outputs):
            assert_instances_allclose(origin_ins, script_ins, rtol=0)
    def test_StandardROIHeads_scriptability(self):
        """Scripted StandardROIHeads inference must match eager predictions."""
        cfg = get_cfg()
        cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = 'ROIAlignV2'
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
        cfg.MODEL.MASK_ON = True
        # Loose thresholds so some detections survive NMS on random weights.
        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.01
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {'res4': ShapeSpec(channels=num_channels, stride=16)}
        roi_heads = StandardROIHeads(cfg, feature_shape).eval()
        proposal0 = Instances(image_sizes[0])
        proposal_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        proposal0.proposal_boxes = Boxes(proposal_boxes0)
        proposal0.objectness_logits = torch.tensor([0.5, 0.7], dtype=torch.float32)
        proposal1 = Instances(image_sizes[1])
        proposal_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
        proposal1.proposal_boxes = Boxes(proposal_boxes1)
        proposal1.objectness_logits = torch.tensor([0.1, 0.9], dtype=torch.float32)
        proposals = [proposal0, proposal1]
        (pred_instances, _) = roi_heads(images, features, proposals)
        fields = {'objectness_logits': torch.Tensor, 'proposal_boxes': Boxes, 'pred_classes': torch.Tensor, 'scores': torch.Tensor, 'pred_masks': torch.Tensor, 'pred_boxes': Boxes, 'pred_keypoints': torch.Tensor, 'pred_keypoint_heatmaps': torch.Tensor}
        with freeze_training_mode(roi_heads), patch_instances(fields) as new_instances:
            proposal0 = new_instances.from_instances(proposal0)
            proposal1 = new_instances.from_instances(proposal1)
            proposals = [proposal0, proposal1]
            scripted_rot_heads = torch.jit.script(roi_heads)
            (scripted_pred_instances, _) = scripted_rot_heads(images, features, proposals)
        for (instance, scripted_instance) in zip(pred_instances, scripted_pred_instances):
            assert_instances_allclose(instance, scripted_instance, rtol=0)
    def test_PointRend_mask_head_tracing(self):
        """Traced PointRend mask head must generalize to new input sizes."""
        cfg = model_zoo.get_config('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml')
        point_rend.add_pointrend_config(cfg)
        cfg.MODEL.ROI_HEADS.IN_FEATURES = ['p2', 'p3']
        cfg.MODEL.ROI_MASK_HEAD.NAME = 'PointRendMaskHead'
        cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ''
        cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
        chan = 256
        head = point_rend.PointRendMaskHead(cfg, {'p2': ShapeSpec(channels=chan, stride=4), 'p3': ShapeSpec(channels=chan, stride=8)})
        def gen_inputs(h, w, N):
            # p3 is half the resolution of p2, matching strides 4 and 8.
            p2 = torch.rand(1, chan, h, w)
            p3 = torch.rand(1, chan, (h // 2), (w // 2))
            boxes = random_boxes(N, max_coord=h)
            return (p2, p3, boxes)
        class Wrap(nn.ModuleDict):
            # Adapter giving the head a traceable tensor-in/tensor-out API.
            def forward(self, p2, p3, boxes):
                features = {'p2': p2, 'p3': p3}
                inst = Instances(((p2.shape[2] * 4), (p2.shape[3] * 4)))
                inst.pred_boxes = Boxes(boxes)
                inst.pred_classes = torch.zeros(inst.__len__(), dtype=torch.long)
                out = self.head(features, [inst])[0]
                return out.pred_masks
        model = Wrap({'head': head})
        model.eval()
        with torch.no_grad(), patch_builtin_len():
            traced = torch.jit.trace(model, gen_inputs(302, 208, 20))
            # Different shapes than the trace inputs: checks shape-genericity.
            inputs = gen_inputs(100, 120, 30)
            out_eager = model(*inputs)
            out_trace = traced(*inputs)
            self.assertTrue(torch.allclose(out_eager, out_trace))
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a Flax parameter pytree into a PyTorch model, in place.

    Keys are flattened with '.' separators and rewritten to PyTorch naming:
    4D conv 'kernel' tensors are transposed HWIO->OIHW, 2D 'kernel' tensors
    are transposed, 'scale' becomes 'weight', and numeric suffixes like '_0'
    become '.0' (ModuleList naming).  Shape mismatches raise ValueError;
    unexpected and missing keys are logged.  Returns ``pt_model``.
    """
    try:
        import torch
    except ImportError:
        logger.error('Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see and for installation instructions.')
        raise
    # torch.from_numpy cannot consume bfloat16 arrays; upcast to float32 first.
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map((lambda x: (x.dtype == jnp.bfloat16)), flax_state)).values()
    if any(is_type_bf16):
        logger.warning('Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` before loading those in PyTorch model.')
        flax_state = jax.tree_util.tree_map((lambda params: (params.astype(np.float32) if (params.dtype == jnp.bfloat16) else params)), flax_state)
    # NOTE(review): clearing base_model_prefix assumes the flattened Flax keys
    # line up with the PyTorch state_dict keys without a prefix -- confirm for
    # models that normally carry one.
    pt_model.base_model_prefix = ''
    flax_state_dict = flatten_dict(flax_state, sep='.')
    pt_model_dict = pt_model.state_dict()
    unexpected_keys = []
    # Start with every PyTorch key "missing"; remove as Flax keys match.
    missing_keys = set(pt_model_dict.keys())
    for (flax_key_tuple, flax_tensor) in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.')
        if ((flax_key_tuple_array[(- 1)] == 'kernel') and (flax_tensor.ndim == 4)):
            # Conv kernel: Flax HWIO -> PyTorch OIHW.
            flax_key_tuple_array = (flax_key_tuple_array[:(- 1)] + ['weight'])
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif (flax_key_tuple_array[(- 1)] == 'kernel'):
            # Dense kernel: transpose (in, out) -> (out, in).
            flax_key_tuple_array = (flax_key_tuple_array[:(- 1)] + ['weight'])
            flax_tensor = flax_tensor.T
        elif (flax_key_tuple_array[(- 1)] == 'scale'):
            # LayerNorm/GroupNorm scale maps to PyTorch 'weight'.
            flax_key_tuple_array = (flax_key_tuple_array[:(- 1)] + ['weight'])
        if ('time_embedding' not in flax_key_tuple_array):
            # Rewrite '_<digit>' name suffixes to '.<digit>' (ModuleList style);
            # time_embedding modules keep their underscore naming.
            for (i, flax_key_tuple_string) in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = flax_key_tuple_string.replace('_0', '.0').replace('_1', '.1').replace('_2', '.2').replace('_3', '.3').replace('_4', '.4').replace('_5', '.5').replace('_6', '.6').replace('_7', '.7').replace('_8', '.8').replace('_9', '.9')
        flax_key = '.'.join(flax_key_tuple_array)
        if (flax_key in pt_model_dict):
            if (flax_tensor.shape != pt_model_dict[flax_key].shape):
                raise ValueError(f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.')
            else:
                flax_tensor = (np.asarray(flax_tensor) if (not isinstance(flax_tensor, np.ndarray)) else flax_tensor)
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                missing_keys.remove(flax_key)
        else:
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    missing_keys = list(missing_keys)
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the Flax model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).
- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model).''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    return pt_model
class LMTrainer(Trainer):
def __init__(self, opt, model, data):
super().__init__(opt, model, data)
self.crit = nn.CrossEntropyLoss(size_average=True, reduce=True, ignore_index=0)
self.crit_test = nn.CrossEntropyLoss(size_average=False, reduce=False, ignore_index=0)
self.opt = opt
self.model = model
self.train_bag = data[0]
self.test_bag = data[1]
self.n_batch = len(self.train_bag)
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=opt.lr, momentum=0.9)
self.word_dict = opt.word_dict
self.clip = opt.clip
def func_train(self, inp_var, inp_msk):
self.optimizer.zero_grad()
aux = {}
batch_size = inp_var.size()[0]
batch_size_ = len(inp_msk)
assert (batch_size == batch_size_)
target_len = inp_msk[0]
(decoder_outputs_prob, decoder_outputs) = self.model.forward(inp_var, inp_msk, inp_var, inp_msk, aux)
valid_pos_mask = Var(msk_list_to_mat(inp_msk), requires_grad=False).view((target_len * batch_size), 1)
if self.opt.use_cuda:
valid_pos_mask = valid_pos_mask.cuda()
pred_prob = decoder_outputs_prob.view((target_len * batch_size), (- 1))
seq_first_inp_var = inp_var.transpose(1, 0).contiguous()
gold_dist = seq_first_inp_var.view((target_len * batch_size))
gold_dist = Var(gold_dist)
if self.opt.use_cuda:
gold_dist = gold_dist.cuda()
loss = self.crit(pred_prob, gold_dist)
loss.backward()
torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)
self.optimizer.step()
return (loss.data[0], math.exp(loss.data[0]))
def func_test(self, inp_var, inp_msk):
target_len = inp_msk[0]
batch_size = inp_var.size()[0]
(decoder_outputs_prob, decoder_outputs) = self.model.forward(inp_var, inp_msk, tgt_var=inp_var, tgt_msk=inp_msk, aux=None)
pred_prob = decoder_outputs_prob.view((target_len * batch_size), (- 1))
seq_first_inp_var = inp_var.transpose(1, 0).contiguous()
gold_dist = Var(seq_first_inp_var.view((target_len * batch_size)))
if self.opt.use_cuda:
gold_dist = gold_dist.cuda()
loss = self.crit_test(pred_prob, gold_dist)
loss = torch.sum(loss)
return (loss.data[0], decoder_outputs)
def train_iters(self):
for epo in range(self.opt.start_epo, (self.opt.n_epo + 1)):
self.model.train()
batch_order = np.arange(self.n_batch)
np.random.shuffle(batch_order)
for (idx, batch_idx) in enumerate(batch_order):
current_batch = self.train_bag[batch_idx]
inp_var = current_batch['txt']
inp_msk = current_batch['txt_msk']
if self.opt.use_cuda:
inp_var = inp_var.contiguous().cuda()
(nll, ppl) = self.func_train(inp_var, inp_msk)
if ((idx % self.opt.print_every) == 0):
logging.info(('NLL:%.2f \tPPL:%s' % (nll, str(ppl))))
if ((idx % self.opt.save_every) == 0):
ppl = self.evaluate()
os.chdir(self.opt.save_dir)
name_string = ('%d_%.2f'.lower() % (epo, ppl))
logging.info(('Saving in epo %s' % name_string))
torch.save(self.model.emb.state_dict(), (name_string + '_emb'))
torch.save(self.model.dec.state_dict(), (name_string + '_dec'))
torch.save(self.model.opt, (name_string + '_opt'))
os.chdir('..')
def evaluate(self):
    """Score the held-out bag and return corpus-level perplexity.

    Sums the per-batch NLL from ``func_test`` over all test batches,
    normalizes by the total token count, exponentiates, logs and returns
    the result.
    """
    self.model.eval()
    total_tokens = 0
    total_nll = 0
    for batch_idx in range(len(self.test_bag)):
        batch = self.test_bag[batch_idx]
        inp_var = batch['txt']
        inp_msk = batch['txt_msk']
        # Tokens in this batch = sequence length * number of sequences.
        total_tokens += (inp_msk[0] * inp_var.size()[0])
        (nll, _decoder_output) = self.func_test(inp_var, inp_msk)
        total_nll += nll
    # Perplexity = exp(average per-token NLL).
    final_ppl = math.exp((total_nll / total_tokens))
    logging.info(('PPL: %f' % final_ppl))
    return final_ppl
class _DefaultHabitatSimActions(Enum):
    """Default discrete action ids for the Habitat simulator agent.

    The integer values are the action indices the simulator expects;
    they must not be renumbered.
    """
    STOP = 0          # end the episode
    MOVE_FORWARD = 1  # translate forward
    TURN_LEFT = 2     # rotate left in place
    TURN_RIGHT = 3    # rotate right in place
    LOOK_UP = 4       # tilt camera up
    LOOK_DOWN = 5     # tilt camera down
class SquadFeatures(object):
    """A single set of tokenized features for one SQuAD example span.

    Plain data holder: every constructor argument is stored verbatim as an
    attribute of the same name.

    Attributes:
        input_ids: token ids of the (question + context) sequence.
        attention_mask: mask distinguishing real tokens from padding.
        token_type_ids: segment ids separating question from context.
        cls_index: index of the [CLS] token in ``input_ids``.
        p_mask: mask of tokens that cannot be part of an answer.
        example_index: index of the originating example.
        unique_id: globally unique id for this feature.
        paragraph_len: number of context tokens in this span.
        token_is_max_context: per-token flag for "best" span membership.
        tokens: the tokenized sequence as strings.
        token_to_orig_map: mapping from token index to original word index.
        start_position: answer start token index (training only).
        end_position: answer end token index (training only).
        is_impossible: whether the example has no answer in this span.
    """

    def __init__(self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.example_index = example_index
        self.unique_id = unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __repr__(self):
        # Concise repr for debugging; the full token lists are deliberately
        # omitted to keep log lines readable.
        return f'{type(self).__name__}(unique_id={self.unique_id!r}, example_index={self.example_index!r}, paragraph_len={self.paragraph_len!r}, is_impossible={self.is_impossible!r})'
class DiscLossWGANGP(DiscLossLS):
    """WGAN-GP critic loss: Wasserstein estimate plus a gradient penalty.

    Requires CUDA: intermediate tensors are moved to GPU unconditionally.
    """

    def name(self):
        return 'DiscLossWGAN-GP'

    def __init__(self, opt, tensor):
        super(DiscLossWGANGP, self).__init__(opt, tensor)
        # Gradient-penalty coefficient (lambda = 10, the value used in the
        # WGAN-GP paper).
        self.LAMBDA = 10

    def get_g_loss(self, net, realA, fakeB):
        """Generator loss: maximize the critic score on fakes (so minimize its negation)."""
        self.D_fake = net.forward(fakeB)
        return (- self.D_fake.mean())

    def calc_gradient_penalty(self, netD, real_data, fake_data):
        """Penalty pushing the critic's gradient norm toward 1 on real/fake interpolates."""
        # A single random mixing coefficient is broadcast over the whole batch.
        # NOTE(review): the WGAN-GP paper samples one alpha per example
        # (e.g. torch.rand(batch, 1, 1, 1)); sharing one alpha batch-wide is
        # a deviation — confirm whether it is intentional.
        alpha = torch.rand(1, 1)
        alpha = alpha.expand(real_data.size())
        alpha = alpha.cuda()  # NOTE(review): hard-codes CUDA availability
        interpolates = ((alpha * real_data) + ((1 - alpha) * fake_data))
        interpolates = interpolates.cuda()
        interpolates = Variable(interpolates, requires_grad=True)
        disc_interpolates = netD.forward(interpolates)
        # d(critic)/d(interpolates); create_graph=True keeps the penalty
        # differentiable so it can be backpropagated through.
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones(disc_interpolates.size()).cuda(), create_graph=True, retain_graph=True, only_inputs=True)[0]
        # (||grad||_2 - 1)^2, averaged over the batch, scaled by lambda.
        gradient_penalty = (((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA)
        return gradient_penalty

    def get_loss(self, net, realA, fakeB, realB):
        """Critic loss: E[D(fake)] - E[D(real)] + gradient penalty."""
        # Detach fakes so no gradient flows back into the generator here.
        self.D_fake = net.forward(fakeB.detach())
        self.D_fake = self.D_fake.mean()
        self.D_real = net.forward(realB)
        self.D_real = self.D_real.mean()
        self.loss_D = (self.D_fake - self.D_real)
        gradient_penalty = self.calc_gradient_penalty(net, realB.data, fakeB.data)
        return (self.loss_D + gradient_penalty)
def main():
    """Entry point: configure, (optionally) train, evaluate and test a T5
    model with prefix-tuning / adapter attention (ATTEMPT-style setup).

    Reads arguments either from a single JSON file (argv[1]) or from the
    command line.  NOTE(review): several flags are overridden with
    hard-coded values below (do_train=False, do_eval=True,
    load_adapter_weights=True, max_val_samples=300,
    save_adapter_weights=False) — these look like leftover debug toggles
    and should be removed or made configurable before release.
    """
    # ---- argument parsing -------------------------------------------------
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, AdapterTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # Single JSON config file path on the command line.
        (model_args, data_args, training_args, adapter_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args, adapter_args) = parser.parse_args_into_dataclasses()
    training_args.predict_with_generate = True
    # NOTE(review): wandb entity/project are hard-coded.
    wandb.init(entity='lklab_kaist', project='ROE_experiments_ICLR', name=training_args.output_dir)
    # ---- checkpoint detection ---------------------------------------------
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        print('#### last_checkpoint ', last_checkpoint)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            # Output dir is non-empty but holds no checkpoint: silently proceed.
            pass
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # ---- logging setup ----------------------------------------------------
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # ---- model config / tokenizer / model ---------------------------------
    config = T5Config.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # Propagate adapter/prefix-tuning switches onto the model config.
    config.train_task_adapters = adapter_args.train_task_adapters
    config.prefix_tuning = adapter_args.prefix_tuning
    config.attn_prefix_tuning = model_args.attn_prefix_tuning
    config.attn_method = model_args.attn_method
    config.ignore_target = model_args.ignore_target
    config.shared_attn = model_args.shared_attn
    config.prefix_num = model_args.prefix_num
    config.num_target = len(data_args.task_name)
    config.temperature = model_args.temperature
    config.fix_attention = model_args.fix_attention
    adapter_config = get_adapter_config(adapter_args, data_args, training_args, config)
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = T5ForConditionalGeneration.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), adapter_config=adapter_config)
    # ---- load pre-trained prefix embeddings --------------------------------
    if (model_args.load_prefix_embeddings is True):
        if (model_args.prompt_embedding_path is None):
            # No explicit path: take the prefix parameter(s) already in the model.
            # NOTE(review): this loop overwrites shared_params on every
            # matching parameter, keeping only the last match — confirm
            # that a single prefix parameter is expected here.
            for (name, param) in model.named_parameters():
                if (('prefix_shared' in name) or ('prefix' in name)):
                    shared_params = [param]
        else:
            # Load one embedding tensor per provided path.
            shared_params = []
            for path in model_args.prompt_embedding_path:
                shared_param = torch.load(path)
                shared_params.append(shared_param)
            if (model_args.target_prompt_embedding_path is not None):
                target_prompt_embedding = torch.load(model_args.target_prompt_embedding_path)
        if (model_args.attn_prefix_tuning is True):
            # Attention-over-prefixes variants.
            if ((training_args.do_train is True) and (model_args.shared_attn is False)):
                model.store_prefix_weights(shared_params)
                model.update_prefix_weights_single(shared_params[0])
            elif ((training_args.do_train is True) and (model_args.shared_attn is True)):
                model.store_prefix_weights(shared_params)
                model.update_prefix_weights_multi(shared_params[0], num_target=config.num_target)
            else:
                # Inference: initialize from the target task's embedding.
                model.store_prefix_weights(shared_params)
                model.update_prefix_weights_single(target_prompt_embedding)
        elif (model_args.target_prompt_embedding_path is None):
            model.update_prefix_weights(shared_params)
        else:
            model.update_prefix_weights(shared_params, target_prompt_embedding)
    # ---- optionally load attention / layer-norm weights --------------------
    if ((model_args.load_attention is True) and (model_args.attn_path is not None)):
        model.update_attention_weights(torch.load(model_args.attn_path))
    if ((model_args.load_attention is True) and (model_args.attn_path_sub is not None)):
        model.update_attention_weights_sub(model_args.attn_path_sub)
    if ((model_args.load_layer_norm is True) and (model_args.layer_norm_dir is not None)):
        model.update_layer_norm_weights(model_args.layer_norm_dir)
    model.resize_token_embeddings(len(tokenizer))
    model = modify_model_after_init(model, training_args, adapter_args, adapter_config)
    # ---- load saved adapter parameters ------------------------------------
    # NOTE(review): hard-coded override — always loads adapter weights from
    # <output_dir>/adapter_params regardless of the CLI flag.
    model_args.load_adapter_weights = True
    if model_args.load_adapter_weights:
        adapter_params = {}
        lst = os.listdir(os.path.join(training_args.output_dir, 'adapter_params'))
        for path in lst:
            full_path = os.path.join(training_args.output_dir, 'adapter_params', path)
            params = torch.load(full_path)
            # Strip the trailing ".pt" extension to recover the parameter name.
            path_ = path.split('.')
            path = '.'.join(path_[:(- 1)])
            adapter_params[path] = params
        load_cnt = 0
        # Copy saved tensors into every trainable parameter, by name.
        for (name, param) in model.named_parameters():
            if param.requires_grad:
                load_cnt += 1
                param.data = adapter_params[name].cuda()
        print(f'load count: {load_cnt}')
        print(f'Finished loading {len(adapter_params)} number of adapter parameter files')
    # ---- dataset name wiring ----------------------------------------------
    data_args.dataset_name = data_args.task_name
    data_args.eval_dataset_name = data_args.task_name
    data_args.test_dataset_name = data_args.test_dataset_name
    data_args.dataset_config_name = data_args.dataset_config_name
    data_args.eval_dataset_config_name = data_args.dataset_config_name
    data_args.test_dataset_config_name = data_args.test_dataset_config_name
    data_args.eval_prompts = data_args.train_prompts
    assert (len(data_args.dataset_name) == len(data_args.dataset_config_name))
    if (data_args.eval_dataset_name is not None):
        assert (len(data_args.eval_dataset_name) == len(data_args.eval_dataset_config_name))
    if (data_args.test_dataset_name is not None):
        assert (len(data_args.test_dataset_name) == len(data_args.test_dataset_config_name))
    padding = ('max_length' if data_args.pad_to_max_length else False)

    def preprocess_function(examples, max_target_length, task_id=None):
        """Tokenize source/target pairs; optionally tag each row with a task id."""
        model_inputs = tokenizer(examples['source'], max_length=data_args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(examples['target'], max_length=max_target_length, padding=padding, truncation=True)
        if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
            # Replace pad ids with -100 so they are ignored by the loss.
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        model_inputs['extra_fields'] = examples['extra_fields']
        if (task_id is not None):
            model_inputs['task_ids'] = [task_id for _ in examples['extra_fields']]
        return model_inputs

    column_names = ['source', 'target', 'extra_fields']
    performance_metrics = {}
    # Metric objects keyed by "<dataset>*<prompt>".
    eval_metrics_dict = {((dataset_name + '*') + eval_prompt): AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).metric for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)}
    print('')
    print(data_args.eval_dataset_name)
    print()
    print(data_args.eval_dataset_config_name)
    print()
    print(eval_metrics_dict)
    print('')
    # ---- training datasets -------------------------------------------------
    # NOTE(review): hard-coded override disables training entirely.
    training_args.do_train = False
    if training_args.do_train:
        if (data_args.train_files is not None):
            train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=train_file) for (dataset_name, dataset_config_name, train_file, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_files, data_args.train_prompts)]
            for td in train_datasets:
                print('')
                print(len(td))
                print('')
        else:
            train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=data_args.train_file) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)]
            for td in train_datasets:
                print('')
                print(len(td))
                print('')
        max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)]
        for (i, train_dataset) in enumerate(train_datasets):
            if (model_args.shared_attn is True):
                # Tag rows with the task index so shared attention can route them.
                train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i], task_id=i), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
            else:
                print('')
                print(len(train_datasets[i]))
                print('')
                train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
                print('')
                print(len(train_datasets[i]))
                print('')
            print('')
            print(len(train_dataset))
            print('')
        # All tasks are concatenated into one training dataset.
        train_dataset = concatenate_datasets(train_datasets)
        print('')
        print(len(train_dataset))
        print('')
    # ---- evaluation datasets -----------------------------------------------
    # NOTE(review): hard-coded overrides force evaluation with 300 samples.
    training_args.do_eval = True
    data_args.max_val_samples = 300
    if training_args.do_eval:
        max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)]
        if (data_args.validation_files is not None):
            eval_datasets = {((eval_dataset + '*') + eval_prompt): AutoTask.get(eval_dataset, eval_dataset_config, prompt=eval_prompt, seed=data_args.data_seed).get(split='validation', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_val_samples, lang=data_args.lang_name, file_name=validation_file) for (eval_dataset, eval_dataset_config, validation_file, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.validation_files, data_args.eval_prompts)}
        else:
            eval_datasets = {((eval_dataset + '*') + eval_prompt): AutoTask.get(eval_dataset, eval_dataset_config, prompt=eval_prompt, seed=data_args.data_seed).get(split='validation', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_val_samples, lang=data_args.lang_name, file_name=data_args.validation_file) for (eval_dataset, eval_dataset_config, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)}
        for (k, name) in enumerate(eval_datasets):
            # Special-case target lengths for LAMA / LAMBADA style tasks.
            if (name == 'lama_fill_mask'):
                max_target_lengths[k] = 2
            elif (name == 'lambada_what comes next'):
                max_target_lengths[k] = 1
            if (model_args.shared_attn is True):
                eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
            else:
                eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
    # ---- test datasets -----------------------------------------------------
    if training_args.do_test:
        if (data_args.test_files is not None):
            test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=test_file) for (test_dataset, test_dataset_config, test_file, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_files, data_args.test_prompts)}
        else:
            test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=data_args.test_file) for (test_dataset, test_dataset_config, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)}
        max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=test_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)]
        for (k, name) in enumerate(test_datasets):
            if (name == 'lama'):
                max_target_lengths[k] = 2
            elif (name == 'lambada'):
                max_target_lengths[k] = 1
            if (model_args.shared_attn is True):
                test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
            else:
                test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
    # ---- collator / data_info ----------------------------------------------
    label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = TaskDataCollatorForSeq2Seq(tokenizer, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
    if training_args.do_eval:
        data_info = {'eval': eval_datasets[((data_args.eval_dataset_name[0] + '*') + data_args.eval_prompts[0])]['extra_fields'], 'test': (test_datasets[((data_args.test_dataset_name[0] + '*') + data_args.test_prompts[0])]['extra_fields'] if training_args.do_test else None), 'train': (train_dataset['extra_fields'] if training_args.do_train else None)}
    else:
        data_info = {'train': (train_dataset['extra_fields'] if training_args.do_train else None)}

    def compute_metrics(eval_preds, task_name):
        """Post-process predictions, compute the task's metrics and append a
        human-readable dump of (pred, label) pairs to a per-task log file."""
        print('')
        print(eval_preds)
        print('')
        (preds, labels, data_info) = eval_preds
        post_processor = AutoPostProcessor.get(task_name, tokenizer, data_args.ignore_pad_token_for_loss)
        (decoded_preds, decoded_labels) = post_processor.process(preds, labels, data_info)
        result = {}
        eval_metrics = eval_metrics_dict[task_name]
        for metric in eval_metrics:
            result.update(metric(decoded_preds, decoded_labels))
        # NOTE(review): assumes ./output_logs_verbalizer/ already exists.
        with open(f"./output_logs_verbalizer/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}*{data_args.train_prompts[0].replace('/', ' ').replace('-', ' ')}-{task_name.replace('/', ' ').replace('-', ' ')}.txt", 'a') as f:
            f.write('\n')
            f.write(task_name)
            f.write('\n')
            for (a, b) in zip(decoded_preds, decoded_labels):
                f.write(a)
                f.write(' ')
                f.write(b)
                f.write('\n')
            f.write('>> ')
            for (key, value) in result.items():
                f.write((((str(key) + ' : ') + str(value)) + ' | '))
            f.write('\n')
            f.write('\n')
        return result

    # ---- optimizer / trainer -----------------------------------------------
    if (model_args.attn_learning_rate is not None):
        # Give the attention-module parameters their own learning rate.
        all_parameters = set(model.parameters())
        attn_params = []
        for (name, param) in model.named_parameters():
            if ((name == 'encoder.attn_W_up') or (name == 'encoder.attn_W_down') or (name == 'encoder.layer_norm')):
                attn_params += list(param)
        attn_params = set(attn_params)
        non_attn_params = (all_parameters - attn_params)
        non_attn_params = list(non_attn_params)
        attn_params = list(attn_params)
        optim = AdamW([{'params': non_attn_params}, {'params': attn_params, 'lr': model_args.attn_learning_rate}], lr=training_args.learning_rate)
        scheduler = get_linear_schedule_with_warmup(optim, num_warmup_steps=training_args.warmup_steps, num_training_steps=((len(train_dataset) * training_args.num_train_epochs) // (training_args.gradient_accumulation_steps * training_args.per_device_train_batch_size)))
        trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn, optimizers=(optim, scheduler))
    else:
        trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn)
    if training_args.do_eval:
        print('')
        print(eval_datasets)
        print('')
    if trainer.is_world_process_zero():
        os.makedirs(training_args.output_dir, exist_ok=True)
        save_training_config(sys.argv[1], training_args.output_dir)
    # ---- record trainable parameter names (for adapter saving) -------------
    # NOTE(review): hard-coded override disables adapter saving.
    model_args.save_adapter_weights = False
    if model_args.save_adapter_weights:
        params_to_save = {}
        unfrozen_layers = 0
        for (name, param) in trainer.model.named_parameters():
            if (param.requires_grad == True):
                print(name)
                params_to_save[name] = 0
                unfrozen_layers += 1
        print(f'number of unfrozen layers (for beginning of training model): {unfrozen_layers}')
    # ---- training -----------------------------------------------------------
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        if training_args.compute_time:
            # CUDA events bracket training to measure wall-clock GPU time.
            torch.cuda.synchronize()
            start = torch.cuda.Event(enable_timing=True)
            end = torch.cuda.Event(enable_timing=True)
            start.record()
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        if training_args.compute_time:
            end.record()
            torch.cuda.synchronize()
            total_time = (start.elapsed_time(end) / (1000 * 60))
            performance_metrics.update({'total_time in minutes ': total_time})
        if model_args.save_adapter_weights:
            if (not os.path.exists(os.path.join(training_args.output_dir, 'adapter_params'))):
                os.mkdir(os.path.join(training_args.output_dir, 'adapter_params'))
            layer_cnt = 0
            for (name, param) in trainer.model.named_parameters():
                if (name in params_to_save):
                    save_path = os.path.join(training_args.output_dir, f'adapter_params/{name}.pt')
                    print(name)
                    torch.save(param, save_path)
                    layer_cnt += 1
            print(f'finished saving adapters! saved {layer_cnt} number of layers')
            # NOTE(review): hard exit — skips evaluation/testing below.
            exit()
        if model_args.save_prefix_only:
            # Save only prefix/attention tensors instead of the full model.
            for (name, param) in trainer.model.named_parameters():
                if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))):
                    shared_params = param
                    torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
                elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')):
                    shared_params = param
                    if (model_args.shared_attn is True):
                        for i in range(config.num_target):
                            torch.save(shared_params[i], os.path.join(training_args.output_dir, 'prefix_embeddings_{}.pt'.format(i)))
                    else:
                        torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_Wa_weights.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_down.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_up.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_weight.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_bias.pt'))
        else:
            trainer.save_model()
        train_metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        train_metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', train_metrics)
        trainer.save_metrics('train', train_metrics)
        if (not model_args.save_prefix_only):
            trainer.save_state()
    # ---- performance bookkeeping -------------------------------------------
    if (torch.cuda.is_available() and training_args.compute_memory):
        peak_memory = ((torch.cuda.max_memory_allocated() / (1024 ** 2)) / 1000)
        print('Memory utilization', peak_memory, 'GB')
        performance_metrics.update({'peak_memory': peak_memory})
    if (training_args.compute_memory or training_args.compute_time):
        trainer.save_metrics('performance', performance_metrics)
    # Snapshot the learned prefix embeddings for per-task restoration at test time.
    if ((model_args.shared_attn is True) and (model_args.ignore_target is False)):
        learned_embeddings = trainer.model.encoder.prefix_emb.clone().detach()
    results = {}
    # ---- evaluation ---------------------------------------------------------
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        if (model_args.shared_attn is True):
            for (task, eval_dataset) in eval_datasets.items():
                metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task)
                trainer.log_metrics(f'eval_{task}_', metrics)
                trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics)
                if training_args.wandb_log:
                    wandb.log({f'eval_{task}_': metrics})
        else:
            for (task, eval_dataset) in eval_datasets.items():
                print('')
                print(task)
                print('')
                metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task)
                trainer.log_metrics(f'eval_{task}_', metrics)
                trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics)
                if training_args.wandb_log:
                    wandb.log({f'eval_{task}_': metrics})
    # ---- strip checkpoints down to prompt-only artifacts --------------------
    if model_args.save_prefix_only:
        checkpoints = glob.glob(os.path.join(training_args.output_dir, 'checkpoint-*'))
        for checkpoint_dir in checkpoints:
            if (not os.path.exists(os.path.join(checkpoint_dir, 'pytorch_model.bin'))):
                continue
            checkpoint_model = torch.load(os.path.join(os.path.join(checkpoint_dir, 'pytorch_model.bin')))
            new_dir = '{}_prompt_only'.format(checkpoint_dir)
            os.mkdir(new_dir)
            for (name, param) in checkpoint_model.items():
                if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))):
                    shared_params = param
                    # NOTE(review): this branch writes to output_dir while all
                    # sibling branches write to new_dir — confirm intent.
                    torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
                elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')):
                    shared_params = param
                    if (model_args.shared_attn is True):
                        for i in range(config.num_target):
                            torch.save(shared_params[i], os.path.join(new_dir, 'prefix_embeddings_{}.pt'.format(i)))
                    else:
                        torch.save(shared_params, os.path.join(new_dir, 'prefix_embeddings.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(new_dir, 'attn_Wa_weights.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_down.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_up.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_weight.pt'))
                if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)):
                    attn_weights_params = param
                    torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_bias.pt'))
            # Delete the full checkpoint once the prompt-only copy exists.
            try:
                shutil.rmtree(checkpoint_dir)
            except OSError as e:
                print(('Error: %s : %s' % (checkpoint_dir, e.strerror)))
    # ---- test ---------------------------------------------------------------
    if training_args.do_test:
        logger.info('*** Test ***')
        if (model_args.shared_attn is True):
            for (idx, (task, test_dataset)) in enumerate(test_datasets.items()):
                # Restore the task-specific learned prefix before scoring.
                trainer.model.encoder.prefix_emb[0].data = learned_embeddings[idx]
                metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task)
                trainer.log_metrics(f'test_{task}_', metrics)
                trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics)
        else:
            for (task, test_dataset) in test_datasets.items():
                metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task)
                trainer.log_metrics(f'test_{task}_', metrics)
                trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics)
    return results
class AtariLstmModel(torch.nn.Module):
    """Recurrent conv actor-critic network for Atari-style image observations.

    Conv feature extractor -> LSTM (fed features + previous action +
    previous reward) -> policy (softmax) and value heads.
    """

    def __init__(self, image_shape, output_size, fc_sizes=512, lstm_size=512, use_maxpool=False, channels=None, kernel_sizes=None, strides=None, paddings=None):
        super().__init__()
        # Conv front end; None arguments fall back to the defaults below.
        self.conv = Conv2dHeadModel(image_shape=image_shape, channels=(channels or [16, 32]), kernel_sizes=(kernel_sizes or [8, 4]), strides=(strides or [4, 2]), paddings=(paddings or [0, 1]), use_maxpool=use_maxpool, hidden_sizes=fc_sizes)
        # LSTM input = conv features + previous action vector (output_size
        # wide, presumably one-hot — TODO confirm) + scalar previous reward.
        self.lstm = torch.nn.LSTM(((self.conv.output_size + output_size) + 1), lstm_size)
        self.pi = torch.nn.Linear(lstm_size, output_size)      # policy head
        self.value = torch.nn.Linear(lstm_size, 1)             # value head

    def forward(self, image, prev_action, prev_reward, init_rnn_state):
        """Return (pi, v, next_rnn_state) with leading dims matching the input."""
        # Scale raw pixels to [0, 1] (assumes uint8 input — TODO confirm).
        img = image.type(torch.float)
        img = img.mul_((1.0 / 255))
        # Fold any leading time/batch dims into T, B; img has 3 trailing
        # image dims per infer_leading_dims(img, 3).
        (lead_dim, T, B, img_shape) = infer_leading_dims(img, 3)
        fc_out = self.conv(img.view((T * B), *img_shape))
        lstm_input = torch.cat([fc_out.view(T, B, (- 1)), prev_action.view(T, B, (- 1)), prev_reward.view(T, B, 1)], dim=2)
        # nn.LSTM wants a plain (h, c) tuple, not a namedtuple subclass.
        init_rnn_state = (None if (init_rnn_state is None) else tuple(init_rnn_state))
        (lstm_out, (hn, cn)) = self.lstm(lstm_input, init_rnn_state)
        pi = F.softmax(self.pi(lstm_out.view((T * B), (- 1))), dim=(- 1))
        v = self.value(lstm_out.view((T * B), (- 1))).squeeze((- 1))
        # Restore whatever leading dims the caller passed in.
        (pi, v) = restore_leading_dims((pi, v), lead_dim, T, B)
        next_rnn_state = RnnState(h=hn, c=cn)
        return (pi, v, next_rnn_state)
class DiscLoss():
    """Standard (non-saturating) GAN discriminator loss.

    Wraps GANLoss; the generator loss asks the discriminator to label fakes
    as real, while the discriminator loss averages its real/fake terms.
    """

    def name(self):
        return 'DiscLoss'

    def __init__(self, opt, tensor):
        self.criterionGAN = GANLoss(use_l1=False, tensor=tensor)
        self.fake_AB_pool = ImagePool(opt.pool_size)

    def get_g_loss(self, net, realA, fakeB):
        # Generator objective: discriminator should output "real" (1) on fakes.
        return self.criterionGAN(net.forward(fakeB), 1)

    def get_loss(self, net, realA, fakeB, realB):
        # Fake branch — detach so no gradient reaches the generator.
        self.pred_fake = net.forward(fakeB.detach())
        self.loss_D_fake = self.criterionGAN(self.pred_fake, 0)
        # Real branch.
        self.pred_real = net.forward(realB)
        self.loss_D_real = self.criterionGAN(self.pred_real, 1)
        # Average the two terms (kept as attributes for external logging).
        self.loss_D = ((self.loss_D_fake + self.loss_D_real) * 0.5)
        return self.loss_D
def translate_and_store_swda_corpus_test_data(dataset, dataset_loading_function, dataset_file_path, translation_file_path, language, translate_whole_utterances=True):
    """Translate the test-set talks of an MRDA/SwDA corpus and write each to a file.

    For every talk whose name is in the dataset's test-set index, translate it
    (by whole utterances or word-by-word) and store a text file containing the
    utterance count, then each utterance's length and text, then its tags.

    Args:
        dataset: 'MRDA' or 'SwDA' (selects the test-set index to use).
        dataset_loading_function: callable returning (talks, talk_names, _, _).
        dataset_file_path: path passed to the loading function.
        translation_file_path: output directory/prefix for translated files.
        language: target-language code, used in the output filename.
        translate_whole_utterances: translate utterance-at-a-time if True,
            word-at-a-time otherwise (reflected by the 'u'/'w' filename suffix).
    """
    (talks_read, talk_names, _, _) = dataset_loading_function(dataset_file_path)
    if (dataset == 'MRDA'):
        test_set_idx = mrda_test_set_idx
    elif (dataset == 'SwDA'):
        test_set_idx = swda_test_set_idx
    else:
        print('Unknown dataset!')
        # BUG FIX: previously exit(0) — an error path must not report success.
        exit(1)
    unit_str = ('u' if translate_whole_utterances else 'w')
    print(('# of talks read:' + str(len(talks_read))))
    for (i, talk) in enumerate(talks_read):
        if (talk_names[i] not in test_set_idx):
            continue
        if translate_whole_utterances:
            (talk_read, talk_name) = translate_test_data_by_utterances([talk], [talk_names[i]], test_set_idx, language)
        else:
            (talk_read, talk_name) = translate_test_data_by_words([talk], [talk_names[i]], test_set_idx, language)
        # The translators take/return lists; unwrap the single element.
        talk_read = talk_read[0]
        talk_name = talk_name[0]
        print(('Storing file: %s' % talk_names[i]))
        fileName = ((((((translation_file_path + talk_name) + '_') + language) + '_') + unit_str) + '.txt')
        print(fileName)
        # FIX: context manager guarantees the file is closed even if a write fails
        # (the original left the handle open on exception).
        with open(fileName, 'w') as f:
            conversation = talk_read[0]
            f.write((str(len(conversation)) + '\n'))
            for utterance in conversation:
                f.write((str(len(utterance)) + '\n'))
                utterance_string = ' '.join(utterance)
                f.write((utterance_string + '\n'))
            # Untranslated tags come from the original (untranslated) talk.
            tags = talk[1]
            for tag in tags:
                f.write((str(tag) + '\n'))
class PlotOut(Widget):
    """Widget bundling a scrollable plot output area with plot-selection and alpha-level controls."""

    def __init__(self, plots, plot_parameters, **kwargs):
        super().__init__(**kwargs)
        layout = {
            'border': '1px solid black',
            'min_width': '300px',
            'min_height': '300px',
            'max_height': '600px',
            'width': 'auto',
            'height': 'auto',
            'overflow': 'scroll',
        }
        self.out = widgets.Output(layout=layout)
        self.plot_selection_widget = DropdownSelectionWidget(plots, plots[0], 'Plots: ', plot_parameters)
        self.title = widgets.HTML(value='<H3>Plots</H3>')
        self.alpha_level = AlphaLevelWidget()

    def show(self):
        """Return the assembled widget tree (title, output area, selector, alpha slider)."""
        children = [self.title, self.out, self.plot_selection_widget.widget, self.alpha_level.show()]
        return widgets.VBox(children)

    def get_current_values(self):
        """Return (selected plot, alpha value, plot parameter values)."""
        selector = self.plot_selection_widget
        return (selector.get_value(), self.alpha_level.get_alpha_value(), selector.get_parameter_values())

    def get_output(self):
        return self.out
class GPUFPSer():
    """Benchmark a segmentation network's GPU inference speed (FPS) over an image folder.

    Loads the network for `proj_name` from the `args` registry onto cuda:0,
    optionally restores weights from `pth_path`, and `test()` times forward
    passes over every image in <data_path>/Image.
    """

    def __init__(self, proj_name, args, pth_path):
        super(GPUFPSer, self).__init__()
        self.args = args
        self.to_pil = transforms.ToPILImage()
        self.proj_name = proj_name
        self.dev = torch.device('cuda:0')
        self.net = self.args[proj_name]['net']().to(self.dev)
        self.net.eval()
        # Resize to a fixed square, then ImageNet mean/std normalization.
        self.test_image_transform = transforms.Compose([transforms.Resize((self.args['new_size'], self.args['new_size']), interpolation=Image.BILINEAR), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        if pth_path is not None:  # fix: identity check instead of `!= None`
            print(f'...{pth_path}')
            checkpoint = torch.load(pth_path)
            # Partial restore: only keys present in the current model are loaded.
            model_dict = self.net.state_dict()
            pretrained_dict = {k: v for (k, v) in checkpoint.items() if (k in model_dict)}
            model_dict.update(pretrained_dict)
            self.net.load_state_dict(model_dict)
            print('...')
        else:
            print('')

    def test(self, data_path, save_path):
        """Time inference over all images under <data_path>/Image and return FPS.

        If `save_path` is truthy, each prediction is also saved there as a PNG
        resized back to the source image's size. Only the forward pass is timed.
        """
        if save_path:
            print(f'{save_path}')
            check_mkdir(save_path)
        print(f'...{data_path}')
        img_path = os.path.join(data_path, 'Image')
        img_list = os.listdir(img_path)
        total_time = 0
        tqdm_iter = tqdm(enumerate(img_list), total=len(img_list), leave=False)
        for (idx, img_name) in tqdm_iter:
            tqdm_iter.set_description(f'{self.proj_name}:te=>{(idx + 1)}')
            img_fullpath = os.path.join(img_path, img_name)
            test_image = Image.open(img_fullpath).convert('RGB')
            img_size = test_image.size
            test_image = self.test_image_transform(test_image)
            test_image = test_image.unsqueeze(0)
            test_image = test_image.to(self.dev)
            with torch.no_grad():
                # Synchronize before and after so only the GPU forward pass is timed.
                torch.cuda.synchronize()
                start_time = time.time()
                outputs = self.net(test_image)
                torch.cuda.synchronize()
                total_time += (time.time() - start_time)
                if save_path:
                    outputs_np = outputs.squeeze(0).cpu().detach()
                    out_img = self.to_pil(outputs_np).resize(img_size, Image.NEAREST)
                    # Fix: strip the real extension instead of assuming it is 3 chars
                    # (the original `img_name[:-4]` broke on e.g. '.jpeg').
                    oimg_path = os.path.join(save_path, (os.path.splitext(img_name)[0] + '.png'))
                    out_img.save(oimg_path)
        # NOTE(review): raises ZeroDivisionError when the image folder is empty — by design?
        fps = (len(img_list) / total_time)
        return fps
def main_worker(gpu, args):
    """Per-GPU entry point: build GQA data loaders and run training or test-only prediction.

    Args:
        gpu: local GPU index; also used as this process's rank (written into args).
        args: parsed experiment configuration; mutated in place (gpu/rank and,
            in the test-only wandb branch, batch sizes read from it).
    """
    args.gpu = gpu
    args.rank = gpu
    print(f'Process Launching at GPU {gpu}')
    if args.distributed:
        torch.cuda.set_device(args.gpu)
        dist.init_process_group(backend='nccl')
    if args.test_only:
        # Test-only path: each GPU processes its own `submit_<gpu>` split and dumps
        # predictions, then uploads sources + predictions to wandb.
        print(f'Building submit test loader at GPU {gpu}')
        split = f'submit_{gpu}'
        print('Loading', split)
        test_loader = get_loader(args, split=split, mode='val', batch_size=args.batch_size, distributed=False, gpu=args.gpu, workers=4, topk=args.valid_topk)
        train_loader = None
        val_loader = None
        trainer = Trainer(args, train_loader, val_loader, test_loader, train=False)
        dump_path = os.path.join(args.output, f'submit.json')
        trainer.predict(test_loader, dump_path=dump_path)
        # Choose the wandb project from the backbone family.
        # NOTE(review): `project_name` is unbound if the backbone is neither
        # t5 nor bart — wandb.init would raise NameError; confirm intended.
        if ('t5' in args.backbone):
            project_name = 'VLT5_GQA'
        elif ('bart' in args.backbone):
            project_name = 'VLBart_GQA'
        wandb.init(project=project_name)
        wandb.run.name = args.run_name
        wandb.config.update(args)
        # Archive the source files alongside the run for reproducibility.
        src_dir = Path(__file__).resolve().parent
        base_path = str(src_dir.parent)
        src_dir = str(src_dir)
        wandb.save(os.path.join((src_dir + '/*.py')), base_path=base_path)
        wandb.save(dump_path, base_path=args.output)
        print('Uploaded', dump_path)
    else:
        # Training path: train/val/test loaders, then the full training loop.
        print(f'Building train loader at GPU {gpu}')
        train_loader = get_loader(args, split=args.train, mode='train', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=args.num_workers, topk=args.train_topk)
        # Validation falls back to the training batch size when not set explicitly.
        if (args.valid_batch_size is not None):
            valid_batch_size = args.valid_batch_size
        else:
            valid_batch_size = args.batch_size
        print(f'Building val loader at GPU {gpu}')
        val_loader = get_loader(args, split=args.valid, mode='val', batch_size=valid_batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
        print(f'Building test loader at GPU {gpu}')
        test_loader = get_loader(args, split=args.test, mode='val', batch_size=valid_batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
        trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
        trainer.train()
def pre_process(entity, batchsize):
    """Transpose the first three fields of a batch of records into per-field tensors.

    Args:
        entity: indexable batch where entity[i][j] is the j-th field of sample i.
        batchsize: number of samples to read from `entity`.

    Returns:
        A list of 3 torch.Tensor, one per field index, each of length `batchsize`.
    """
    processed_entity = []
    # Fix: the original named its accumulator `list`, shadowing the builtin.
    for field_idx in range(3):
        column = [entity[sample_idx][field_idx] for sample_idx in range(batchsize)]
        processed_entity.append(torch.Tensor(column))
    return processed_entity
class ContinuousSamplerTaskPolicies(AbstractMctsPolicies):
    """MCTS policy bundle (score / widen / extract / sample) for continuous sampler tasks."""

    def __init__(self, task, Z=None, score_c=1.0, pw_C=1.0, pw_alpha=0.5, unordered=True, *args, **kwargs):
        """
        Args:
            task: the continuous task to sample for.
            Z: optional mapping of fixed assignments (defaults to an empty dict).
            score_c: exploration constant for the prior-probability score.
            pw_C, pw_alpha: progressive-widening constants.
            unordered: passed through to ContinuousTaskSample.
        """
        # Fix: the original used a mutable default (Z={}) that is shared across
        # all instances created without an explicit Z.
        if Z is None:
            Z = {}
        self.task = task
        self.Z = Z
        sample = ContinuousTaskSample(task, Z, unordered)
        super(ContinuousSamplerTaskPolicies, self).__init__(
            *args,
            score=PriorProbabilityScore(score_c),
            widen=ProgressiveWiden(pw_C, pw_alpha),
            extract=MostVisitedExtract(),
            sample=sample,
            initialize=None,
            **kwargs)
class CalabresiDataset(Dataset):
    """Dataset of MS/HC slice images with clinical covariates loaded from a CSV.

    The CSV is cleaned in __init__ (categorical columns mapped to floats,
    missing values filled, a small eps added to strictly-positive measures);
    __getitem__ loads the image, applies optional crop/resize, and returns a
    dict of float32 tensors plus the image tensor.
    """

    def __init__(self, csv_path, crop_type=None, crop_size=(192, 192), resize=None, eps: float=0.0001):
        """
        Args:
            csv_path: path to the metadata CSV.
            crop_type: None, 'center', or 'random'.
            crop_size: crop size in pixels (h, w).
            resize: optional target size for tv.transforms.Resize.
            eps: small offset added to volumes/scores so they are strictly positive.
        """
        super().__init__()
        self.csv_path = csv_path
        csv = pd.read_csv(csv_path)
        csv.rename(columns={'relapse_last30days': 'relapse'}, inplace=True)
        # Map categoricals to floats; missing relapse/treatment_propagated become -1 (unknown).
        csv['relapse'] = csv['relapse'].map({np.nan: (- 1.0), 'N': 0.0, 'Y': 1.0})
        csv['treatment_propagated'] = csv['treatment_propagated'].map({np.nan: (- 1.0), 'N': 0.0, 'Y': 1.0})
        csv['treatment'] = csv['treatment'].map({np.nan: 0.0, 'none': 0.0, 'glatiramer acetate': 1.0, 'interferon beta': 2.0, 'natalizumab': 3.0, 'other': 4.0})
        csv['duration'] = (csv['duration'].fillna(0.0) + eps)
        csv['edss'] = (csv['edss'].fillna(0.0) + eps)
        csv['msss'] = (csv['msss'].fillna(0.0) + eps)
        csv['fss'] = (csv['fss'].fillna(0.0) + eps)
        # NOTE(review): n_exist is computed AFTER the fillna above, so no value is
        # null here and n_exist is always 3 — confirm whether the average was meant
        # to use the pre-fill missingness.
        n_exist = (((~ csv['fss'].isnull()).astype(int) + (~ csv['msss'].isnull()).astype(int)) + (~ csv['edss'].isnull()).astype(int))
        n_exist.replace(0.0, 1.0, inplace=True)
        csv['score'] = (((csv['edss'] + csv['msss']) + csv['fss']) / n_exist)
        csv['sex'] = csv['sex'].map({'M': 0.0, 'F': 1.0})
        # Binary label: healthy control vs any MS subtype.
        csv['type'] = csv['type'].map({'HC': 0.0, 'RRMS': 1.0, 'SPMS': 1.0, 'PPMS': 1.0})
        csv['slice_ventricle_volume'] = (csv['slice_ventricle_volume'].astype(np.float32) + eps)
        csv['ventricle_volume'] = (csv['ventricle_volume'].astype(np.float32) + eps)
        csv['slice_brain_volume'] = (csv['slice_brain_volume'].astype(np.float32) + eps)
        csv['brain_volume'] = (csv['brain_volume'].astype(np.float32) + eps)
        csv['slice_lesion_volume'] = (csv['slice_lesion_volume'].astype(np.float32).fillna(0.0) + eps)
        csv['lesion_volume'] = (csv['lesion_volume'].astype(np.float32).fillna(0.0) + eps)
        csv['slice_number'] = csv['slice_number'].astype(np.float32)
        if csv.isnull().values.any():
            raise ValueError(f'There is either an empty space, nan, or otherwise something wrong in the csv {csv_path}')
        self.csv = csv
        self.crop_type = crop_type
        self.crop_size = crop_size
        self.resize = resize

    def __len__(self):
        return len(self.csv)

    def __getitem__(self, index):
        item = self.csv.loc[index]
        item = item.to_dict()
        item = self._prepare_item(item)
        img_path = item['filename']
        img = Image.open(img_path)
        transform_list = []
        if (self.crop_type is not None):
            if (self.crop_type == 'center'):
                transform_list += [tv.transforms.CenterCrop(self.crop_size)]
            elif (self.crop_type == 'random'):
                transform_list += [tv.transforms.RandomCrop(self.crop_size)]
            else:
                raise ValueError(f'unknown crop type: {self.crop_type}')
        if self.resize:
            transform_list += [tv.transforms.Resize(self.resize)]
        transform_list += [tv.transforms.ToTensor()]
        img = tv.transforms.Compose(transform_list)(img)
        item['image'] = img
        return item

    def _prepare_item(self, item):
        """Convert the covariate fields of `item` (in place) to float32 scalar tensors.

        Fixes vs the original:
          * the method was missing `self`, so `self._prepare_item(item)` raised
            TypeError at runtime;
          * `item['sex']` was populated from `item['type']` instead of the sex column.
        """
        item['age'] = torch.as_tensor(item['age'], dtype=torch.float32)
        item['sex'] = torch.as_tensor(item['sex'], dtype=torch.float32)
        item['type'] = torch.as_tensor(item['type'], dtype=torch.float32)
        item['relapse'] = torch.as_tensor(item['relapse'], dtype=torch.float32)
        item['duration'] = torch.as_tensor(item['duration'], dtype=torch.float32)
        item['slice_brain_volume'] = torch.as_tensor(item['slice_brain_volume'], dtype=torch.float32)
        item['slice_ventricle_volume'] = torch.as_tensor(item['slice_ventricle_volume'], dtype=torch.float32)
        item['slice_lesion_volume'] = torch.as_tensor(item['slice_lesion_volume'], dtype=torch.float32)
        item['brain_volume'] = torch.as_tensor(item['brain_volume'], dtype=torch.float32)
        item['ventricle_volume'] = torch.as_tensor(item['ventricle_volume'], dtype=torch.float32)
        item['lesion_volume'] = torch.as_tensor(item['lesion_volume'], dtype=torch.float32)
        item['score'] = torch.as_tensor(item['score'], dtype=torch.float32)
        item['edss'] = torch.as_tensor(item['edss'], dtype=torch.float32)
        item['fss'] = torch.as_tensor(item['fss'], dtype=torch.float32)
        item['msss'] = torch.as_tensor(item['msss'], dtype=torch.float32)
        item['treatment'] = torch.as_tensor(item['treatment'], dtype=torch.float32)
        item['treatment_propagated'] = torch.as_tensor(item['treatment_propagated'], dtype=torch.float32)
        item['slice_number'] = torch.as_tensor(item['slice_number'], dtype=torch.float32)
        return item
def _dense_kernel_initializer(shape, dtype=None):
    """Uniform initializer in [-1/sqrt(fan_in), 1/sqrt(fan_in)] for dense kernels."""
    fan_in, _ = _compute_fans(shape)
    limit = 1.0 / np.sqrt(fan_in)
    return K.random_uniform(shape, -limit, limit, dtype)
class AllInOneVisualizer():
    """RViz visualizer for an 'all-in-one' planner: publishes the chosen action's
    name, the per-step action trajectory (color-coded by action), and collision
    markers along the robot's path.
    """

    def __init__(self, evaluation: bool, model_names: [str], ns_prefix: str, visualize_every_x_iterations: int=1, visualize_crash: bool=False):
        """
        Args:
            evaluation: when True, reset_visualizer() clears the trajectory/collisions.
            model_names: display name per action index (indexed by the chosen action).
            ns_prefix: ROS namespace prefix for the publisher topics.
            visualize_every_x_iterations: only publish every N-th visualize_step call.
            visualize_crash: also publish a 'Collision!' marker when a crash occurs.
        """
        self._evaluation = evaluation
        self.agent_visualization = rospy.Publisher(f'{ns_prefix}all_in_one_action_vis', Marker, queue_size=1)
        self.agent_visualization_trajectory = rospy.Publisher(f'{ns_prefix}all_in_one_action_trajectory_vis', Marker, queue_size=1)
        self.collision_visualization = rospy.Publisher(f'{ns_prefix}collision_vis', Marker, queue_size=1)
        self._setup_trajectory_marker()
        self._collisions_markers = []
        self._model_names = model_names
        # One RGB color per action index; supports up to 6 actions.
        self.colors = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]
        self._collisions = 0
        self._visualize_every_x_iterations = visualize_every_x_iterations
        self._current_iteration = 0
        self._visualize_crash = visualize_crash

    def visualize_step(self, action: int, is_in_crash: bool, robot_pose: Pose2D):
        """Publish visualization for one step (subsampled by visualize_every_x_iterations)."""
        if ((self._current_iteration % self._visualize_every_x_iterations) == 0):
            self._visualize_action(action)
            self._visualize_episode_actions_as_path(action, robot_pose)
            if (self._visualize_crash and is_in_crash):
                self._visualize_collision(robot_pose)
                # Counter also offsets collision marker ids so they stay unique.
                self._collisions += 1
        self._current_iteration += 1

    def reset_visualizer(self):
        """Clear trajectory and collision markers (evaluation mode only) and reset counters."""
        if self._evaluation:
            self._action_trajectory_marker.points.clear()
            self._action_trajectory_marker.colors.clear()
            self.agent_visualization_trajectory.publish(self._action_trajectory_marker)
            self._remove_collisions_markers()
            self._collisions = 0
            self._current_iteration = 0

    def _setup_trajectory_marker(self):
        # Persistent LINE_STRIP marker; points/colors are appended per step.
        self._action_trajectory_marker = Marker()
        self._action_trajectory_marker.header.frame_id = 'map'
        self._action_trajectory_marker.id = 1001
        self._action_trajectory_marker.action = visualization_msgs.msg.Marker.ADD
        self._action_trajectory_marker.type = visualization_msgs.msg.Marker.LINE_STRIP
        self._action_trajectory_marker.scale.z = 0
        # For LINE_STRIP only scale.x (line width) is used.
        self._action_trajectory_marker.scale.x = 0.1
        self._action_trajectory_marker.scale.y = 0

    def _remove_collisions_markers(self):
        # Publish a delete action for each stored marker, then drop them.
        for m in self._collisions_markers:
            m.action = visualization_msgs.msg.Marker.DELETEALL
            self.collision_visualization.publish(m)
        self._collisions_markers = []

    def _visualize_collision(self, robot_pose: Pose2D):
        """Publish a yellow 'Collision!' text marker at the robot's current pose."""
        marker = Marker()
        marker.header.frame_id = 'map'
        marker.header.stamp = rospy.get_rostime()
        # Ids 1002+ keep collision markers distinct from action (1000) and trajectory (1001).
        marker.id = (1002 + self._collisions)
        marker.type = visualization_msgs.msg.Marker.TEXT_VIEW_FACING
        marker.action = visualization_msgs.msg.Marker.ADD
        marker.color.r = 1
        marker.color.g = 1
        marker.color.b = 0
        marker.color.a = 1
        marker.scale.z = 0.7
        marker.pose.position.x = robot_pose.x
        marker.pose.position.y = robot_pose.y
        marker.text = 'Collision!'
        self._collisions_markers.append(marker)
        self.collision_visualization.publish(marker)

    def _visualize_episode_actions_as_path(self, action: int, robot_pose: Pose2D):
        """Append the current pose (colored by action) to the trajectory strip and republish."""
        marker = self._action_trajectory_marker
        marker.header.stamp = rospy.get_rostime()
        next_point = geometry_msgs.msg.Point()
        next_point.x = robot_pose.x
        next_point.y = robot_pose.y
        next_point.z = 0
        marker.points.append(next_point)
        next_color = std_msgs.msg.ColorRGBA(self.colors[action][0], self.colors[action][1], self.colors[action][2], 1)
        marker.colors.append(next_color)
        self._action_trajectory_marker = marker
        self.agent_visualization_trajectory.publish(self._action_trajectory_marker)

    def _visualize_action(self, action: int):
        """Publish the chosen model's name as a text marker in the action's color."""
        marker = Marker()
        marker.header.stamp = rospy.get_rostime()
        marker.header.frame_id = 'map'
        # NOTE(review): stamp is assigned twice (above and below) — harmless duplicate.
        marker.header.stamp = rospy.get_rostime()
        marker.id = 1000
        marker.type = visualization_msgs.msg.Marker.TEXT_VIEW_FACING
        marker.action = visualization_msgs.msg.Marker.ADD
        marker.color.r = self.colors[action][0]
        marker.color.g = self.colors[action][1]
        marker.color.b = self.colors[action][2]
        marker.color.a = 1
        marker.pose.position.x = (- 0.5)
        marker.pose.position.y = (- 1.5)
        marker.scale.z = 1
        marker.text = self._model_names[action]
        self.agent_visualization.publish(marker)
class Metric(metaclass=ABCMeta):
_initialized = False
def __init__(self, threaded=False, use_deque=False) -> None:
super().__init__()
self._initialized = True
self._threaded = threaded
if self._threaded:
logger.trace(f'{self.__class__.__name__} spawn a thread')
self._queue = ThreadQueue(use_deque=use_deque)
self._worker = Thread(target=self._worker_func, name='threaded_worker', args=(self._queue,))
self._worker.start()
atexit.register(self.close)
def reset(self):
pass
def add(self, *args, **kwargs):
assert self._initialized, f'{self.__class__.__name__} must be initialized by overriding __init__'
if (not self._threaded):
return self._add(*args, **kwargs)
return self._add_queue(*args, **kwargs)
def _add(self, *args, **kwargs):
pass
def _add_queue(self, *args, **kwargs):
self._queue.put((args, kwargs))
def summary(self):
return self._summary()
def _summary(self):
pass
def _worker_func(self, input_queue: ThreadQueue):
while True:
try:
(args, kwags) = input_queue.get()
except IndexError:
continue
if isinstance(args[0], _StopToken):
break
self._add(*args, **kwags)
def join(self):
if (not self._threaded):
return
self.close()
logger.trace(f'{self.__class__.__name__} join the thread')
self._worker.join()
logger.trace(f'{self.__class__.__name__} end the thread')
def close(self):
self._add_queue(_StopToken()) |
def sample_and_group(npoint, radius, nsample, xyz, points, new_xyz=None, knn=False, use_xyz=True):
    """Sample `npoint` centroids via farthest-point sampling and group neighbors around them.

    Args:
        npoint: number of centroids to sample.
        radius: ball-query radius (ignored when knn=True).
        nsample: neighbors gathered per centroid.
        xyz: input point coordinates.
        points: optional per-point features grouped alongside coordinates.
        new_xyz: precomputed centroids; gathered from xyz when None.
        knn: use exact k-nearest-neighbors instead of radius ball query.
        use_xyz: concatenate local coordinates onto grouped features.

    Returns:
        (new_xyz, new_points, idx, sample_idx, grouped_xyz).
    """
    sample_idx = farthest_point_sample(npoint, xyz)
    if new_xyz is None:
        new_xyz = gather_point(xyz, sample_idx)
    # Neighborhood lookup: exact kNN or fixed-radius ball query.
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)
    # Shift grouped coordinates into each centroid's local frame.
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
    if points is None:
        new_points = grouped_xyz
    elif use_xyz:
        new_points = tf.concat([grouped_xyz, group_point(points, idx)], axis=(- 1))
    else:
        new_points = group_point(points, idx)
    return (new_xyz, new_points, idx, sample_idx, grouped_xyz)
def train_model(model, criterion, optimizer, scheduler, cfg):
    """Train/evaluate `model` for cfg['max_epoch'] epochs, tracking best test accuracy.

    Relies on module-level `data_loader` and `data_set` dicts keyed by phase.
    Checkpoints every cfg['save_steps'] epochs into cfg['ckpt_root']; when
    cfg['retrieval_on'] is set, also computes mAP over test features.

    Returns:
        The state_dict (deep copy) of the model with the best test accuracy.
    """
    best_acc = 0.0
    best_map = 0.0
    best_model_wts = copy.deepcopy(model.state_dict())
    # NOTE(review): range(1, max_epoch) runs max_epoch - 1 epochs — confirm intended.
    for epoch in range(1, cfg['max_epoch']):
        print(('-' * 60))
        print('Epoch: {} / {}'.format(epoch, cfg['max_epoch']))
        print(('-' * 60))
        # ('phrase' is the original author's spelling of 'phase'.)
        for phrase in ['train', 'test']:
            if (phrase == 'train'):
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            (ft_all, lbl_all) = (None, None)
            for (i, (centers, corners, normals, neighbor_index, targets)) in enumerate(data_loader[phrase]):
                centers = centers.cuda()
                corners = corners.cuda()
                normals = normals.cuda()
                neighbor_index = neighbor_index.cuda()
                targets = targets.cuda()
                # Gradients only during the train phase.
                with torch.set_grad_enabled((phrase == 'train')):
                    (outputs, feas) = model(centers, corners, normals, neighbor_index)
                    (_, preds) = torch.max(outputs, 1)
                    loss = criterion(outputs, targets)
                    if (phrase == 'train'):
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                    # Collect features/labels for retrieval mAP on the test phase.
                    if ((phrase == 'test') and cfg['retrieval_on']):
                        ft_all = append_feature(ft_all, feas.detach().cpu())
                        lbl_all = append_feature(lbl_all, targets.detach().cpu(), flaten=True)
                    running_loss += (loss.item() * centers.size(0))
                    running_corrects += torch.sum((preds == targets.data))
            epoch_loss = (running_loss / len(data_set[phrase]))
            epoch_acc = (running_corrects.double() / len(data_set[phrase]))
            if (phrase == 'train'):
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phrase, epoch_loss, epoch_acc))
                # LR schedule steps once per epoch, after the train phase.
                scheduler.step()
            if (phrase == 'test'):
                if (epoch_acc > best_acc):
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                print_info = '{} Loss: {:.4f} Acc: {:.4f} (best {:.4f})'.format(phrase, epoch_loss, epoch_acc, best_acc)
                if cfg['retrieval_on']:
                    epoch_map = calculate_map(ft_all, lbl_all)
                    if (epoch_map > best_map):
                        best_map = epoch_map
                    print_info += ' mAP: {:.4f}'.format(epoch_map)
                # Periodic checkpoint of the current (not necessarily best) weights.
                if ((epoch % cfg['save_steps']) == 0):
                    torch.save(copy.deepcopy(model.state_dict()), os.path.join(cfg['ckpt_root'], '{}.pkl'.format(epoch)))
                print(print_info)
    print('Best val acc: {:.4f}'.format(best_acc))
    print('Config: {}'.format(cfg))
    return best_model_wts
def write_next_bytes(fid, data, format_char_sequence, endian_character='<'):
    """Pack `data` with struct and write it to the binary file-like object `fid`.

    Args:
        fid: writable binary stream.
        data: a single value, or a list/tuple of values matching the format.
        format_char_sequence: struct format characters, e.g. 'iif'.
        endian_character: byte-order prefix (default '<', little-endian).
    """
    fmt = (endian_character + format_char_sequence)
    # Renamed from `bytes` to avoid shadowing the builtin type.
    if isinstance(data, (list, tuple)):
        packed = struct.pack(fmt, *data)
    else:
        packed = struct.pack(fmt, data)
    fid.write(packed)
def train(args, train_dataset, model, tokenizer, train_highway=False):
    """Train a (highway/early-exit) transformer classifier; returns (global_step, avg loss).

    Args:
        args: experiment configuration (batch sizes, lr, fp16, distributed ranks, ...).
        train_dataset: dataset consumed via a (Distributed)Sampler + DataLoader.
        model: model to train; may be wrapped in DataParallel/DDP here.
        tokenizer: passed through to evaluate() during in-training evaluation.
        train_highway: when True, optimize only 'highway' (early-exit) parameters;
            otherwise optimize everything except them.
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimization steps: either fixed max_steps (epochs derived) or epochs * steps-per-epoch.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Weight decay is disabled for biases and LayerNorm weights, per BERT convention.
    no_decay = ['bias', 'LayerNorm.weight']
    # Select either only the highway (early-exit) parameters or everything else.
    if train_highway:
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (('highway' in n) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if (('highway' in n) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}]
    else:
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (('highway' not in n) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if (('highway' not in n) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (DataParallel) and distributed (DDP) wrapping, in that order.
    if (args.n_gpu > 1):
        model = nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            # distilbert takes no token_type_ids; only bert/xlnet actually use them.
            if (args.model_type != 'distilbert'):
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet']) else None)
            inputs['train_highway'] = train_highway
            outputs = model(**inputs)
            loss = outputs[0]
            # DataParallel returns one loss per GPU; average them.
            if (args.n_gpu > 1):
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer step only every gradient_accumulation_steps micro-batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic TensorBoard logging (and optional eval) on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
class JETS(AbsGANTTS):
def __init__(self, idim: int, odim: int, sampling_rate: int=22050, generator_type: str='jets_generator', generator_params: Dict[(str, Any)]={'adim': 256, 'aheads': 2, 'elayers': 4, 'eunits': 1024, 'dlayers': 4, 'dunits': 1024, 'positionwise_layer_type': 'conv1d', 'positionwise_conv_kernel_size': 1, 'use_scaled_pos_enc': True, 'use_batch_norm': True, 'encoder_normalize_before': True, 'decoder_normalize_before': True, 'encoder_concat_after': False, 'decoder_concat_after': False, 'reduction_factor': 1, 'encoder_type': 'transformer', 'decoder_type': 'transformer', 'transformer_enc_dropout_rate': 0.1, 'transformer_enc_positional_dropout_rate': 0.1, 'transformer_enc_attn_dropout_rate': 0.1, 'transformer_dec_dropout_rate': 0.1, 'transformer_dec_positional_dropout_rate': 0.1, 'transformer_dec_attn_dropout_rate': 0.1, 'conformer_rel_pos_type': 'latest', 'conformer_pos_enc_layer_type': 'rel_pos', 'conformer_self_attn_layer_type': 'rel_selfattn', 'conformer_activation_type': 'swish', 'use_macaron_style_in_conformer': True, 'use_cnn_in_conformer': True, 'zero_triu': False, 'conformer_enc_kernel_size': 7, 'conformer_dec_kernel_size': 31, 'duration_predictor_layers': 2, 'duration_predictor_chans': 384, 'duration_predictor_kernel_size': 3, 'duration_predictor_dropout_rate': 0.1, 'energy_predictor_layers': 2, 'energy_predictor_chans': 384, 'energy_predictor_kernel_size': 3, 'energy_predictor_dropout': 0.5, 'energy_embed_kernel_size': 1, 'energy_embed_dropout': 0.5, 'stop_gradient_from_energy_predictor': False, 'pitch_predictor_layers': 5, 'pitch_predictor_chans': 384, 'pitch_predictor_kernel_size': 5, 'pitch_predictor_dropout': 0.5, 'pitch_embed_kernel_size': 1, 'pitch_embed_dropout': 0.5, 'stop_gradient_from_pitch_predictor': True, 'generator_out_channels': 1, 'generator_channels': 512, 'generator_global_channels': (- 1), 'generator_kernel_size': 7, 'generator_upsample_scales': [8, 8, 2, 2], 'generator_upsample_kernel_sizes': [16, 16, 4, 4], 'generator_resblock_kernel_sizes': [3, 
7, 11], 'generator_resblock_dilations': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'generator_use_additional_convs': True, 'generator_bias': True, 'generator_nonlinear_activation': 'LeakyReLU', 'generator_nonlinear_activation_params': {'negative_slope': 0.1}, 'generator_use_weight_norm': True, 'segment_size': 64, 'spks': (- 1), 'langs': (- 1), 'spk_embed_dim': None, 'spk_embed_integration_type': 'add', 'use_gst': False, 'gst_tokens': 10, 'gst_heads': 4, 'gst_conv_layers': 6, 'gst_conv_chans_list': [32, 32, 64, 64, 128, 128], 'gst_conv_kernel_size': 3, 'gst_conv_stride': 2, 'gst_gru_layers': 1, 'gst_gru_units': 128, 'init_type': 'xavier_uniform', 'init_enc_alpha': 1.0, 'init_dec_alpha': 1.0, 'use_masking': False, 'use_weighted_masking': False}, discriminator_type: str='hifigan_multi_scale_multi_period_discriminator', discriminator_params: Dict[(str, Any)]={'scales': 1, 'scale_downsample_pooling': 'AvgPool1d', 'scale_downsample_pooling_params': {'kernel_size': 4, 'stride': 2, 'padding': 2}, 'scale_discriminator_params': {'in_channels': 1, 'out_channels': 1, 'kernel_sizes': [15, 41, 5, 3], 'channels': 128, 'max_downsample_channels': 1024, 'max_groups': 16, 'bias': True, 'downsample_scales': [2, 2, 4, 4, 1], 'nonlinear_activation': 'LeakyReLU', 'nonlinear_activation_params': {'negative_slope': 0.1}, 'use_weight_norm': True, 'use_spectral_norm': False}, 'follow_official_norm': False, 'periods': [2, 3, 5, 7, 11], 'period_discriminator_params': {'in_channels': 1, 'out_channels': 1, 'kernel_sizes': [5, 3], 'channels': 32, 'downsample_scales': [3, 3, 3, 3, 1], 'max_downsample_channels': 1024, 'bias': True, 'nonlinear_activation': 'LeakyReLU', 'nonlinear_activation_params': {'negative_slope': 0.1}, 'use_weight_norm': True, 'use_spectral_norm': False}}, generator_adv_loss_params: Dict[(str, Any)]={'average_by_discriminators': False, 'loss_type': 'mse'}, discriminator_adv_loss_params: Dict[(str, Any)]={'average_by_discriminators': False, 'loss_type': 'mse'}, feat_match_loss_params: 
Dict[(str, Any)]={'average_by_discriminators': False, 'average_by_layers': False, 'include_final_outputs': True}, mel_loss_params: Dict[(str, Any)]={'fs': 22050, 'n_fft': 1024, 'hop_length': 256, 'win_length': None, 'window': 'hann', 'n_mels': 80, 'fmin': 0, 'fmax': None, 'log_base': None}, lambda_adv: float=1.0, lambda_mel: float=45.0, lambda_feat_match: float=2.0, lambda_var: float=1.0, lambda_align: float=2.0, cache_generator_outputs: bool=True):
assert check_argument_types()
super().__init__()
generator_class = AVAILABLE_GENERATERS[generator_type]
generator_params.update(idim=idim, odim=odim)
self.generator = generator_class(**generator_params)
discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type]
self.discriminator = discriminator_class(**discriminator_params)
self.generator_adv_loss = GeneratorAdversarialLoss(**generator_adv_loss_params)
self.discriminator_adv_loss = DiscriminatorAdversarialLoss(**discriminator_adv_loss_params)
self.feat_match_loss = FeatureMatchLoss(**feat_match_loss_params)
self.mel_loss = MelSpectrogramLoss(**mel_loss_params)
self.var_loss = VarianceLoss()
self.forwardsum_loss = ForwardSumLoss()
self.lambda_adv = lambda_adv
self.lambda_mel = lambda_mel
self.lambda_feat_match = lambda_feat_match
self.lambda_var = lambda_var
self.lambda_align = lambda_align
self.cache_generator_outputs = cache_generator_outputs
self._cache = None
self.fs = sampling_rate
self.spks = self.generator.spks
self.langs = self.generator.langs
self.spk_embed_dim = self.generator.spk_embed_dim
def require_raw_speech(self):
return True
def require_vocoder(self):
return False
def forward(self, text: torch.Tensor, text_lengths: torch.Tensor, feats: torch.Tensor, feats_lengths: torch.Tensor, speech: torch.Tensor, speech_lengths: torch.Tensor, sids: Optional[torch.Tensor]=None, spembs: Optional[torch.Tensor]=None, lids: Optional[torch.Tensor]=None, forward_generator: bool=True, **kwargs) -> Dict[(str, Any)]:
if forward_generator:
return self._forward_generator(text=text, text_lengths=text_lengths, feats=feats, feats_lengths=feats_lengths, speech=speech, speech_lengths=speech_lengths, sids=sids, spembs=spembs, lids=lids, **kwargs)
else:
return self._forward_discrminator(text=text, text_lengths=text_lengths, feats=feats, feats_lengths=feats_lengths, speech=speech, speech_lengths=speech_lengths, sids=sids, spembs=spembs, lids=lids, **kwargs)
def _forward_generator(self, text: torch.Tensor, text_lengths: torch.Tensor, feats: torch.Tensor, feats_lengths: torch.Tensor, speech: torch.Tensor, speech_lengths: torch.Tensor, sids: Optional[torch.Tensor]=None, spembs: Optional[torch.Tensor]=None, lids: Optional[torch.Tensor]=None, **kwargs) -> Dict[(str, Any)]:
batch_size = text.size(0)
speech = speech.unsqueeze(1)
reuse_cache = True
if ((not self.cache_generator_outputs) or (self._cache is None)):
reuse_cache = False
outs = self.generator(text=text, text_lengths=text_lengths, feats=feats, feats_lengths=feats_lengths, sids=sids, spembs=spembs, lids=lids, **kwargs)
else:
outs = self._cache
if (self.training and self.cache_generator_outputs and (not reuse_cache)):
self._cache = outs
(speech_hat_, bin_loss, log_p_attn, start_idxs, d_outs, ds, p_outs, ps, e_outs, es) = outs
speech_ = get_segments(x=speech, start_idxs=(start_idxs * self.generator.upsample_factor), segment_size=(self.generator.segment_size * self.generator.upsample_factor))
p_hat = self.discriminator(speech_hat_)
with torch.no_grad():
p = self.discriminator(speech_)
mel_loss = self.mel_loss(speech_hat_, speech_)
adv_loss = self.generator_adv_loss(p_hat)
feat_match_loss = self.feat_match_loss(p_hat, p)
(dur_loss, pitch_loss, energy_loss) = self.var_loss(d_outs, ds, p_outs, ps, e_outs, es, text_lengths)
forwardsum_loss = self.forwardsum_loss(log_p_attn, text_lengths, feats_lengths)
mel_loss = (mel_loss * self.lambda_mel)
adv_loss = (adv_loss * self.lambda_adv)
feat_match_loss = (feat_match_loss * self.lambda_feat_match)
g_loss = ((mel_loss + adv_loss) + feat_match_loss)
var_loss = (((dur_loss + pitch_loss) + energy_loss) * self.lambda_var)
align_loss = ((forwardsum_loss + bin_loss) * self.lambda_align)
loss = ((g_loss + var_loss) + align_loss)
stats = dict(generator_loss=loss.item(), generator_g_loss=g_loss.item(), generator_var_loss=var_loss.item(), generator_align_loss=align_loss.item(), generator_g_mel_loss=mel_loss.item(), generator_g_adv_loss=adv_loss.item(), generator_g_feat_match_loss=feat_match_loss.item(), generator_var_dur_loss=dur_loss.item(), generator_var_pitch_loss=pitch_loss.item(), generator_var_energy_loss=energy_loss.item(), generator_align_forwardsum_loss=forwardsum_loss.item(), generator_align_bin_loss=bin_loss.item())
(loss, stats, weight) = force_gatherable((loss, stats, batch_size), loss.device)
if (reuse_cache or (not self.training)):
self._cache = None
return {'loss': loss, 'stats': stats, 'weight': weight, 'optim_idx': 0}
def _forward_discrminator(self, text: torch.Tensor, text_lengths: torch.Tensor, feats: torch.Tensor, feats_lengths: torch.Tensor, speech: torch.Tensor, speech_lengths: torch.Tensor, sids: Optional[torch.Tensor]=None, spembs: Optional[torch.Tensor]=None, lids: Optional[torch.Tensor]=None, **kwargs) -> Dict[(str, Any)]:
    """Run the discriminator forward pass and compute its adversarial loss.

    NOTE(review): the method name contains a typo ("discrminator"); it is
    kept as-is because callers may reference this exact attribute name.

    Args:
        text: Padded token-id sequences, shape (B, T_text).
        text_lengths: Valid length of each text sequence, shape (B,).
        feats: Batch of acoustic feature sequences.
        feats_lengths: Valid length of each feature sequence, shape (B,).
        speech: Batch of target waveforms.
        speech_lengths: Valid waveform lengths (not used in this method).
        sids: Optional speaker ids.
        spembs: Optional speaker embeddings.
        lids: Optional language ids.

    Returns:
        Dict with ``loss``, ``stats``, ``weight`` and ``optim_idx`` (1 selects
        the discriminator optimizer).
    """
    batch_size = text.size(0)
    # Add a channel axis: (B, T) -> (B, 1, T).
    speech = speech.unsqueeze(1)
    reuse_cache = True
    if ((not self.cache_generator_outputs) or (self._cache is None)):
        reuse_cache = False
        outs = self.generator(text=text, text_lengths=text_lengths, feats=feats, feats_lengths=feats_lengths, sids=sids, spembs=spembs, lids=lids, **kwargs)
    else:
        # Reuse generator outputs cached during the generator update.
        outs = self._cache
    if (self.cache_generator_outputs and (not reuse_cache)):
        self._cache = outs
    (speech_hat_, _, _, start_idxs, *_) = outs
    # Cut the ground-truth waveform to the same random segment the generator
    # synthesized; start_idxs are in frame units, hence the upsample factor.
    speech_ = get_segments(x=speech, start_idxs=(start_idxs * self.generator.upsample_factor), segment_size=(self.generator.segment_size * self.generator.upsample_factor))
    # Detach generated audio so only the discriminator receives gradients.
    p_hat = self.discriminator(speech_hat_.detach())
    p = self.discriminator(speech_)
    (real_loss, fake_loss) = self.discriminator_adv_loss(p_hat, p)
    loss = (real_loss + fake_loss)
    stats = dict(discriminator_loss=loss.item(), discriminator_real_loss=real_loss.item(), discriminator_fake_loss=fake_loss.item())
    (loss, stats, weight) = force_gatherable((loss, stats, batch_size), loss.device)
    # Invalidate the cache once consumed (or whenever not training).
    if (reuse_cache or (not self.training)):
        self._cache = None
    return {'loss': loss, 'stats': stats, 'weight': weight, 'optim_idx': 1}
def inference(self, text: torch.Tensor, feats: Optional[torch.Tensor]=None, pitch: Optional[torch.Tensor]=None, energy: Optional[torch.Tensor]=None, use_teacher_forcing: bool=False, **kwargs) -> Dict[(str, torch.Tensor)]:
    """Synthesize a waveform for a single (unbatched) utterance.

    Args:
        text: Token-id sequence of shape (T_text,).
        feats: Ground-truth features; required when teacher forcing is on.
        pitch: Ground-truth pitch; used only with teacher forcing.
        energy: Ground-truth energy; used only with teacher forcing.
        use_teacher_forcing: Condition the generator on ground-truth
            features/pitch/energy instead of predictions.

    Returns:
        Dict with the flattened waveform under ``wav`` and the per-token
        durations under ``duration``.
    """
    # Promote the single utterance to a batch of one.
    text = text[None]
    text_lengths = torch.tensor([text.size(1)], dtype=torch.long, device=text.device)
    if not use_teacher_forcing:
        wav, dur = self.generator.inference(text=text, text_lengths=text_lengths, **kwargs)
        return dict(wav=wav.view(-1), duration=dur[0])
    assert feats is not None
    feats = feats[None]
    feats_lengths = torch.tensor([feats.size(1)], dtype=torch.long, device=feats.device)
    pitch = pitch[None]
    energy = energy[None]
    wav, dur = self.generator.inference(text=text, text_lengths=text_lengths, feats=feats, feats_lengths=feats_lengths, pitch=pitch, energy=energy, use_teacher_forcing=use_teacher_forcing, **kwargs)
    return dict(wav=wav.view(-1), duration=dur[0])
class GradOutputMonitor(BaseMonitor):
    """Records (a function of) grad_output for selected layers via backward hooks.

    Every submodule of ``net`` that matches ``instance`` gets a backward hook;
    when monitoring is enabled, each backward pass appends
    ``function_on_grad_output(grad_output)`` to the shared records list and
    remembers its index under the layer's name.
    """
    def __init__(self, net: nn.Module, instance: Any=None, function_on_grad_output: Callable=(lambda x: x)):
        """
        Args:
            net: Network whose submodules are scanned.
            instance: A type (or tuple of types) passed to isinstance() to
                select which submodules are monitored.
                NOTE(review): the default None would make isinstance() raise
                TypeError — callers appear to always pass a type; confirm.
            function_on_grad_output: Transformation applied to each recorded
                grad_output (identity by default).
        """
        super().__init__()
        self.function_on_grad_output = function_on_grad_output
        for (name, m) in net.named_modules():
            if isinstance(m, instance):
                self.monitored_layers.append(name)
                self.name_records_index[name] = []
                # register_full_backward_hook exists since torch 1.8; fall
                # back to the deprecated register_backward_hook otherwise.
                if (torch.__version__ >= torch.torch_version.TorchVersion('1.8.0')):
                    self.hooks.append(m.register_full_backward_hook(self.create_hook(name)))
                else:
                    self.hooks.append(m.register_backward_hook(self.create_hook(name)))
    def create_hook(self, name):
        """Build a backward hook bound to ``name`` that appends the processed
        grad_output to the shared records and indexes it per layer."""
        def hook(m, grad_input, grad_output):
            if self.is_enable():
                self.name_records_index[name].append(self.records.__len__())
                self.records.append(self.function_on_grad_output(unpack_len1_tuple(grad_output)))
        return hook
@dataclass
class DataArguments:
    """Arguments controlling which dataset is used and how it is preprocessed.

    Fix: the class uses ``dataclasses.field`` for every attribute but was
    missing the ``@dataclass`` decorator, so the attributes would have been
    bare ``Field`` objects and ``__post_init__`` would never run. The
    decorator is restored; field definitions are unchanged.
    """
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    max_seq_length: Optional[int] = field(default=512, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.'})
    validation_split_percentage: Optional[int] = field(default=0, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    keep_in_memory: bool = field(default=False, metadata={'help': 'Whether to keep in memory the loaded dataset. Defaults to False.'})
    dataset_seed: int = field(default=42, metadata={'help': 'Seed to use in dataset processing, different seeds might yield different datasets. This seed and the seed in training arguments are not related'})
    dataset_cache_directory: Optional[str] = field(default=None, metadata={'help': 'Path to directory where the processed dataset will be saved. If path exists, try to load processed dataset from this path.'})
    dataset_concatenation: Optional[bool] = field(default=False, metadata={'help': 'Whether to concatenate the sentence for more efficient training.'})
    special_tokens: Optional[List[str]] = field(default=None, metadata={'help': 'The list of special tokens to add in tokenizer.'})
    max_source_length: Optional[int] = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    num_beams: Optional[int] = field(default=4, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    streaming: bool = field(default=False, metadata={'help': 'Enable streaming mode'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})

    def __post_init__(self):
        """Validate argument combinations right after construction.

        Raises:
            ValueError: when neither a dataset name nor any data file is given.
            AssertionError: when a provided file has an unsupported extension.
        """
        if self.streaming:
            require_version('datasets>=2.0.0', 'The streaming feature requires `datasets>=2.0.0`')
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json', 'txt']), '`train_file` should be a csv, a json or a txt file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json', 'txt']), '`validation_file` should be a csv, a json or a txt file.'
def parse_args():
    """Parse command-line arguments for the training/benchmark script.

    Fix: the ``--hiddien_size`` flag is misspelled; a correctly spelled
    ``--hidden_size`` alias is added. The attribute stays ``hiddien_size``
    (argparse derives dest from the first long option) so existing callers
    keep working.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Process arguments')
    parser.add_argument('--model_type', type=str, required=True)
    parser.add_argument('--datasize', type=int, default=200, required=False)
    parser.add_argument('--epoch', type=int, default=2, required=False)
    # '--hidden_size' is an alias for the historical misspelling.
    parser.add_argument('--hiddien_size', '--hidden_size', type=int, default=32, required=False)
    parser.add_argument('--head_num', type=int, default=4, required=False)
    parser.add_argument('--layer_num', type=int, default=3, required=False)
    parser.add_argument('--seq_length', type=int, default=16, required=False)
    parser.add_argument('--batchsize', type=int, default=8, required=False)
    parser.add_argument('--distributed', default=False, action='store_true')
    parser.add_argument('--user_created_dataloader', default=False, action='store_true')
    parser.add_argument('--load_strategy', default=False, action='store_true')
    parser.add_argument('--optim_grouped_params', default=False, action='store_true')
    parser.add_argument('--log_interval', type=int, default=10, required=False)
    parser.add_argument('--use_fsdp', default=False, action='store_true')
    parser.add_argument('--use_amp', default=False, action='store_true')
    parser.add_argument('--use_checkpointing', default=False, action='store_true')
    parser.add_argument('--use_module_replace', default=False, action='store_true')
    return parser.parse_args()
def single_or_none(sequence: List[E]) -> Optional[E]:
    """Return the sole element of *sequence*, or None when it is empty.

    Raises:
        AssertionError: when the sequence holds more than one element.
    """
    assert len(sequence) <= 1
    for element in sequence:
        return element
    return None
def combine_collision_reports(r1: CollisionReport, r2: CollisionReport) -> CollisionReport:
    """Merge two collision reports covering the same set of players.

    The chronologically earlier report provides the base; per player the
    locations and energy deltas accumulate while the post-collision velocity
    is taken from the later report.

    Raises:
        ZValueError: when the two reports do not cover identical players.
    """
    if r1.players.keys() != r2.players.keys():
        raise ZValueError('Cannot combine collision reports with different players', report1=r1, report2=r2)
    earlier, later = (r1, r2) if r1.at_time <= r2.at_time else (r2, r1)
    merged: dict[(PlayerName, CollisionReportPlayer)] = {}
    for player_name in earlier.players:
        before = earlier.players[player_name]
        after = later.players[player_name]
        merged[player_name] = replace(
            before,
            locations=before.locations + after.locations,
            velocity_after=after.velocity_after,
            energy_delta=before.energy_delta + after.energy_delta,
        )
    return replace(earlier, players=merged)
class PTBTokenizer():
    """Wrapper around the Stanford CoreNLP PTBTokenizer jar (COCO-caption style)."""
    def tokenize(self, captions_for_image):
        """Tokenize and lowercase all captions via the CoreNLP PTBTokenizer.

        Args:
            captions_for_image: dict mapping image id -> list of entries with
                a 'caption' string each.

        Returns:
            dict mapping image id -> list of tokenized caption strings with
            punctuation tokens removed.
        """
        cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, 'edu.stanford.nlp.process.PTBTokenizer', '-preserveLines', '-lowerCase']
        final_tokenized_captions_for_image = {}
        # One image id per caption, in the same order sentences are written.
        image_id = [k for (k, v) in captions_for_image.items() for _ in range(len(v))]
        sentences = '\n'.join([c['caption'].replace('\n', ' ') for (k, v) in captions_for_image.items() for c in v])
        path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
        # Random suffix + pid keeps concurrent runs from clobbering each other.
        tmp_file_name = (((((path_to_jar_dirname + '/temp_file') + str(random.randint(0, 999999))) + '_pid_') + str(os.getpid())) + '.tmp')
        with open(tmp_file_name, 'w') as tmp_file:
            tmp_file.write(sentences)
        cmd.append(tmp_file_name)
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        # NOTE(review): stdin is not a pipe, so the input= argument appears to
        # be ignored and the jar reads the temp file from the command line —
        # confirm before removing either path.
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        token_lines = token_lines.decode('utf-8')
        lines = token_lines.split('\n')
        if os.path.isfile(tmp_file_name):
            os.remove(tmp_file_name)
        # Re-associate each tokenized line with its image id (order preserved).
        for (k, line) in zip(image_id, lines):
            if (not (k in final_tokenized_captions_for_image)):
                final_tokenized_captions_for_image[k] = []
            tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') if (w not in PUNCTUATIONS)])
            final_tokenized_captions_for_image[k].append(tokenized_caption)
        return final_tokenized_captions_for_image
class LossHistory():
    """Accumulates train/test losses and test metrics over training steps.

    When a test value is missing for a step, the most recent recorded value
    is carried forward so every list stays aligned with ``steps``.
    """

    def __init__(self):
        self.steps = []
        self.loss_train = []
        self.loss_test = []
        self.metrics_test = []
        self.loss_weights = None

    def set_loss_weights(self, loss_weights):
        """Remember the loss weighting used for this run."""
        self.loss_weights = loss_weights

    def append(self, step, loss_train, loss_test, metrics_test):
        """Record one step; None test values repeat the previous entry."""
        self.steps.append(step)
        self.loss_train.append(loss_train)
        carried_loss = self.loss_test[-1] if loss_test is None else loss_test
        carried_metrics = self.metrics_test[-1] if metrics_test is None else metrics_test
        self.loss_test.append(carried_loss)
        self.metrics_test.append(carried_metrics)
class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder used when torch is unavailable.

    Instantiating it raises a helpful error instead of a bare ImportError.
    """
    # Backends that must be installed before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_sem_seg(gt_root, image_root, gt_ext='png', image_ext='jpg'):
    """Build dataset dicts pairing images with semantic-segmentation masks.

    Images under ``image_root`` and ground-truth masks under ``gt_root`` are
    matched by their extension-stripped relative path. When the two
    directories disagree, only the intersection of basenames is kept.

    Args:
        gt_root: Directory of ground-truth masks.
        image_root: Directory of input images.
        gt_ext: Mask file extension (default 'png').
        image_ext: Image file extension (default 'jpg').

    Returns:
        List of dicts with 'file_name', 'sem_seg_file_name', 'height', 'width'.
    """
    def file2id(folder_path, file_path):
        # Extension-stripped path relative to its root, used as a join key.
        image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
        image_id = os.path.splitext(image_id)[0]
        return image_id
    input_files = sorted((os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=(lambda file_path: file2id(image_root, file_path)))
    gt_files = sorted((os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=(lambda file_path: file2id(gt_root, file_path)))
    assert (len(gt_files) > 0), 'No annotations found in {}.'.format(gt_root)
    if (len(input_files) != len(gt_files)):
        logger.warn('Directory {} and {} has {} and {} files, respectively.'.format(image_root, gt_root, len(input_files), len(gt_files)))
        # Basenames keep the trailing dot ('img.'), which the rebuild below
        # relies on when re-appending the extension.
        input_basenames = [os.path.basename(f)[:(- len(image_ext))] for f in input_files]
        gt_basenames = [os.path.basename(f)[:(- len(gt_ext))] for f in gt_files]
        intersect = list((set(input_basenames) & set(gt_basenames)))
        intersect = sorted(intersect)
        logger.warn('Will use their intersection of {} files.'.format(len(intersect)))
        input_files = [os.path.join(image_root, (f + image_ext)) for f in intersect]
        gt_files = [os.path.join(gt_root, (f + gt_ext)) for f in intersect]
    logger.info('Loaded {} images with semantic segmentation from {}'.format(len(input_files), image_root))
    dataset_dicts = []
    for (img_path, gt_path) in zip(input_files, gt_files):
        local_path = PathManager.get_local_path(gt_path)
        # Image size is read from the mask file header (no full decode).
        (w, h) = imagesize.get(local_path)
        record = {}
        record['file_name'] = img_path
        record['sem_seg_file_name'] = gt_path
        record['height'] = h
        record['width'] = w
        dataset_dicts.append(record)
    return dataset_dicts
def _two_particle_antisymmetric_spatial_wavefn(unused_params, x):
    """Antisymmetric two-particle spatial wavefunction in sign-log form.

    The amplitude is the difference of the first coordinate of the two
    particles, which flips sign under particle exchange.
    """
    del unused_params
    amplitude = x[..., 0, 0] - x[..., 1, 0]
    return array_to_slog(amplitude)
class CustomDataset(Dataset):
    """Synthetic regression dataset backed by get_x_y()."""

    def __init__(self, size=1000):
        features, targets = get_x_y(size=size)
        self.x = torch.from_numpy(features).float()
        self.y = torch.from_numpy(targets).float()

    def __len__(self):
        # Number of samples along the first axis.
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
def get_network(weights):
    """Look up the RRDN architecture descriptor for a named weight set.

    Args:
        weights: Key into the module-level WEIGHTS_URLS registry.

    Returns:
        Tuple (arch_params, c_dim, kernel_size, url, name).

    Raises:
        ValueError: when the key is not a known weight set.
    """
    if weights not in WEIGHTS_URLS:
        raise ValueError('Available RRDN network weights: {}'.format(list(WEIGHTS_URLS.keys())))
    entry = WEIGHTS_URLS[weights]
    # RGB input and 3x3 kernels are fixed for this architecture.
    c_dim = 3
    kernel_size = 3
    return (entry['arch_params'], c_dim, kernel_size, entry['url'], entry['name'])
def encoder_init(m):
    """Xavier-initialise Conv1d weights using the ReLU gain.

    Modules that are not Conv1d are left untouched, so this can be passed
    straight to nn.Module.apply().
    """
    if not isinstance(m, torch.nn.Conv1d):
        return
    relu_gain = torch.nn.init.calculate_gain('relu')
    torch.nn.init.xavier_uniform_(m.weight, relu_gain)
def preprocess_function(examples):
    """Tokenize inputs and (short) targets for seq2seq fine-tuning.

    Relies on enclosing-scope names: ``tokenizer``, ``text_column``,
    ``label_column`` and ``max_length``. Targets are truncated/padded to
    length 2 (presumably single-token labels plus EOS — confirm against the
    task) and pad positions are replaced with -100 so the loss ignores them.

    Args:
        examples: Batch mapping column names to lists of strings.

    Returns:
        Tokenized model inputs with a 'labels' tensor attached.
    """
    inputs = examples[text_column]
    targets = examples[label_column]
    model_inputs = tokenizer(inputs, max_length=max_length, padding='max_length', truncation=True, return_tensors='pt')
    labels = tokenizer(targets, max_length=2, padding='max_length', truncation=True, return_tensors='pt')
    labels = labels['input_ids']
    # -100 is the ignore_index of the cross-entropy loss.
    labels[(labels == tokenizer.pad_token_id)] = (- 100)
    model_inputs['labels'] = labels
    return model_inputs
# NOTE(review): the two bare names below look like mangled decorator markers
# (@require_sentencepiece / @require_tokenizers); left untouched.
_sentencepiece
_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for ReformerTokenizer and its Rust counterpart."""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        """Save a sentencepiece-backed tokenizer so helpers can reload it."""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Token <-> id conversion must be symmetric."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """First/last vocab entries and total size for the sample vocab."""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[(- 1)], 'j')
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        """Python and Rust tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is false.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        """Padding to max_length must raise while no pad token is defined."""
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')]
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length')
    def test_padding_different_model_input_name(self):
        # Not applicable for Reformer.
        pass
    def test_full_tokenizer(self):
        """Golden tokens/ids for two reference sentences; id 0 is <unk>."""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])
    # NOTE(review): mangled decorator marker (likely @cached_property); left
    # untouched.
    _property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    def test_tokenization_base_hard_symbols(self):
        symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        original_tokenizer_encodings = [108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    # NOTE(review): mangled decorator marker (likely @require_torch); left
    # untouched.
    _torch
    def test_torch_encode_plus_sent_to_model(self):
        """Encoded batches must be consumable by a small ReformerModel."""
        import torch
        from transformers import ReformerConfig, ReformerModel
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
        config = ReformerConfig()
        # Axial position embeddings must match the padded input shape.
        config.axial_pos_shape = encoded_sequence['input_ids'].shape
        model = ReformerModel(config)
        assert (model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size)
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
        sequences = ['This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.']
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='google/reformer-crime-and-punishment', revision='0e6c3decb8211d49bfdc8b0448b3f5a', padding=False, sequences=sequences)
def cal_acc(x, gt):
    """Row-wise argmax accuracy of prediction `x` against target `gt`.

    Both tensors are squeezed of a leading singleton batch dim and, when
    taller than wide, transposed so rows index samples.

    Returns:
        (number_of_matching_rows, total_rows)
    """
    x = x.squeeze(0)
    gt = gt.squeeze(0)
    # Orient so the shorter axis is the class axis.
    if x.shape[0] > x.shape[1]:
        x, gt = x.t(), gt.t()
    hits = x.max(1).indices == gt.max(1).indices
    if hits.shape[0] != x.shape[0]:
        raise RuntimeError('b.shape[0] != x.shape[0]')
    return int(hits.sum()), hits.shape[0]
class BoundedArchive(Archive[S]):
    """Non-dominated solution archive with a maximum capacity.

    When the archive grows past ``maximum_size``, density estimation is
    recomputed and the worst solution (according to ``comparator``) is
    evicted.
    """
    def __init__(self, maximum_size: int, comparator: Comparator[S]=None, density_estimator: DensityEstimator=None, dominance_comparator: Comparator[S]=DominanceComparator()):
        # NOTE(review): the DominanceComparator() default is created once at
        # definition time and shared by every instance — fine while the
        # comparator is stateless; confirm before adding state to it.
        super(BoundedArchive, self).__init__()
        self.maximum_size = maximum_size
        self.comparator = comparator
        self.density_estimator = density_estimator
        self.non_dominated_solution_archive = NonDominatedSolutionsArchive(dominance_comparator=dominance_comparator)
        # Alias: this archive's list IS the inner archive's list.
        self.solution_list = self.non_dominated_solution_archive.solution_list
    def compute_density_estimator(self):
        """Refresh density estimates for the current solution list."""
        self.density_estimator.compute_density_estimator(self.solution_list)
    def add(self, solution: S) -> bool:
        """Insert ``solution``; evict the worst entry if capacity is exceeded.

        Returns True when the inner non-dominated archive accepted the
        solution (even if an eviction happened afterwards).
        """
        success = self.non_dominated_solution_archive.add(solution)
        if success:
            if (self.size() > self.maximum_size):
                self.compute_density_estimator()
                (worst_solution, index_to_remove) = self.__find_worst_solution(self.solution_list)
                self.solution_list.pop(index_to_remove)
        return success
    def __find_worst_solution(self, solution_list: List[S]) -> S:
        """Return the worst solution and its index per ``self.comparator``.

        NOTE(review): the annotation says S but a (solution, index) tuple is
        actually returned.
        """
        if (solution_list is None):
            raise Exception('The solution list is None')
        elif (len(solution_list) == 0):
            raise Exception('The solution list is empty')
        worst_solution = solution_list[0]
        index_to_remove = 0
        # compare() < 0 selects the candidate as the new worst; index is
        # offset by 1 because iteration starts at the second element.
        for (solution_index, solution) in enumerate(solution_list[1:]):
            if (self.comparator.compare(worst_solution, solution) < 0):
                worst_solution = solution
                index_to_remove = (solution_index + 1)
        return (worst_solution, index_to_remove)
def set_cursor_enter_callback(window, cbfun):
    """Install the GLFW cursor-enter callback for ``window``.

    A reference to the ctypes-wrapped callback is stored in a module-level
    repository (keyed by the window struct's address) so it is not garbage
    collected while GLFW still holds the C function pointer.

    Args:
        window: GLFWwindow handle.
        cbfun: Python callable, or None to remove the callback.

    Returns:
        The previously installed Python callback if one existed, else None.
    """
    # The window struct's address serves as a stable dictionary key.
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
    if (window_addr in _cursor_enter_callback_repository):
        previous_callback = _cursor_enter_callback_repository[window_addr]
    else:
        previous_callback = None
    # GLFW accepts a NULL pointer (0) to clear the callback.
    if (cbfun is None):
        cbfun = 0
    # Wrap in the GLFW C function type; keep both Python and C objects alive.
    c_cbfun = _GLFWcursorenterfun(cbfun)
    _cursor_enter_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetCursorEnterCallback(window, cbfun)
    if ((previous_callback is not None) and (previous_callback[0] != 0)):
        return previous_callback[0]
def allclose(net1: nn.Module, net2: nn.Module, /) -> bool:
    """Return True when corresponding parameters of both nets are allclose.

    Pairs are formed positionally (zip stops at the shorter parameter list);
    a shape mismatch inside torch's allclose counts as "not close".
    """
    for a, b in zip(net1.parameters(), net2.parameters()):
        try:
            matched = a.allclose(b)
        except RuntimeError:
            # Incompatible shapes/dtypes -> definitely not close.
            return False
        if not matched:
            return False
    return True
def make_random_policy_rubiks_cube(rubiks_cube: RubiksCube) -> RandomPolicy:
    """Build a uniform-random policy over the cube environment's action spec."""
    action_minimum = rubiks_cube.action_spec().minimum
    action_maximum = rubiks_cube.action_spec().maximum

    def random_policy(observation: Observation, key: chex.PRNGKey) -> chex.Array:
        # One multi-component action per observation in the batch.
        num_obs = observation.cube.shape[0]
        return jax.random.randint(
            key,
            (num_obs, len(action_minimum)),
            minval=action_minimum,
            maxval=action_maximum,
        )

    return random_policy
class ResNet(nn.Module):
    """Dilated ResNet backbone returning intermediate feature maps.

    layer3 (and optionally layer4) trade stride for dilation so spatial
    resolution is preserved from layer2 onward (DeepLab-style backbone).
    The stem BatchNorm and every downsample BatchNorm have their parameters
    frozen (requires_grad=False). ``affine_par`` is a module-level flag
    controlling whether BatchNorm layers carry affine parameters.

    Fix: removed the unused local ``n`` in the weight-init loop (a dead
    leftover of a fan-out-scaled initialization; the actual init uses a
    fixed std of 0.01).

    Args:
        block: Residual block class exposing an ``expansion`` attribute.
        layers: Number of blocks in each of the four stages.
        num_classes: Accepted for interface compatibility; unused here.
        stop_layer: When 'layer4', layer4 is neither built nor run.
    """
    def __init__(self, block, layers, num_classes, stop_layer):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.stop = stop_layer
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
        # Freeze the stem BatchNorm.
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        if (not (stop_layer == 'layer4')):
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fixed-std init; fan-out scaling deliberately not used.
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks, adding a (frozen-BN) downsample
        branch when shape or dilation changes require it."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion)) or (dilation == 2) or (dilation == 4)):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), affine=affine_par))
            # Freeze the downsample BatchNorm parameters.
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def _make_pred_layer(self, block, dilation_series, padding_series, num_classes):
        """Instantiate a prediction head block (e.g. an ASPP-style module)."""
        return block(dilation_series, padding_series, num_classes)

    def forward(self, x):
        """Return the per-stage feature maps [out1, out2, out3(, out4)]."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        out1 = self.layer1(x)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        if (not (self.stop == 'layer4')):
            out4 = self.layer4(out3)
        outputs = []
        outputs.append(out1)
        outputs.append(out2)
        outputs.append(out3)
        if (not (self.stop == 'layer4')):
            outputs.append(out4)
        return outputs
def text_is_range(tag_before_tag, tag, tag_after_tag):
    """True when three (token, POS) tags form a numeric range "<CD> - <CD>"."""
    number_before = tag_before_tag[1] == 'CD'
    dash_token = tag[0] == '-'
    number_after = tag_after_tag[1] == 'CD'
    return number_before and dash_token and number_after
def validate(content, path):
    """Verify *content* against the hex digest embedded in *path*.

    Paths of the form "...-<hex8+>.<ext>" carry a truncated sha256 digest;
    paths without such a suffix are accepted unconditionally.

    Raises:
        ValueError: when the computed digest prefix disagrees with the one
            stored in the filename.
    """
    hash_match = re.search('.*-([a-f0-9]{8,})\\.[a-zA-Z0-9]{2,}$', path)
    if not hash_match:
        return
    stored_hsh = hash_match.group(1)
    # Compare only as many hex digits as the filename stores.
    computed_hsh = hashlib.sha256(content).hexdigest()[:len(stored_hsh)]
    if computed_hsh != stored_hsh:
        raise ValueError(("Computed hash '%s' is not consistent with stored hash '%s'" % (computed_hsh, stored_hsh)))
def avg_spatial_inner_product(a, b=None, batch_dims: int=0):
    """Channel inner product averaged over the flattened spatial positions.

    Flattens the last three axes of each operand to (spatial, channels) —
    presumably (H, W, C); confirm against callers — contracts the channel
    axis via batch_tensordot and averages over both remaining spatial axes.

    Args:
        a: Tensor whose trailing three axes are flattened.
        b: Optional second tensor; defaults to ``a`` (self inner product).
        batch_dims: Number of leading axes treated as batch axes.
    """
    _a = tf.reshape(a, (list(a.shape[:(- 3)]) + [(- 1), a.shape[(- 1)]]))
    if (b is None):
        _b = _a
    else:
        _b = tf.reshape(b, (list(b.shape[:(- 3)]) + [(- 1), b.shape[(- 1)]]))
    # The same leading axes of both operands act as batch axes.
    batch_axes = (2 * [list(range(batch_dims))])
    # Contract the channel (last) axis of each operand.
    prod = batch_tensordot(_a, _b, axes=[(- 1), (- 1)], batch_axes=batch_axes)
    return tf.reduce_mean(prod, [(- 3), (- 1)])
def initializer(in_filters, out_filters, name, k_size=3):
    """Create TF1 conv weight/bias variables with truncated-normal init.

    Args:
        in_filters: Number of input channels.
        out_filters: Number of output channels.
        name: Variable-name prefix; 'W' and 'B' suffixes are appended.
        k_size: Square kernel size (default 3).

    Returns:
        (weights, biases) tf.Variable pair with shapes
        [k_size, k_size, in_filters, out_filters] and [out_filters].
    """
    w1 = tf.get_variable((name + 'W'), [k_size, k_size, in_filters, out_filters], initializer=tf.truncated_normal_initializer())
    b1 = tf.get_variable((name + 'B'), [out_filters], initializer=tf.truncated_normal_initializer())
    return (w1, b1)
class BackendNull(backend.Backend):
    """No-op inference backend that returns a constant prediction.

    Useful for measuring harness overhead without running a real model.
    """

    def __init__(self):
        super(BackendNull, self).__init__()

    def version(self):
        return '-'

    def name(self):
        return 'null'

    def image_format(self):
        return 'NHWC'

    def load(self, model_path, inputs=None, outputs=None):
        """Ignore the model path; declare a fixed one-in/one-out signature."""
        self.inputs = ['input']
        self.outputs = ['output']
        return self

    def predict(self, feed):
        """Return a constant result; sleep(0) keeps the timing hook in place."""
        time.sleep(0)
        return [[0]]
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder for the given implementation name.

    Args:
        out_file: Destination path for the built dataset.
        impl: One of 'mmap', 'fasta', 'huffman', or anything else for the
            plain indexed builder.
        vocab_size: Used only by 'mmap' to pick the smallest int dtype.

    Raises:
        NotImplementedError: for 'fasta'.
        ValueError: for 'huffman' (it has a different interface).
    """
    if impl == 'fasta':
        raise NotImplementedError
    if impl == 'huffman':
        raise ValueError('Use HuffmanCodeBuilder directly as it has a different interface.')
    if impl == 'mmap':
        return MMapIndexedDatasetBuilder(out_file, dtype=best_fitting_int_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)
def read_samples(file_path, word2idx, whitelist):
    """Read task-tagged stories and build next-word-prediction samples.

    Each line is "<task>\\t<story>". A kept story yields input ids x
    (EOS + words), shifted target ids y (words + EOS) and per-token task
    ids t, all of equal length.

    Args:
        file_path: TSV file with one "<task>\\t<story>" pair per line.
        word2idx: Vocabulary mapping word -> id; must contain END_OF_STORY.
        whitelist: Task filter; '' keeps everything. NOTE(review): tasks are
            matched against whitelist.split('t'), which suggests an encoding
            like "1t2t3" — confirm against callers.

    Returns:
        List of (x, y, t) tuples.
    """
    samples = []
    with open(file_path, 'r') as f:
        for line in f:
            (task, story) = line.rstrip('\n').split('\t')
            if ((str(task) in whitelist.split('t')) or (len(whitelist) == 0)):
                words = story.split(' ')
                EOS = word2idx[END_OF_STORY]
                # Teacher forcing: inputs shifted right, targets shifted left.
                x = ([EOS] + [word2idx[word] for word in words])
                y = ([word2idx[word] for word in words] + [EOS])
                t = ([int(task)] * len(x))
                samples.append((x, y, t))
    return samples
def test_column_parallel_linear(model_parallel_size):
    """Check ColumnParallelLinear gradients against a dense reference.

    Builds an identity layer feeding a column-parallel linear layer, runs a
    weighted-sum loss backward, and compares each rank's weight/bias/input
    gradients with analytically computed dense gradients.

    Args:
        model_parallel_size: Requested model-parallel world size.
    """
    mpu.initialize_model_parallel(model_parallel_size)
    if (torch.distributed.get_rank() == 0):
        print('> testing ColumnParallelLinear with model parallel size: {}'.format(model_parallel_size))
    # Use the actual world size in case the request was clamped.
    model_parallel_size = mpu.get_model_parallel_world_size()
    seed = 12345
    set_random_seed(seed)
    input_size_coeff = 13
    input_size = (input_size_coeff * model_parallel_size)
    output_size_coeff = 17
    output_size = (output_size_coeff * model_parallel_size)
    batch_size = 7
    # IdentityLayer2D holds the input as a learnable weight so dL/dX lands
    # in identity_layer.weight.grad.
    identity_layer = IdentityLayer2D(batch_size, input_size).cuda()
    linear_layer = mpu.ColumnParallelLinear(input_size, output_size, keep_master_weight_for_test=True).cuda()
    loss_weight = torch.randn([batch_size, output_size]).cuda()
    input_ = identity_layer()
    output = linear_layer(input_)
    loss = torch.mul(output, loss_weight).sum()
    loss.backward()
    # Dense reference gradients: dL/dA = dLdY^T X, dL/db = 1^T dLdY,
    # dL/dX = dLdY A.
    dLdY = loss_weight
    X = identity_layer.weight
    A = linear_layer.master_weight.cuda()
    dLdA = torch.matmul(dLdY.t(), X)
    dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view((- 1))
    dLdX = torch.matmul(dLdY, A)
    rank = mpu.get_model_parallel_rank()
    # Each rank owns a contiguous slice of the output dimension.
    my_dLdA = torch.split(dLdA, output_size_coeff, dim=0)[rank].contiguous().clone()
    error = my_dLdA.sub(linear_layer.weight.grad).abs().max()
    torch.distributed.barrier()
    print('   error in dLdA on global rank {}: {}'.format(torch.distributed.get_rank(), error))
    assert (error < 1e-06)
    my_dLdb = torch.split(dLdb, output_size_coeff, dim=0)[rank].contiguous().clone()
    error = my_dLdb.sub(linear_layer.bias.grad).abs().max()
    torch.distributed.barrier()
    print('   error in dLdb on global rank {}: {}'.format(torch.distributed.get_rank(), error))
    assert (error < 1e-06)
    error = dLdX.sub(identity_layer.weight.grad).abs().max()
    torch.distributed.barrier()
    print('   error in dLdX on global rank {}: {}'.format(torch.distributed.get_rank(), error))
    assert (error < 1e-06)
    # Reset state so subsequent tests start clean.
    mpu.destroy_model_parallel()
    torch.distributed.barrier()
    if (torch.distributed.get_rank() == 0):
        print(' >> passed the test :-)')
def analyze_sentence(logit: List, inp_entropy: List, pred_dist: numpy.ndarray, input_doc, input_bigram, nucleus_filter: bool=True, top_p=0.9) -> tuple:
    """Print a token/entropy visualization and collect per-bigram statistics.

    Args:
        logit: Token ids of the decoded sentence.
        inp_entropy: Per-token entropies aligned with ``logit``.
        pred_dist: Per-position predictive distributions.
        input_doc: Unused in this function; kept for interface compatibility.
        input_bigram: Collection of "tok1_tok2" bigram strings from the input.
        nucleus_filter: Whether comp_entropy applies nucleus (top-p) filtering.
        top_p: Nucleus filtering threshold.

    Returns:
        (rt, return_pos): rt rows are
        [t, sentence_len, entropy, 0, token, in_input_bigram, trigram_flag
        (always False here)]; return_pos rows are [t, sentence_len, entropy].
    """
    viz_outputs = []
    for (log, ent) in zip(logit, inp_entropy):
        viz_outputs.append('{0}_{1:.1f}'.format(bpe_tokenizer.decode(log), ent))
    print(' '.join(viz_outputs))
    cand_bigram = get_bigram(logit)
    l = len(logit)
    rt = []
    return_pos = []
    # Position 0 is always recorded.
    return_pos.append([0, l, comp_entropy(pred_dist[0], nucleus_filter, top_p)])
    for (idx, big) in enumerate(cand_bigram):
        t = big[1][0]
        ent = comp_entropy(pred_dist[t], nucleus_filter, top_p)
        tok = big[1][2]
        (bigran, trigram) = (False, False)
        if (t >= 0):
            # Mark whether this candidate bigram also occurs in the input.
            _can_big = f'{big[0][2]}_{big[1][2]}'
            if (_can_big in input_bigram):
                bigran = True
            else:
                bigran = False
        rt.append([t, l, ent, 0, tok, bigran, trigram])
        return_pos.append([t, l, ent])
    return (rt, return_pos)
def decompress_data(compressor, Z_bytes):
    """Decode every compressed payload and report mean seconds per item.

    Args:
        compressor: Object with a decompress() method returning a tensor.
        Z_bytes: Iterable of compressed payloads.

    Returns:
        (concatenated numpy array of decoded items, seconds per item).
    """
    t_start = time.time()
    with torch.no_grad():
        decoded = [compressor.decompress(payload).cpu().numpy() for payload in Z_bytes]
    sec_per_img = (time.time() - t_start) / len(decoded)
    return (np.concatenate(decoded), sec_per_img)
def _test():
    """Smoke-test msdnet22_cifar10: parameter count, forward and backward."""
    import torch
    pretrained = False
    for model_fn in [msdnet22_cifar10]:
        net = model_fn(pretrained=pretrained)
        net.eval()
        n_params = _calc_width(net)
        print('m={}, {}'.format(model_fn.__name__, n_params))
        assert (model_fn != msdnet22_cifar10) or (n_params == 4839544)
        # One forward/backward pass on a CIFAR-sized input.
        out = net(torch.randn(1, 3, 32, 32))
        out.sum().backward()
        assert tuple(out.size()) == (1, 10)
class Agent():
    """Baseline agent that picks a uniformly random action for every environment."""

    def __init__(self, device_name, model_name, num_observations, num_envs, num_threads, training_data_path):
        print('Agent init !')
        # Only the env count is needed; the other args mirror the expected interface.
        self.num_envs = num_envs

    def pre_step(self, observations):
        # One random action per environment; remember the first for reporting.
        chosen = [random.randrange(0, NUM_ACTIONS) for _ in range(self.num_envs)]
        self.last_action = chosen[0]
        return chosen

    def post_step(self, new_observations, rewards, dones, mean_reward, mean_success):
        # Nothing to learn; report success.
        return 0

    def get_stats(self):
        return [('Action chosen', self.last_action), ('Pi', 3.14159)]

    def stop(self):
        pass
def log_logistic(x, mean, logscale):
    """Element-wise log-density of a logistic distribution at x.

    The distribution has the given mean and scale = exp(logscale).
    """
    centered = x - mean
    u = centered / torch.exp(logscale)
    # log f(x) = log sigmoid(u) + log sigmoid(-u) - log(scale)
    return F.logsigmoid(u) + F.logsigmoid(-u) - logscale
class ONNXModel(TorchModel):
    """TorchModel specialization that exports the concrete network to ONNX.

    NOTE(review): several methods below take `cls` or no `self` at all
    (`_dce_prob`, `from_gir`, `load`, `name_suffix`) — they were presumably
    decorated with @staticmethod/@classmethod in the original source and the
    decorators were stripped; confirm before calling them on instances.
    """

    def __init__(self, with_torch=True):
        super().__init__()
        # Whether a torch counterpart is kept alongside the ONNX proto.
        self.with_torch: bool = with_torch
        # Output subset kept after random dead-code masking (may equal the full set).
        self.masked_output_like: Optional[Dict[(str, AbsTensor)]] = None
        self.full_output_like: Optional[Dict[(str, AbsTensor)]] = None
        self.full_input_like: Optional[Dict[(str, AbsTensor)]] = None
        self.onnx_model = None

    def version(self) -> str:
        """Identify the onnx/torch versions this model was built with."""
        return f'onnx{onnx.__version__}-torch{torch.__version__}'

    def _mask_outputs(self) -> Dict[(str, AbsTensor)]:
        """Randomly drop ~half of the outputs; always keep at least one."""
        assert (self.torch_model is not None), 'Create a concrete model before masking outputs.'
        # Each output survives independently with probability 0.5.
        after_mask = {k: v for (k, v) in self.full_output_like.items() if (random.random() < 0.5)}
        if (len(after_mask) == 0):
            # Everything was dropped — resurrect one random output.
            only_key = random.sample(self.full_output_like.keys(), 1)[0]
            after_mask = {only_key: self.full_output_like[only_key]}
        return after_mask

    def _dce_prob() -> float:
        """Probability of applying dead-code masking; read from env NNSMITH_ONNX_DCE (default 0)."""
        dce_prob = 0.0
        dce_env = os.getenv('NNSMITH_ONNX_DCE')
        if (dce_env is not None):
            # Must parse as a number strictly inside (0, 1).
            if ((not dce_env.replace('.', '', 1).isdigit()) or (not (0 < float(dce_env) < 1))):
                raise ValueError(f'NNSMITH_ONNX_DCE must be [0, 1], but got {dce_env}')
            dce_prob = float(dce_env)
        return dce_prob

    def from_gir(cls: Type['ONNXModel'], gir: GraphIR, **kwargs) -> 'ONNXModel':
        """Build an ONNXModel from a graph IR via a concrete SymbolNet torch model."""
        ret = cls()
        ret.torch_model = SymbolNet(gir, **kwargs)
        ret.full_input_like = ret.torch_model.input_like
        ret.full_output_like = ret.torch_model.output_like
        ret.masked_output_like = ret.full_output_like
        # Optionally mask outputs to create dead code in the exported graph.
        if (random.random() < cls._dce_prob()):
            ret.masked_output_like = ret._mask_outputs()
        return ret

    def refine_weights(self) -> None:
        """Refine torch weights, then re-export ONNX (with dead code if outputs were masked)."""
        TorchModel.refine_weights(self)
        self.onnx_model = self.get_onnx_from_torch()
        if (set(self.masked_output_like.keys()) != set(self.full_output_like)):
            self.onnx_model = create_deadcode_onnx(self.onnx_model, self.masked_output_like.keys())

    def input_like(self) -> Dict[(str, AbsTensor)]:
        return self.full_input_like

    def output_like(self) -> Dict[(str, AbsTensor)]:
        # Masked view: dead outputs are hidden from consumers.
        return self.masked_output_like

    def dump(self, path: PathLike) -> None:
        """Serialize the ONNX model to `path`; also dumps the torch twin when kept."""
        if self.with_torch:
            TorchModel.dump(self, path.replace(self.name_suffix(), TorchModel.name_suffix()))
        if (self.onnx_model is None):
            self.onnx_model = self.get_onnx_from_torch()
        onnx.checker.check_model(self.onnx_model, full_check=True)
        onnx.save(self.onnx_model, path)

    def load(cls, path: PathLike) -> 'ONNXModel':
        """Load an ONNX model; pick up the torch twin if it sits next to it on disk."""
        ret = cls()
        ret.onnx_model = onnx.load(path)
        torch_path = path.replace(cls.name_suffix(), TorchModel.name_suffix())
        ret.with_torch = False
        # Recover I/O signatures from the ONNX graph itself.
        (full_input_like, full_output_like) = analyze_onnx_io(ret.onnx_model)
        ret.full_input_like = full_input_like
        ret.full_output_like = full_output_like
        ret.masked_output_like = ret.full_output_like
        if os.path.exists(torch_path):
            # Prefer the torch model's signatures when the twin exists.
            ret.with_torch = True
            ret.torch_model = TorchModel.load(torch_path)
            ret.full_input_like = ret.torch_model.input_like
            ret.full_output_like = ret.torch_model.output_like
        return ret

    def native_model(self):
        # Lazily export ONNX from torch on first access.
        if (self.with_torch and (self.onnx_model is None)):
            self.onnx_model = self.get_onnx_from_torch()
        return self.onnx_model

    def get_onnx_from_torch(self) -> onnx.ModelProto:
        """Export the torch model to an ONNX proto with concrete input/output dims."""
        f = BytesIO()
        torch2onnx(self.torch_model, f)
        onnx_model = onnx.load_model_from_string(f.getvalue())
        # Pin symbolic dims to the concrete shapes of the torch model.
        onnx_model = update_model_dims.update_inputs_outputs_dims(onnx_model, {k: v.shape for (k, v) in self.torch_model.input_like.items()}, {k: v.shape for (k, v) in self.torch_model.output_like.items()})
        onnx.checker.check_model(onnx_model, full_check=True)
        return onnx_model

    def name_suffix() -> str:
        """File extension used for dumped ONNX models."""
        return '.onnx'
class DiscourseParsingException(Exception):
    """Raised when discourse parsing fails.

    The original did not call ``Exception.__init__``, leaving ``args`` empty
    and making ``repr``/pickling misleading; fixed by forwarding the message.
    """

    def __init__(self, message):
        # Forward to Exception so args/repr/pickling behave normally.
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
def get_device(batch: Any) -> Optional[torch.device]:
    """Return the device of the first tensor found in a (possibly nested) batch.

    Dicts are searched through their values, lists element by element; any
    other non-tensor value yields None.
    """
    if isinstance(batch, torch.Tensor):
        return batch.device
    if isinstance(batch, dict):
        batch = list(batch.values())
    if isinstance(batch, list):
        for item in batch:
            device = get_device(item)
            if device is not None:
                return device
    return None
def sparse_reward_function(state: State) -> chex.Array:
    """Sparse reward: 1.0 when the cube in `state` is solved, else 0.0."""
    cube_is_solved = is_solved(state.cube)
    return jnp.array(cube_is_solved, float)
def draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True):
    """Draw one bounding box onto a numpy image array, mutating it in place.

    Renders on a PIL copy of the image and writes the pixels back.
    """
    pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_bounding_box_on_image(pil_image, ymin, xmin, ymax, xmax, color, thickness, display_str_list, use_normalized_coordinates)
    np.copyto(image, np.array(pil_image))
def main():
    """Sanity-check the ogbl-vessel link-prediction splits.

    Prints split sizes, per-split edge uniqueness, node-coverage plots, and
    two-sample KS tests comparing edge histograms across splits.

    Fixes vs. original: the train-vs-test KS result was mislabeled
    "test vs valid"; typos "validaztion"/"representive" corrected; unused
    `return_index` outputs of np.unique dropped.
    """
    dataset = LinkPropPredDataset(name='ogbl-vessel')
    data = dataset[0]
    edge_index = data['edge_index']
    print(f'Number of undirected training edges in the graph (accessing edge_index) :{edge_index.shape[1]}')
    print(f"Number of nodes in the graph:{data['num_nodes']}")
    print('Examining the splits')
    split = dataset.get_edge_split()
    split_train_pos = split['train']['edge'].cpu().detach().numpy()
    split_train_neg = split['train']['edge_neg'].cpu().detach().numpy()
    split_test_pos = split['test']['edge'].cpu().detach().numpy()
    split_test_neg = split['test']['edge_neg'].cpu().detach().numpy()
    split_valid_pos = split['valid']['edge'].cpu().detach().numpy()
    split_valid_neg = split['valid']['edge_neg'].cpu().detach().numpy()
    print(f'Dimensions of positive training edges: {split_train_pos.shape[0]}')
    print(f'Dimensions of positive test edges {split_test_pos.shape[0]}')
    print(f'Dimensions of positive validation edges {split_valid_pos.shape[0]}')
    print(f'Dimensions of negative training edges {split_train_neg.shape[0]}')
    print(f'Dimensions of negative test edges {split_test_neg.shape[0]}')
    print(f'Dimensions of negative validation edges {split_test_neg.shape[0]}')
    # Per-split deduplication: detect repeated edges inside each split.
    print('Train edge pos - unique elements', len(np.unique(split_train_pos, axis=0)))
    print('Train edge neg - unique elements', len(np.unique(split_train_neg, axis=0)))
    print('Valid edge pos - unique elements', len(np.unique(split_valid_pos, axis=0)))
    print('Valid edge neg - unique elements', len(np.unique(split_valid_neg, axis=0)))
    print('Test edge pos - unique elements', len(np.unique(split_test_pos, axis=0)))
    print('Test edge neg - unique elements', len(np.unique(split_test_neg, axis=0)))
    print('')
    # Global deduplication: check for overlap across splits.
    array = np.concatenate((split_train_pos, split_train_neg, split_test_pos, split_test_neg, split_valid_pos, split_valid_neg), axis=0)
    print('All unique elements calculated', len(np.unique(array, axis=0)))
    print(f'All unique elements in theory: {(((split_train_pos.shape[0] * 2) + (split_valid_pos.shape[0] * 2)) + (split_test_pos.shape[0] * 2))}')
    print('Is the data representative - e.g. are test and validation set similar to training set')
    train_nodes = np.array(split_train_pos).flatten()
    valid_nodes = np.array(split_valid_pos).flatten()
    test_nodes = np.array(split_test_pos).flatten()
    plot_nodes(train_nodes, data, str='train')
    plot_nodes(valid_nodes, data, str='valid')
    plot_nodes(test_nodes, data, str='test')
    print('')
    train_pos_hist = plot_dist(split_train_pos, data, 'train_pos')
    valid_pos_hist = plot_dist(split_valid_pos, data, 'valid_pos')
    test_pos_hist = plot_dist(split_test_pos, data, 'test_pos')
    # Two-sample Kolmogorov-Smirnov tests between positive-edge histograms.
    (stat, pvalue) = stats.ks_2samp(train_pos_hist, valid_pos_hist)
    print(f'KS-Test stats, p-value train vs valid set {(stat, pvalue)}')
    (stat, pvalue) = stats.ks_2samp(train_pos_hist, test_pos_hist)
    # BUG FIX: this comparison is train vs test; it was labeled "test vs valid".
    print(f'KS-Test stats, p-value train vs test set {(stat, pvalue)}')
    (stat, pvalue) = stats.ks_2samp(valid_pos_hist, test_pos_hist)
    print(f'KS-Test stats, p-value valid vs test set {(stat, pvalue)}')
    train_neg_hist = plot_dist(split_train_neg, data, 'train_neg')
    valid_neg_hist = plot_dist(split_valid_neg, data, 'valid_neg')
    test_neg_hist = plot_dist(split_test_neg, data, 'test_neg')
    # Same KS comparisons for the negative-edge histograms.
    (stat, pvalue) = stats.ks_2samp(train_neg_hist, valid_neg_hist)
    print(f'KS-Test stats, p-value train neg vs valid neg set {(stat, pvalue)}')
    (stat, pvalue) = stats.ks_2samp(train_neg_hist, test_neg_hist)
    print(f'KS-Test stats, p-value train neg vs test neg set {(stat, pvalue)}')
    (stat, pvalue) = stats.ks_2samp(valid_neg_hist, test_neg_hist)
    print(f'KS-Test stats, p-value valid neg vs test neg set {(stat, pvalue)}')
def get_cifar10_mlp():
    """Load CIFAR-10 for an MLP: flattened 3072-dim float inputs in [0, 1],
    one-hot labels, plus the fixed training hyper-parameters.

    Returns (nb_classes, batch_size, input_shape, x_train, x_test, y_train,
    y_test, epochs).
    """
    nb_classes = 10
    batch_size = 64
    epochs = 4
    input_shape = (3072,)
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Flatten the 32x32x3 images and scale pixel values into [0, 1].
    x_train = x_train.reshape(50000, 3072).astype('float32') / 255
    x_test = x_test.reshape(10000, 3072).astype('float32') / 255
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)
    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)
class TestBenchmarkTanhNormalDistribution():
    """Smoke and correctness tests for the TanhNormal distribution."""

    def test_new_tanh_normal(self):
        # Construction alone must not raise.
        dist = TanhNormal(torch.ones(1), torch.ones(1))
        del dist

    def test_tanh_normal_bounds(self):
        # A huge positive pre-tanh mean must saturate at +1...
        dist = TanhNormal(torch.ones(1) * 100, torch.ones(1) * 100)
        assert dist.mean <= 1.0
        del dist
        # ...and a huge negative one at -1.
        dist = TanhNormal(torch.ones(1) * -100, torch.ones(1) * 100)
        assert dist.mean >= -1.0

    def test_tanh_normal_rsample(self):
        dist = TanhNormal(torch.zeros(1), torch.ones(1))
        sample = dist.rsample()
        pre_tanh_action, action = dist.rsample_with_pre_tanh_value()
        # The returned action is exactly tanh of the pre-tanh value.
        assert (pre_tanh_action.tanh() == action).all()
        assert -1 <= action <= 1.0
        assert -1 <= sample <= 1.0
        del dist

    def test_tanh_normal_log_prob(self):
        dist = TanhNormal(torch.zeros(1), torch.ones(1))
        pre_tanh_action = torch.Tensor([[2.096]])
        action = pre_tanh_action.tanh()
        expected = torch.Tensor([-0.2798519])
        # Exact and approximate (recovered pre-tanh) log-probs must agree.
        assert torch.allclose(dist.log_prob(action, pre_tanh_action), expected)
        assert torch.allclose(dist.log_prob(action), expected)
        del dist

    def test_tanh_normal_expand(self):
        dist = TanhNormal(torch.zeros(1), torch.ones(1))
        sample = dist.expand((2,)).sample()
        assert sample.shape == torch.Size((2, 1))

    def test_tanh_normal_repr(self):
        dist = TanhNormal(torch.zeros(1), torch.ones(1))
        assert repr(dist) == 'TanhNormal'
def create_zip(data_root: Path, zip_path: Path):
    """Bundle every .npy file directly under data_root into an uncompressed zip."""
    npy_files = list(data_root.glob('*.npy'))
    # ZIP_STORED: no compression, just an archive container.
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as archive:
        for npy_file in tqdm(npy_files):
            archive.write(npy_file, arcname=npy_file.name)
class ShmemVecEnv(VecEnv):
    """Vectorized env that runs each sub-env in its own process and returns
    observations through shared-memory ctypes arrays (one buffer dict per env)
    instead of pickling them over the pipe.
    """

    def __init__(self, env_fns, spaces=None):
        # Spaces may be supplied explicitly; otherwise probe a throwaway env.
        if spaces:
            (observation_space, action_space) = spaces
        else:
            logger.log('Creating dummy env object to get spaces')
            with logger.scoped_configure(format_strs=[]):
                dummy = env_fns[0]()
                (observation_space, action_space) = (dummy.observation_space, dummy.action_space)
                dummy.close()
                del dummy
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
        (self.obs_keys, self.obs_shapes, self.obs_dtypes) = obs_space_info(observation_space)
        # One shared Array per (env, observation key); workers write into these.
        self.obs_bufs = [{k: Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys} for _ in env_fns]
        self.parent_pipes = []
        self.procs = []
        for (env_fn, obs_buf) in zip(env_fns, self.obs_bufs):
            # CloudpickleWrapper lets closures/lambdas cross the process boundary.
            wrapped_fn = CloudpickleWrapper(env_fn)
            (parent_pipe, child_pipe) = Pipe()
            proc = Process(target=_subproc_worker, args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
            proc.daemon = True
            self.procs.append(proc)
            self.parent_pipes.append(parent_pipe)
            proc.start()
            # Parent keeps only its own end of each pipe.
            child_pipe.close()
        self.waiting_step = False
        self.viewer = None

    def reset(self):
        # Drain any in-flight step before resetting.
        if self.waiting_step:
            logger.warn('Called reset() while waiting for the step to complete')
            self.step_wait()
        for pipe in self.parent_pipes:
            pipe.send(('reset', None))
        return self._decode_obses([pipe.recv() for pipe in self.parent_pipes])

    def step_async(self, actions):
        # Dispatch one action to every worker without blocking.
        assert (len(actions) == len(self.parent_pipes))
        for (pipe, act) in zip(self.parent_pipes, actions):
            pipe.send(('step', act))

    def step_wait(self):
        # Collect (obs, reward, done, info) from every worker.
        outs = [pipe.recv() for pipe in self.parent_pipes]
        (obs, rews, dones, infos) = zip(*outs)
        return (self._decode_obses(obs), np.array(rews), np.array(dones), infos)

    def close_extras(self):
        # Finish a pending step, then ask workers to shut down and join them.
        if self.waiting_step:
            self.step_wait()
        for pipe in self.parent_pipes:
            pipe.send(('close', None))
        for pipe in self.parent_pipes:
            pipe.recv()
            pipe.close()
        for proc in self.procs:
            proc.join()

    def get_images(self, mode='human'):
        # Request one rendered frame from each worker.
        for pipe in self.parent_pipes:
            pipe.send(('render', None))
        return [pipe.recv() for pipe in self.parent_pipes]

    def _decode_obses(self, obs):
        # Read observations straight out of the shared buffers; the values
        # received over the pipes only signal completion — data lives in obs_bufs.
        result = {}
        for k in self.obs_keys:
            bufs = [b[k] for b in self.obs_bufs]
            o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs]
            result[k] = np.array(o)
        return dict_to_obs(result)
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    """4x4x4 stride-2 3D convolution for discriminator sub-net `net`,
    optionally followed by a batch-norm layer.

    When f_dim_in is not given it defaults to half of f_dim_out.
    """
    if f_dim_in is None:
        f_dim_in = int(f_dim_out / 2)
    conv = tl.layers.Conv3dLayer(
        inputs,
        shape=[4, 4, 4, f_dim_in, f_dim_out],
        W_init=tf.random_normal_initializer(stddev=0.02),
        strides=[1, 2, 2, 2, 1],
        name=(('d/net_' + net) + '/conv'),
    )
    if not batch_norm:
        return conv
    return tl.layers.BatchNormLayer(conv, is_train=is_train, name=(('d/net_' + net) + '/batch_norm'))
class BackendTensorflow(backend.Backend):
    """Inference backend that runs a frozen TensorFlow GraphDef in a tf.compat.v1 Session."""

    def __init__(self):
        super(BackendTensorflow, self).__init__()

    def version(self):
        """Report the TensorFlow release and git version."""
        return ((tf.__version__ + '/') + tf.__git_version__)

    def name(self):
        """Backend identifier used in reporting."""
        return 'tensorflow'

    def image_format(self):
        # TensorFlow's native layout is channels-last.
        return 'NHWC'

    def load(self, model_path, inputs=None, outputs=None):
        """Load a frozen graph from `model_path` and open a session for it.

        `inputs`/`outputs` are tensor names ("name:0"); both are required.
        Returns self so calls can be chained.
        """
        if (not inputs):
            raise ValueError('BackendTensorflow needs inputs')
        if (not outputs):
            raise ValueError('BackendTensorflow needs outputs')
        self.outputs = outputs
        self.inputs = inputs
        infer_config = tf.compat.v1.ConfigProto()
        # Thread pools are overridable via env vars, defaulting to all cores.
        infer_config.intra_op_parallelism_threads = (int(os.environ['TF_INTRA_OP_PARALLELISM_THREADS']) if ('TF_INTRA_OP_PARALLELISM_THREADS' in os.environ) else os.cpu_count())
        infer_config.inter_op_parallelism_threads = (int(os.environ['TF_INTER_OP_PARALLELISM_THREADS']) if ('TF_INTER_OP_PARALLELISM_THREADS' in os.environ) else os.cpu_count())
        infer_config.use_per_session_threads = 1
        graph_def = tf.compat.v1.GraphDef()
        with tf.compat.v1.gfile.FastGFile(model_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        # Try optimize_for_inference with float32 first, then uint8; the first
        # dtype that succeeds wins, and failures fall back to the raw graph.
        for as_datatype_enum in [dtypes.float32.as_datatype_enum, dtypes.uint8.as_datatype_enum]:
            try:
                optimized_graph_def = optimize_for_inference(graph_def, [item.split(':')[0] for item in inputs], [item.split(':')[0] for item in outputs], as_datatype_enum, False)
                graph_def = optimized_graph_def
                break
            except ValueError:
                pass
        g = tf.compat.v1.import_graph_def(graph_def, name='')
        self.sess = tf.compat.v1.Session(graph=g, config=infer_config)
        return self

    def predict(self, feed):
        """Run the loaded graph on `feed` and return the requested output tensors."""
        return self.sess.run(self.outputs, feed_dict=feed)
_module()
class SemiLossCPS(nn.Module):
    """Cross-pseudo-supervision loss: each branch's argmax supervises the other."""

    def __init__(self, loss_weight=1.0, avg_non_ignore=True, ignore_index=255):
        super(SemiLossCPS, self).__init__()
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
        self.criterion = CrossEntropyLoss(loss_weight=loss_weight, avg_non_ignore=avg_non_ignore)

    def forward(self, strong_logits, weak_logits):
        # Pseudo-labels from the weak branch supervise the strong branch...
        _, weak_pseudo = torch.max(weak_logits, dim=1)
        strong_loss = self.criterion(strong_logits.float(), weak_pseudo.long(), ignore_index=self.ignore_index)
        # ...and pseudo-labels from the strong branch supervise the weak branch.
        _, strong_pseudo = torch.max(strong_logits, dim=1)
        weak_loss = self.criterion(weak_logits.float(), strong_pseudo.long(), ignore_index=self.ignore_index)
        return strong_loss + weak_loss
_model
def seresnext101_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """SE-ResNeXt-101 (32x4d): SENet with SEResNeXtBottleneck blocks [3, 4, 23, 3]."""
    cfg = default_cfgs['seresnext101_32x4d']
    model = SENet(
        SEResNeXtBottleneck,
        [3, 4, 23, 3],
        groups=32,
        reduction=16,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs,
    )
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
def assign_id_to_src(src_name, src_value):
    """Return the grid-column id for `src_value`.

    `src_value` may be the id string itself or an object (e.g. a
    plotly.grid_objs.Column) exposing an `id` attribute assigned by the
    Plotly webserver.

    Raises:
        exceptions.InputError: if `src_value` has no `id` attribute, or its
            id is empty (column not yet uploaded).
    """
    if isinstance(src_value, six.string_types):
        src_id = src_value
    else:
        try:
            src_id = src_value.id
        # Narrowed from a bare `except:` — only a missing `id` attribute
        # should produce this error message.
        except AttributeError:
            err = '{0} does not have an `id` property. {1} needs to be assigned to either an object with an `id` (like a plotly.grid_objs.Column) or a string. The `id` is a unique identifier assigned by the Plotly webserver to this grid column.'
            err = err.format(src_name, str(src_value))
            raise exceptions.InputError(err)
    if (src_id == ''):
        # BUG FIX: str.format returns a new string; the original discarded
        # the formatted result and raised the unformatted template.
        err = exceptions.COLUMN_NOT_YET_UPLOADED_MESSAGE.format(column_name=src_value.name, reference=src_name)
        raise exceptions.InputError(err)
    return src_id
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.