code stringlengths 101 5.91M |
|---|
def log_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively dump a (possibly nested) EasyDict config to *logger*.

    Nested EasyDict sections are announced as ``<pre>.<key> = edict()`` and
    then logged recursively with an extended prefix; every leaf entry is
    logged as ``<pre>.<key>: <value>``.
    """
    for key, val in cfg.items():
        if isinstance(val, EasyDict):
            # Announce the nested section, then recurse with a longer prefix.
            logger.info('\n%s.%s = edict()' % (pre, key))
            log_config_to_file(val, pre=pre + '.' + key, logger=logger)
            continue
        logger.info('%s.%s: %s' % (pre, key, val))
class VPG(RLAlgorithm):
    """Vanilla Policy Gradient (REINFORCE) with a learned value-function baseline.

    Maximizes the advantage-weighted log-likelihood of sampled actions, with
    optional entropy bonuses ('max' adds entropy to rewards, 'regularized'
    adds it to the objective).

    Args:
        env_spec: Environment specification.
        policy: Torch policy to optimize.
        value_function: Torch value function used as a baseline.
        policy_optimizer: OptimizerWrapper for the policy (default: Adam).
        vf_optimizer: OptimizerWrapper for the value function (default: Adam).
        max_path_length: Maximum rollout length; shorter paths are padded.
        num_train_per_epoch: Number of train_once calls per epoch.
        discount: Reward discount factor.
        gae_lambda: Lambda for generalized advantage estimation.
        center_adv: Standardize advantages (zero mean, unit variance).
        positive_adv: Shift advantages so their minimum is zero.
        policy_ent_coeff: Coefficient on the entropy bonus.
        use_softplus_entropy: Pass entropies through softplus (keeps them positive).
        stop_entropy_gradient: Detach the entropy term from the graph.
        entropy_method: One of 'max', 'regularized', 'no_entropy'.
    """

    def __init__(self, env_spec, policy, value_function, policy_optimizer=None, vf_optimizer=None, max_path_length=500, num_train_per_epoch=1, discount=0.99, gae_lambda=1, center_adv=True, positive_adv=False, policy_ent_coeff=0.0, use_softplus_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy'):
        self.discount = discount
        self.policy = policy
        self.max_path_length = max_path_length
        self._value_function = value_function
        self._gae_lambda = gae_lambda
        self._center_adv = center_adv
        self._positive_adv = positive_adv
        self._policy_ent_coeff = policy_ent_coeff
        self._use_softplus_entropy = use_softplus_entropy
        self._stop_entropy_gradient = stop_entropy_gradient
        self._entropy_method = entropy_method
        self._n_samples = num_train_per_epoch
        self._env_spec = env_spec
        self._maximum_entropy = (entropy_method == 'max')
        # Fix: attribute was misspelled '_entropy_regularzied'; it is private
        # and only defined/read inside this class, so renaming is safe.
        self._entropy_regularized = (entropy_method == 'regularized')
        self._check_entropy_configuration(entropy_method, center_adv, stop_entropy_gradient, policy_ent_coeff)
        self._episode_reward_mean = collections.deque(maxlen=100)
        self.sampler_cls = OnPolicyVectorizedSampler
        if policy_optimizer:
            self._policy_optimizer = policy_optimizer
        else:
            self._policy_optimizer = OptimizerWrapper(torch.optim.Adam, policy)
        if vf_optimizer:
            self._vf_optimizer = vf_optimizer
        else:
            self._vf_optimizer = OptimizerWrapper(torch.optim.Adam, value_function)
        # Snapshot of the policy before each update, used for KL diagnostics.
        self._old_policy = copy.deepcopy(self.policy)

    @staticmethod
    def _check_entropy_configuration(entropy_method, center_adv, stop_entropy_gradient, policy_ent_coeff):
        """Validate the entropy configuration; raise ValueError when inconsistent.

        Fix: declared as a @staticmethod -- the original defined this without
        ``self`` yet invoked it via ``self._check_entropy_configuration(...)``,
        which passed five arguments to a four-parameter method (TypeError).
        """
        if entropy_method not in ('max', 'regularized', 'no_entropy'):
            raise ValueError('Invalid entropy_method')
        if entropy_method == 'max':
            if center_adv:
                raise ValueError('center_adv should be False when entropy_method is max')
            if not stop_entropy_gradient:
                raise ValueError('stop_gradient should be True when entropy_method is max')
        if entropy_method == 'no_entropy':
            if policy_ent_coeff != 0.0:
                raise ValueError('policy_ent_coeff should be zero when there is no entropy method')

    def train_once(self, itr, paths):
        """Perform one optimization step from sampled paths.

        Args:
            itr: Iteration number (for logging).
            paths: List of sampled trajectory dicts.

        Returns:
            Mean undiscounted return of the sampled paths.
        """
        (obs, actions, rewards, returns, valids, baselines) = self.process_samples(paths)
        if self._maximum_entropy:
            # Max-entropy RL: fold the entropy bonus directly into the rewards.
            policy_entropies = self._compute_policy_entropy(obs)
            rewards += (self._policy_ent_coeff * policy_entropies)
        obs_flat = torch.cat(filter_valids(obs, valids))
        actions_flat = torch.cat(filter_valids(actions, valids))
        rewards_flat = torch.cat(filter_valids(rewards, valids))
        returns_flat = torch.cat(filter_valids(returns, valids))
        advs_flat = self._compute_advantage(rewards, valids, baselines)
        # Record pre-update diagnostics without building a graph.
        with torch.no_grad():
            policy_loss_before = self._compute_loss_with_adv(obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_before = self._value_function.compute_loss(obs_flat, returns_flat)
            kl_before = self._compute_kl_constraint(obs)
        self._train(obs_flat, actions_flat, rewards_flat, returns_flat, advs_flat)
        with torch.no_grad():
            policy_loss_after = self._compute_loss_with_adv(obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_after = self._value_function.compute_loss(obs_flat, returns_flat)
            kl_after = self._compute_kl_constraint(obs)
            policy_entropy = self._compute_policy_entropy(obs)
        with tabular.prefix(self.policy.name):
            tabular.record('/LossBefore', policy_loss_before.item())
            tabular.record('/LossAfter', policy_loss_after.item())
            tabular.record('/dLoss', (policy_loss_before - policy_loss_after).item())
            tabular.record('/KLBefore', kl_before.item())
            tabular.record('/KL', kl_after.item())
            tabular.record('/Entropy', policy_entropy.mean().item())
        with tabular.prefix(self._value_function.name):
            tabular.record('/LossBefore', vf_loss_before.item())
            tabular.record('/LossAfter', vf_loss_after.item())
            tabular.record('/dLoss', (vf_loss_before.item() - vf_loss_after.item()))
        # The updated policy becomes the reference for the next KL computation.
        self._old_policy.load_state_dict(self.policy.state_dict())
        undiscounted_returns = log_performance(itr, TrajectoryBatch.from_trajectory_list(self._env_spec, paths), discount=self.discount)
        return np.mean(undiscounted_returns)

    def train(self, runner):
        """Run the training loop driven by *runner*; return the last mean return."""
        last_return = None
        for _ in runner.step_epochs():
            for _ in range(self._n_samples):
                runner.step_path = runner.obtain_samples(runner.step_itr)
                last_return = self.train_once(runner.step_itr, runner.step_path)
                runner.step_itr += 1
        return last_return

    def _train(self, obs, actions, rewards, returns, advs):
        """Run minibatch optimization of the policy, then the value function."""
        for dataset in self._policy_optimizer.get_minibatch(obs, actions, rewards, advs):
            self._train_policy(*dataset)
        for dataset in self._vf_optimizer.get_minibatch(obs, returns):
            self._train_value_function(*dataset)

    def _train_policy(self, obs, actions, rewards, advantages):
        """One policy gradient step; returns the (scalar) policy loss."""
        self._policy_optimizer.zero_grad()
        loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
        loss.backward()
        self._policy_optimizer.step()
        return loss

    def _train_value_function(self, obs, returns):
        """One value-function regression step; returns the (scalar) vf loss."""
        self._vf_optimizer.zero_grad()
        loss = self._value_function.compute_loss(obs, returns)
        loss.backward()
        self._vf_optimizer.step()
        return loss

    def _compute_loss(self, obs, actions, rewards, valids, baselines):
        """Compute the policy loss from padded (per-path) tensors."""
        obs_flat = torch.cat(filter_valids(obs, valids))
        actions_flat = torch.cat(filter_valids(actions, valids))
        rewards_flat = torch.cat(filter_valids(rewards, valids))
        advantages_flat = self._compute_advantage(rewards, valids, baselines)
        return self._compute_loss_with_adv(obs_flat, actions_flat, rewards_flat, advantages_flat)

    def _compute_loss_with_adv(self, obs, actions, rewards, advantages):
        """Negative mean objective (a loss to minimize), with optional entropy term."""
        objectives = self._compute_objective(advantages, obs, actions, rewards)
        if self._entropy_regularized:
            policy_entropies = self._compute_policy_entropy(obs)
            objectives += (self._policy_ent_coeff * policy_entropies)
        return -objectives.mean()

    def _compute_advantage(self, rewards, valids, baselines):
        """GAE advantages, flattened over valid steps, optionally centered/shifted."""
        advantages = compute_advantages(self.discount, self._gae_lambda, self.max_path_length, baselines, rewards)
        advantage_flat = torch.cat(filter_valids(advantages, valids))
        if self._center_adv:
            means = advantage_flat.mean()
            variance = advantage_flat.var()
            advantage_flat = (advantage_flat - means) / (variance + 1e-08)
        if self._positive_adv:
            advantage_flat -= advantage_flat.min()
        return advantage_flat

    def _compute_kl_constraint(self, obs):
        """Mean KL divergence between the pre-update and current policy."""
        with torch.no_grad():
            old_dist = self._old_policy(obs)[0]
        new_dist = self.policy(obs)[0]
        kl_constraint = torch.distributions.kl.kl_divergence(old_dist, new_dist)
        return kl_constraint.mean()

    def _compute_policy_entropy(self, obs):
        """Policy entropy at *obs*, optionally detached and/or softplus-mapped."""
        if self._stop_entropy_gradient:
            with torch.no_grad():
                policy_entropy = self.policy(obs)[0].entropy()
        else:
            policy_entropy = self.policy(obs)[0].entropy()
        if self._use_softplus_entropy:
            # Softplus keeps the entropy estimate positive.
            policy_entropy = F.softplus(policy_entropy)
        return policy_entropy

    def _compute_objective(self, advantages, obs, actions, rewards):
        """Advantage-weighted log-likelihood (rewards unused in vanilla PG)."""
        del rewards
        log_likelihoods = self.policy(obs)[0].log_prob(actions)
        return log_likelihoods * advantages

    def process_samples(self, paths):
        """Pad sampled paths to max_path_length and compute returns/baselines.

        Returns:
            Tuple of (obs, actions, rewards, returns, valids, baselines), all
            stacked per-path tensors; valids holds the true path lengths.
        """
        valids = torch.Tensor([len(path['actions']) for path in paths]).int()
        obs = torch.stack([pad_to_last(path['observations'], total_length=self.max_path_length, axis=0) for path in paths])
        actions = torch.stack([pad_to_last(path['actions'], total_length=self.max_path_length, axis=0) for path in paths])
        rewards = torch.stack([pad_to_last(path['rewards'], total_length=self.max_path_length) for path in paths])
        returns = torch.stack([pad_to_last(tu.discount_cumsum(path['rewards'], self.discount).copy(), total_length=self.max_path_length) for path in paths])
        with torch.no_grad():
            baselines = self._value_function(obs)
        return (obs, actions, rewards, returns, valids, baselines)
def get_new_candidate_chans(chan_info_fp, scores_fp, scores_lang_fp, prev_candidate_fps, out_fp, min_prob=0.9, min_subs=10000):
    """Select new candidate channels by classifier score / subscriber count.

    Fix: all input and output files are now opened with context managers so
    handles are closed deterministically (they previously leaked).

    Args:
        chan_info_fp: TSV of ``chan_id, chan_int, chan_name, scrap_subs, tot_subs``.
        scores_fp: TSV of ``chan_id, pred_prob`` classifier scores.
        scores_lang_fp: optional TSV of ``chan_id, pred_prob`` language scores;
            when given, only channels with prob >= 0.5 are eligible.
        prev_candidate_fps: comma-separated list of files with previously
            selected channel ids (never re-emitted).
        out_fp: output path; selected ids are written newline-separated.
        min_prob: minimum classifier probability for a new candidate.
        min_subs: minimum total subscriber count for a new candidate.
    """
    # Channels passing the language classifier (prob >= 0.5).
    eng_chan_s = set()
    if scores_lang_fp is not None:
        with open(scores_lang_fp) as lang_f:
            for line in lang_f:
                (chan_id, pred_prob) = line.strip('\n').split('\t')
                if float(pred_prob) >= 0.5:
                    eng_chan_s.add(chan_id)
    # Previously selected candidates are excluded from the new output.
    prev_cand_chan_s = set()
    for fp in prev_candidate_fps.split(','):
        with open(fp) as prev_f:
            for line in prev_f:
                prev_cand_chan_s.add(line.strip())
    # Classifier probability per channel.
    chan_prob_d = {}
    with open(scores_fp) as scores_f:
        for line in scores_f:
            (chan_id, pred_prob) = line.strip('\n').split('\t')
            chan_prob_d[chan_id] = float(pred_prob)
    cand_s = set()
    new_pos_c = 0
    new_heur_c = 0
    chan_subs_d = {}
    new_pos_tot_subs = 0
    with open(chan_info_fp) as info_f:
        for line in info_f:
            (chan_id, chan_int, chan_name, chan_scrap_subs, chan_tot_subs) = line.strip('\n').split('\t')
            chan_tot_subs_int = int(float(chan_tot_subs))
            chan_subs_d[chan_id] = chan_tot_subs_int
            if chan_id in prev_cand_chan_s:
                continue
            if ((chan_id in chan_prob_d) and (chan_prob_d[chan_id] >= min_prob) and (chan_tot_subs_int >= min_subs) and ((scores_lang_fp is None) or (chan_id in eng_chan_s))):
                new_pos_c += 1
                new_pos_tot_subs += chan_tot_subs_int
                cand_s.add(chan_id)
            elif chan_tot_subs_int >= 3000000:
                # Heuristic: very large channels qualify regardless of score.
                cand_s.add(chan_id)
                new_heur_c += 1
    # Tally how previously selected candidates would be classified today.
    prev_cand_neg_c = 0
    prev_cand_pos_c = 0
    prev_can_pos_tot_subs = 0
    for chan_id in prev_cand_chan_s:
        if chan_id in chan_prob_d:
            if (chan_prob_d[chan_id] >= min_prob) and ((scores_lang_fp is None) or (chan_id in eng_chan_s)):
                prev_cand_pos_c += 1
                prev_can_pos_tot_subs += chan_subs_d[chan_id]
            else:
                prev_cand_neg_c += 1
    with open(out_fp, 'w') as of:
        of.write('\n'.join(cand_s))
    print('All previous candidate negative predictions:', prev_cand_neg_c)
    print('All previous candidate positive predictions:', prev_cand_pos_c)
    print('All previous candidate positive predictions - Tot Subs:', prev_can_pos_tot_subs)
    print('New heuristic chans:', new_heur_c)
    print('New positive chans:', new_pos_c)
    print('New positive chans - Tot Subs:', new_pos_tot_subs)
def test_implicit_conversion():
    """Unscoped C++ enum bound via pybind11: members convert implicitly to int.

    ``m`` is the compiled test extension module -- presumably pybind11's
    class-scoped unscoped-enum fixture; TODO confirm against the C++ side.
    """
    # str() of an enum member includes the enum type name.
    assert (str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == 'EMode.EFirstMode')
    # Unscoped enum members are also exposed directly on the enclosing class.
    assert (str(m.ClassWithUnscopedEnum.EFirstMode) == 'EMode.EFirstMode')
    f = m.ClassWithUnscopedEnum.test_function
    first = m.ClassWithUnscopedEnum.EFirstMode
    second = m.ClassWithUnscopedEnum.ESecondMode
    # NOTE(review): assumes test_function(first) returns a value equal to 1 --
    # confirm its C++ definition.
    assert (f(first) == 1)
    # Enum results compare consistently among themselves (== and != agree).
    assert (f(first) == f(first))
    assert (not (f(first) != f(first)))
    assert (f(first) != f(second))
    assert (not (f(first) == f(second)))
    # ...and compare implicitly against plain ints (unscoped enums convert to int).
    assert (f(first) == int(f(first)))
    assert (not (f(first) != int(f(first))))
    assert (f(first) != int(f(second)))
    assert (not (f(first) == int(f(second))))
    # Hashing matches the underlying int, so enum keys overwrite prior entries.
    x = {f(first): 1, f(second): 2}
    x[f(first)] = 3
    x[f(second)] = 4
    assert (str(x) == '{EMode.EFirstMode: 3, EMode.ESecondMode: 4}')
class Encoder_cross(nn.Module):
    """Stack of Block_ViT_cross layers processing two streams, with a shared
    final LayerNorm applied to both outputs."""

    def __init__(self, config, vis, channel_num):
        super(Encoder_cross, self).__init__()
        self.vis = vis
        self.layer = nn.ModuleList()
        self.encoder_norm = LayerNorm(channel_num[4], eps=1e-06)
        # One independent (deep-copied) block per configured transformer layer.
        for _ in range(config.transformer['num_layers']):
            self.layer.append(copy.deepcopy(Block_ViT_cross(config, vis, channel_num)))

    def forward(self, emb, embd):
        source, target = emb, embd
        for block in self.layer:
            source, target = block(source, target)
        # The same norm instance is applied to both streams.
        return (self.encoder_norm(source), self.encoder_norm(target))
def _matrix_test_right_descent(M, i, n, zero):
for j in range(n):
c = M[(j, i)]
if (c < zero):
return True
elif (c > zero):
return False
raise AssertionError('a zero column, so there must be a bug') |
class EM1D_TD_LineCurrent_Jac_layers_ProblemTests(unittest.TestCase):
    """Derivative checks (Jvec / Jtvec) for a layered-earth 1D TDEM simulation
    driven by two square line-current loops with trapezoidal waveforms.
    """

    def setUp(self):
        """Build a two-source TDEM survey over a two-layer conductivity model."""
        # Trapezoidal waveform breakpoints (s) and normalized currents for the
        # low-moment (lm) and high-moment (hm) transmitters.
        lm_waveform_times = np.r_[((- 0.001041), (- 0.000985), 0.0, 4e-06)]
        lm_waveform_current = np.r_[(0.0, 1.0, 1.0, 0.0)]
        hm_waveform_times = np.r_[((- 0.008333), (- 0.008033), 0.0, 5.6e-06)]
        hm_waveform_current = np.r_[(0.0, 1.0, 1.0, 0.0)]
        # Off-time measurement gates (s) for each moment.
        lm_off_time = np.array([1.149e-05, 1.35e-05, 1.549e-05, 1.75e-05, 2e-05, 2.299e-05, 2.649e-05, 3.099e-05, 3.7e-05, 4.45e-05, 5.35e-05, 6.499e-05, 7.949e-05, 9.799e-05, 0.0001215, 0.0001505, 0.0001875, 0.000234, 0.000292, 0.0003655, 0.000458, 0.0005745, 0.000721])
        hm_off_time = np.array([9.81e-05, 0.0001216, 0.0001506, 0.0001876, 0.0002341, 0.0002921, 0.0003656, 0.0004581, 0.0005746, 0.0007211, 0.0009056, 0.001138, 0.001431, 0.001799, 0.002262, 0.002846, 0.00358, 0.004505, 0.00567, 0.007135])
        # Closed 40 m x 40 m square loop at the surface (z = 0).
        x_path = np.array([(- 20), (- 20), 20, 20, (- 20)])
        y_path = np.array([(- 20), 20, 20, (- 20), (- 20)])
        wire_paths = np.c_[(x_path, y_path, np.zeros(5))]
        source_list = []
        receiver_list_lm = []
        receiver_list_hm = []
        # Single z-oriented dB/dt receiver at the loop center.
        receiver_location = np.array([[0, 0, 0]])
        receiver_orientation = 'z'
        receiver_list_lm.append(tdem.receivers.PointMagneticFluxTimeDerivative(receiver_location, times=lm_off_time, orientation=receiver_orientation))
        receiver_list_hm.append(tdem.receivers.PointMagneticFluxTimeDerivative(receiver_location, times=hm_off_time, orientation=receiver_orientation))
        lm_wave = tdem.sources.PiecewiseLinearWaveform(lm_waveform_times, lm_waveform_current)
        hm_wave = tdem.sources.PiecewiseLinearWaveform(hm_waveform_times, hm_waveform_current)
        source_lm = tdem.sources.LineCurrent(receiver_list_lm, wire_paths, waveform=lm_wave)
        source_hm = tdem.sources.LineCurrent(receiver_list_hm, wire_paths, waveform=hm_wave)
        source_list.append(source_lm)
        source_list.append(source_hm)
        survey = tdem.survey.Survey(source_list)
        # Two-layer model: 10 ohm-m over 1 ohm-m, 30 m thick top layer.
        sigma = np.array([(1.0 / 10), (1.0 / 1)])
        thicknesses = np.array([30.0])
        self.survey = survey
        self.sigma = sigma
        self.thicknesses = thicknesses
        self.nlayers = (len(thicknesses) + 1)

    def test_EM1DTDJvec_Layers(self):
        """Check Jvec against a numerical derivative of dpred (log-sigma model)."""
        sigma_map = maps.ExpMap(nP=self.nlayers)
        sim = tdem.Simulation1DLayered(survey=self.survey, thicknesses=self.thicknesses, sigmaMap=sigma_map)
        # Model is log-conductivity (ExpMap); element-wise product broadcasts.
        m_1D = np.log((np.ones(self.nlayers) * self.sigma))

        def fwdfun(m):
            resp = sim.dpred(m)
            return resp

        def jacfun(m, dm):
            Jvec = sim.Jvec(m, dm)
            return Jvec
        dm = (m_1D * 0.5)

        def derChk(m):
            return [fwdfun(m), (lambda mx: jacfun(m, mx))]
        passed = tests.check_derivative(derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15)
        self.assertTrue(passed)

    def test_EM1DTDJtvec_Layers(self):
        """Check Jtvec via the gradient of a misfit between two layered models."""
        sigma_map = maps.ExpMap(nP=self.nlayers)
        sim = tdem.Simulation1DLayered(survey=self.survey, thicknesses=self.thicknesses, sigmaMap=sigma_map)
        sigma_layer = 0.1
        # Perturb the second layer to generate synthetic observed data.
        sigma = (np.ones(self.nlayers) * self.sigma)
        sigma[1] = sigma_layer
        m_true = np.log(sigma)
        dobs = sim.dpred(m_true)
        m_ini = np.log((np.ones(self.nlayers) * self.sigma))
        resp_ini = sim.dpred(m_ini)
        dr = (resp_ini - dobs)

        def misfit(m, dobs):
            dpred = sim.dpred(m)
            misfit = (0.5 * (np.linalg.norm((dpred - dobs)) ** 2))
            # Gradient of the misfit: J^T applied to the initial residual.
            # NOTE(review): uses the fixed residual `dr`, not (dpred - dobs) --
            # presumably intentional for the derivative test; confirm.
            dmisfit = sim.Jtvec(m, dr)
            return (misfit, dmisfit)

        def derChk(m):
            return misfit(m, dobs)
        passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-26)
        self.assertTrue(passed)
class FiniteDimensionalNilpotentLieAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
    """Category of finite-dimensional nilpotent Lie algebras with a basis."""

    _base_category_class_and_axiom = (LieAlgebras.FiniteDimensional.WithBasis, 'Nilpotent')

    class ParentMethods():

        def _test_nilpotency(self, **options):
            """Verify the lower central series terminates and matches ``step()``."""
            tester = self._tester(**options)
            lcs = self.lower_central_series(submodule=True)
            # The series must end in the zero module for a nilpotent algebra.
            tester.assertEqual(lcs[-1].dimension(), 0, msg='final term of lower central series is non-zero')
            step = self.step()
            tester.assertEqual(len(lcs) - 1, step, msg=('claimed nilpotency step %d does not match the actual nilpotency step %d' % (step, len(lcs) - 1)))

        def lie_group(self, name='G', **kwds):
            """Return the Lie group integrating this nilpotent Lie algebra."""
            from sage.groups.lie_gps.nilpotent_lie_group import NilpotentLieGroup
            return NilpotentLieGroup(self, name, **kwds)

        def step(self):
            """Nilpotency step: length of the lower central series minus one (cached)."""
            try:
                return self._step
            except AttributeError:
                self._step = len(self.lower_central_series(submodule=True)) - 1
                return self._step

        def is_nilpotent(self):
            """Objects of this category are nilpotent by definition."""
            return True
def test_linesearch_powell_bounded():
    """Test the bounded Powell line-search helper on a separable quadratic.

    For each start point ``p0``, box ``[lower_bound, upper_bound]`` and
    direction ``xi``, the optimal (possibly bound-clipped) step length is
    ``l``; the search must return the function value, the point and the
    scaled direction at that step. The four originally duplicated loops are
    factored into one helper -- note that when ``p0`` is the origin,
    ``l * xi == p0 + l * xi``, so the unified expectation covers all cases.
    """
    linesearch_powell = optimize._optimize._linesearch_powell

    def func(x):
        # Separable quadratic with unconstrained minimum at (-1, 2, 1.5, -0.4).
        return np.sum(((x - np.array([(- 1.0), 2.0, 1.5, (- 0.4)])) ** 2))

    def check(p0, lower_bound, upper_bound, all_tests):
        """Run the line search for each (xi, l) case and verify the triple."""
        fval = func(p0)
        for (xi, l) in all_tests:
            (f, p, direction) = linesearch_powell(func, p0, xi, tol=1e-05, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval)
            assert_allclose(f, func((p0 + (l * xi))), atol=1e-06)
            assert_allclose(p, (p0 + (l * xi)), atol=1e-06)
            assert_allclose(direction, (l * xi), atol=1e-06)

    # Bounds wide enough that every 1-D minimizer is interior.
    check(np.array([0.0, 0, 0, 0]), np.array(([(- 2.0)] * 4)), np.array(([2.0] * 4)),
          ((np.array([1.0, 0, 0, 0]), (- 1)), (np.array([0.0, 1, 0, 0]), 2), (np.array([0.0, 0, 1, 0]), 1.5), (np.array([0.0, 0, 0, 1]), (- 0.4)), (np.array([(- 1.0), 0, 1, 0]), 1.25), (np.array([0.0, 0, 1, 1]), 0.55), (np.array([2.0, 0, (- 1), 1]), (- 0.65))))
    # Tight finite bounds: several steps are clipped to the box.
    lower_bound = np.array((([(- 0.3)] * 3) + [(- 1)]))
    upper_bound = np.array((([0.45] * 3) + [0.9]))
    check(np.array([0.0, 0, 0, 0]), lower_bound, upper_bound,
          ((np.array([1.0, 0, 0, 0]), (- 0.3)), (np.array([0.0, 1, 0, 0]), 0.45), (np.array([0.0, 0, 1, 0]), 0.45), (np.array([0.0, 0, 0, 1]), (- 0.4)), (np.array([(- 1.0), 0, 1, 0]), 0.3), (np.array([0.0, 0, 1, 1]), 0.45), (np.array([2.0, 0, (- 1), 1]), (- 0.15))))
    # Same tight bounds, but starting outside the origin.
    check(np.array([(- 1.0), 0, 0, 2]), lower_bound, upper_bound,
          ((np.array([1.0, 0, 0, 0]), 0.7), (np.array([0.0, 1, 0, 0]), 0.45), (np.array([0.0, 0, 1, 0]), 0.45), (np.array([0.0, 0, 0, 1]), (- 2.4))))
    # Mixed finite/infinite bounds.
    lower_bound = np.array([(- 0.3), (- np.inf), (- np.inf), (- 1)])
    upper_bound = np.array([np.inf, 0.45, np.inf, 0.9])
    check(np.array([0.0, 0, 0, 0]), lower_bound, upper_bound,
          ((np.array([1.0, 0, 0, 0]), (- 0.3)), (np.array([0.0, 1, 0, 0]), 0.45), (np.array([0.0, 0, 1, 0]), 1.5), (np.array([0.0, 0, 0, 1]), (- 0.4)), (np.array([(- 1.0), 0, 1, 0]), 0.3), (np.array([0.0, 0, 1, 1]), 0.55), (np.array([2.0, 0, (- 1), 1]), (- 0.15))))
    check(np.array([(- 1.0), 0, 0, 2]), lower_bound, upper_bound,
          ((np.array([1.0, 0, 0, 0]), 0.7), (np.array([0.0, 1, 0, 0]), 0.45), (np.array([0.0, 0, 1, 0]), 1.5), (np.array([0.0, 0, 0, 1]), (- 2.4))))
def train(train_dataloader, img_encoder, text_encoder, optimizer, criterion, epoch, args):
    """Train one epoch of region-proposal grounding against language commands.

    Each region proposal is scored against the command sentence via the dot
    product of L2-normalized image and sentence embeddings; ``criterion`` is
    applied to the per-region scores and the ground-truth region index.

    Args:
        train_dataloader: yields batches with keys 'rpn_image' (assumed
            (B, R, C, H, W) -- TODO confirm), 'command', 'command_length',
            'rpn_gt' and 'rpn_iou'.
        img_encoder: CUDA model embedding region crops.
        text_encoder: CUDA model embedding commands; returns (_, sentence_features).
        optimizer: optimizer over both encoders' parameters.
        criterion: loss over (scores, gt); gt may contain ignore_index entries.
        epoch: epoch number (display only).
        args: needs ``print_freq``.
    """
    m_losses = AverageMeter('Loss', ':.4e')
    m_top1 = AverageMeter('', ':6.2f')
    m_iou = AverageMeter('IoU', ':6.2f')
    m_ap50 = AverageMeter('AP50', ':6.2f')
    progress = ProgressMeter(len(train_dataloader), [m_losses, m_top1, m_iou, m_ap50], prefix='Epoch: [{}]'.format(epoch))
    img_encoder.train()
    text_encoder.train()
    ignore_index = train_dataloader.dataset.ignore_index
    for (i, batch) in enumerate(train_dataloader):
        # NOTE(review): redundant -- zero_grad is called again just before backward().
        optimizer.zero_grad()
        region_proposals = batch['rpn_image'].cuda(non_blocking=True)
        command = batch['command'].cuda(non_blocking=True)
        command_length = batch['command_length'].cuda(non_blocking=True)
        gt = batch['rpn_gt'].cuda(non_blocking=True)
        iou = batch['rpn_iou'].cuda(non_blocking=True)
        (b, r, c, h, w) = region_proposals.size()
        # Flatten (batch, regions) so all proposals are encoded in one pass,
        # then L2-normalize the embeddings.
        img_features = img_encoder(region_proposals.view((b * r), c, h, w))
        norm = img_features.norm(p=2, dim=1, keepdim=True)
        img_features = img_features.div(norm)
        (_, sentence_features) = text_encoder(command.permute(1, 0), command_length)
        norm = sentence_features.norm(p=2, dim=1, keepdim=True)
        sentence_features = sentence_features.div(norm)
        # Cosine similarity of each region with its sentence: shape (B, R).
        # NOTE(review): .squeeze() also drops the batch dim when b == 1 -- confirm intended.
        scores = torch.bmm(img_features.view(b, r, (- 1)), sentence_features.unsqueeze(2)).squeeze()
        total_loss = criterion(scores, gt)
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        # Metrics: top-1 accuracy over valid samples; IoU and AP@0.5 of the
        # argmax region, selected via a one-hot boolean mask.
        pred = torch.argmax(scores, 1)
        pred_bin = F.one_hot(pred, r).bool()
        valid = (gt != ignore_index)
        num_valid = torch.sum(valid).float().item()
        m_top1.update(torch.sum((pred[valid] == gt[valid])).float().item(), num_valid)
        m_iou.update(torch.masked_select(iou, pred_bin).sum().float().item(), b)
        m_ap50.update((torch.masked_select(iou, pred_bin) > 0.5).sum().float().item(), b)
        m_losses.update(total_loss.item())
        if ((i % args.print_freq) == 0):
            progress.display(i)
class YogiAdaptiveAggregation(AdaptiveAggregation):
    """Federated adaptive aggregation driven by the NumPy Yogi optimizer.

    Thin wrapper that constructs a ``NumPyYogi`` from the given
    hyperparameters and hands it to ``AdaptiveAggregation`` together with
    the base aggregation function.
    """

    def __init__(self, *, agg_func: AggregationFunction=DEFAULT_AGG_FUNC, params: Optional[Dict[(str, np.ndarray)]]=None, model_interface=None, learning_rate: float=0.01, betas: Tuple[(float, float)]=(0.9, 0.999), initial_accumulator_value: float=0.0, epsilon: float=1e-08) -> None:
        """Build the Yogi optimizer and initialize the adaptive aggregator.

        Fix: the epsilon value was previously passed with the misspelled
        keyword ``epsilo``, so it never reached NumPyYogi.
        """
        opt = NumPyYogi(params=params, model_interface=model_interface, learning_rate=learning_rate, betas=betas, initial_accumulator_value=initial_accumulator_value, epsilon=epsilon)
        super().__init__(opt, agg_func)
def prepare_graph_for_second_network_editor(in_model, representative_data_gen, core_config, fw_info, fw_impl, tpc, target_kpi=None, tb_w=None):
    """Continue graph preparation after the first network-editor stage.

    Pipeline: compute quantization thresholds, apply post-statistics
    substitutions (optionally shift-negative correction), run statistics
    correction, choose bit widths (mixed precision when ``target_kpi`` is
    given), then apply the user's network-editor rules.

    Args:
        in_model: framework model to quantize.
        representative_data_gen: calibration data generator.
        core_config: CoreConfig with quantization / mixed-precision / debug settings.
        fw_info: framework-specific information.
        fw_impl: framework implementation object.
        tpc: target platform capabilities.
        target_kpi: optional KPI target enabling mixed-precision bit-width search.
        tb_w: optional TensorBoard writer for intermediate graphs/statistics.

    Returns:
        The transformed graph with final bit widths and editor rules applied.
    """
    transformed_graph = prepare_graph_for_first_network_editor(in_model=in_model, representative_data_gen=representative_data_gen, core_config=core_config, fw_info=fw_info, fw_impl=fw_impl, tpc=tpc, target_kpi=target_kpi, tb_w=tb_w)
    calculate_quantization_params(transformed_graph, fw_info, fw_impl=fw_impl)
    if (tb_w is not None):
        tb_w.add_graph(transformed_graph, 'thresholds_selection')
        tb_w.add_all_statistics(transformed_graph, 'thresholds_selection')
    transformed_graph = substitute(transformed_graph, fw_impl.get_substitutions_post_statistics_collection(core_config.quantization_config))
    # Optionally fold shifts for negative activations into the graph.
    if core_config.quantization_config.shift_negative_activation_correction:
        transformed_graph = fw_impl.shift_negative_correction(transformed_graph, core_config, fw_info)
        if (tb_w is not None):
            tb_w.add_graph(transformed_graph, 'after_shift_negative_correction')
            tb_w.add_all_statistics(transformed_graph, 'after_shift_negative_correction')
    if (tb_w is not None):
        tb_w.add_graph(transformed_graph, 'post_statistics_collection_substitutions')
        tb_w.add_all_statistics(transformed_graph, 'post_statistics_collection_substitutions')
    tg_with_bias = statistics_correction_runner(transformed_graph, core_config, fw_info, fw_impl, tb_w)
    # No node may have a finalized weights config before bit widths are set.
    for n in tg_with_bias.nodes:
        assert (n.final_weights_quantization_cfg is None)
    if (target_kpi is not None):
        # A KPI target only makes sense with mixed precision enabled.
        assert core_config.mixed_precision_enable
        if (core_config.mixed_precision_config.configuration_overwrite is None):
            bit_widths_config = search_bit_width(tg_with_bias, fw_info, fw_impl, target_kpi, core_config.mixed_precision_config, representative_data_gen)
        else:
            bit_widths_config = core_config.mixed_precision_config.configuration_overwrite
    else:
        bit_widths_config = []
    tg = set_bit_widths(core_config.mixed_precision_enable, tg_with_bias, bit_widths_config)
    edit_network_graph(tg, fw_info, core_config.debug_config.network_editor)
    return tg
class CrossMapLRN2d(Module):
    """Cross-channel local response normalization over 2D feature maps.

    Delegates the actual computation to the backend's CrossMapLRN2d function,
    parameterized by (size, alpha, beta, k).
    """

    def __init__(self, size, alpha=0.0001, beta=0.75, k=1):
        super(CrossMapLRN2d, self).__init__()
        # Normalization hyperparameters: window size and LRN constants.
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input):
        lrn_fn = self._backend.CrossMapLRN2d(self.size, self.alpha, self.beta, self.k)
        return lrn_fn(input)

    def extra_repr(self):
        """Hyperparameter summary shown in the module's repr."""
        return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
def lazy_import(module: str, name: str, imports: dict[(str, Callable[([], Any)])], _globals: dict[(str, Any)]) -> Any:
    """Resolve *name* lazily for *module*.

    Returns the cached value from ``_globals`` when present; otherwise runs
    the registered loader, caches its result, and returns it. Raises
    AttributeError when no loader is registered for *name*.
    """
    cached = _globals.get(name)
    if cached is not None:
        return cached
    loader = imports.get(name)
    if loader is None:
        raise AttributeError(f'module {module!r} has no attribute {name!r}')
    # First access: materialize the value and memoize it for next time.
    result = loader()
    _globals[name] = result
    return result
def preprocess_for_train(image_bytes, dtype=tf.float32, image_size=IMAGE_SIZE, mean=MEAN_RGB, std=STDDEV_RGB, interpolation=tf.image.ResizeMethod.BICUBIC, augment_name=None, randaug_num_layers=None, randaug_magnitude=None):
    """Training-time preprocessing: random crop, flip, optional RandAugment,
    then mean/std normalization.

    Returns a [image_size, image_size, 3] tensor of ``dtype``. Raises
    ValueError for any ``augment_name`` other than 'randaugment'.
    """
    image = decode_and_random_crop(image_bytes, image_size, interpolation)
    image = tf.image.random_flip_left_right(image)
    image = tf.reshape(image, [image_size, image_size, 3])
    if augment_name:
        logging.info('Apply AutoAugment policy %s', augment_name)
        # Augment ops expect uint8 pixels; pad/fill with the dataset mean.
        fill_value = [int(round(channel_mean)) for channel_mean in MEAN_RGB]
        image = to_uint8(image, saturate=False)
        if (augment_name == 'randaugment'):
            image = distort_image_with_randaugment(image, randaug_num_layers, randaug_magnitude, fill_value=fill_value)
        else:
            raise ValueError(('Invalid value for augment_name: %s' % augment_name))
        image = to_float(image)
    image = normalize_image(image, mean=mean, std=std)
    return tf.image.convert_image_dtype(image, dtype=dtype)
class FakeQuantNet(nn.Module):
    """Minimal module applying a FakeQuantize op with its observer disabled,
    so the default scale/zero-point are used unchanged."""

    def __init__(self):
        super(FakeQuantNet, self).__init__()
        self.fake_quant = torch.quantization.FakeQuantize()
        # Freeze quantization parameters at their defaults.
        self.fake_quant.disable_observer()

    def forward(self, x):
        return self.fake_quant(x)
def loss_hinge_dis(dis_out_real, dis_out_fake):
    """Hinge discriminator loss: mean(relu(1 - D(real))) + mean(relu(1 + D(fake)))."""
    real_term = F.relu(1.0 - dis_out_real).mean()
    fake_term = F.relu(1.0 + dis_out_fake).mean()
    return real_term + fake_term
def register_Ns3UanModesListChecker_methods(root_module, cls):
    """Register constructors for the ns3::UanModesListChecker binding."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::UanModesListChecker const &', 'arg0')])
class COCOEvalCap():
    """Caption evaluation over a COCO ground-truth / results pair.

    Runs BLEU-1..4, METEOR, ROUGE_L, CIDEr and SPICE, storing aggregate
    scores in ``self.eval`` and per-image scores in ``self.imgToEval``.
    """

    def __init__(self, coco, cocoRes):
        self.evalImgs = []
        self.eval = {}
        self.imgToEval = {}
        self.coco = coco
        self.cocoRes = cocoRes
        self.params = {'image_id': coco.getImgIds()}

    def evaluate(self):
        """Tokenize annotations, compute every metric, and collect results."""
        img_ids = self.params['image_id']
        # Gather ground-truth and result annotations per image.
        gts = {img_id: self.coco.imgToAnns[img_id] for img_id in img_ids}
        res = {img_id: self.cocoRes.imgToAnns[img_id] for img_id in img_ids}
        print('tokenization...')
        tokenizer = PTBTokenizer()
        gts = tokenizer.tokenize(gts)
        res = tokenizer.tokenize(res)
        print('setting up scorers...')
        scorers = [(Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), (Meteor(), 'METEOR'), (Rouge(), 'ROUGE_L'), (Cider(), 'CIDEr'), (Spice(), 'SPICE')]
        for (scorer, method) in scorers:
            print(('computing %s score...' % scorer.method()))
            (score, scores) = scorer.compute_score(gts, res)
            # Bleu returns a list of scores (one per n-gram order).
            if (type(method) == list):
                for (sc, scs, m) in zip(score, scores, method):
                    self.setEval(sc, m)
                    self.setImgToEvalImgs(scs, gts.keys(), m)
                    print(('%s: %0.3f' % (m, sc)))
            else:
                self.setEval(score, method)
                self.setImgToEvalImgs(scores, gts.keys(), method)
                print(('%s: %0.3f' % (method, score)))
        self.setEvalImgs()

    def setEval(self, score, method):
        """Record an aggregate metric score."""
        self.eval[method] = score

    def setImgToEvalImgs(self, scores, imgIds, method):
        """Record per-image scores; scores align with sorted image ids."""
        for (imgId, score) in zip(sorted(imgIds), scores):
            if (imgId not in self.imgToEval):
                self.imgToEval[imgId] = {'image_id': imgId}
            self.imgToEval[imgId][method] = score

    def setEvalImgs(self):
        """Flatten per-image results into a list sorted by image id."""
        self.evalImgs = [self.imgToEval[imgId] for imgId in sorted(self.imgToEval.keys())]
def get_containing_span(span):
    """Expand *span* to the whitespace-delimited region containing it.

    Scans left from ``char_start`` and right from ``char_end`` until a space
    is found on each side, and returns a new Span over the enclosed text in
    the same sentence. A space is appended so the right scan always
    terminates; note the left scan stops at index 1, so it never examines
    index 0 -- presumably fine because offset 0 starts a token; confirm.
    """
    text = (span.sentence.text + ' ')
    (start, end) = (span.char_start, span.char_end)
    # Walk left to the nearest space before the span (i stays start-1 when
    # the range below is empty, i.e. start <= 1).
    i = (start - 1)
    for i in range((start - 1), 0, (- 1)):
        if (text[i] == ' '):
            break
    # Walk right to the nearest space at or after the span's end.
    j = end
    for j in range(end, len(text), 1):
        if (text[j] == ' '):
            break
    # Exclude the bounding spaces themselves.
    return Span(char_start=(i + 1), char_end=(j - 1), sentence=span.sentence)
class GANTrainer(TowerTrainer):
    """Tensorpack trainer that runs one generator and one discriminator step
    per iteration inside a single train op (the d step depends on the g step
    via control_dependencies), with gradients clipped to [-5, 5]."""

    def __init__(self, model, input_queue):
        """Build the model tower and the combined g/d optimize op.

        Args:
            model: GAN model exposing get_inputs_desc, build_graph,
                get_optimizer, g_loss/d_loss and g_vars/d_vars.
            input_queue: tensorpack input source feeding the graph.
        """
        super().__init__()
        inputs_desc = model.get_inputs_desc()
        # Set up the input pipeline and register its callbacks.
        cbs = input_queue.setup(inputs_desc)
        self.register_callback(cbs)
        # Build the model graph once inside a training tower context.
        self.tower_func = TowerFuncWrapper(model.build_graph, inputs_desc)
        with TowerContext('', is_training=True):
            self.tower_func(*input_queue.get_input_tensors())
        opt = model.get_optimizer()
        with tf.name_scope('optimize'):
            # Generator step with value-clipped gradients.
            g_min_grad = opt.compute_gradients(model.g_loss, var_list=model.g_vars)
            g_min_grad_clip = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in g_min_grad]
            g_min_train_op = opt.apply_gradients(g_min_grad_clip, name='g_op')
            # Discriminator step runs only after the generator step completes.
            with tf.control_dependencies([g_min_train_op]):
                d_min_grad = opt.compute_gradients(model.d_loss, var_list=model.d_vars)
                d_min_grad_clip = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in d_min_grad]
                d_min_train_op = opt.apply_gradients(d_min_grad_clip, name='d_op')
        # Running d_op executes both updates thanks to the dependency above.
        self.train_op = d_min_train_op
def tree_to_rel_adj(sent_len, tree, directed=False, self_loop=True):
    """Build a relation-labelled adjacency matrix from a dependency tree.

    Fix: ``np.int`` (removed in NumPy 1.24) is replaced by the builtin
    ``int``, which is what the alias resolved to.

    Args:
        sent_len: number of tokens; output is (sent_len, sent_len).
        tree: root node exposing ``idx``, ``rel`` and ``children``.
        directed: when False, the matrix is symmetrized with ``ret + ret.T``.
        self_loop: when True, diagonal entries of visited nodes are set to 1.

    Returns:
        Integer ndarray where entry (parent, child) holds the parent's ``rel``.
        NOTE(review): the edge label is the *parent's* rel, not the child's --
        confirm that is intended.
    """
    ret = np.zeros((sent_len, sent_len), dtype=int)
    # Breadth-first traversal using an index cursor (avoids O(n) pop(0)).
    order = [tree]
    cursor = 0
    visited = []
    while cursor < len(order):
        node = order[cursor]
        cursor += 1
        visited.append(node.idx)
        for child in node.children:
            ret[(node.idx, child.idx)] = node.rel
        order.extend(node.children)
    if not directed:
        ret = ret + ret.T
    if self_loop:
        for i in visited:
            ret[(i, i)] = 1
    return ret
def pythonlib_dir():
    """Return the directory containing the Python link libraries.

    Windows keeps import libraries under ``<prefix>\\libs``; elsewhere the
    build configuration's LIBDIR is authoritative.
    """
    if sys.platform == 'win32':
        return os.path.join(sys.prefix, 'libs')
    return get_config_var('LIBDIR')
def set_logger(log_level='info', fname=None):
    """Configure the absl logging handler's format and verbosity.

    Args:
        log_level: verbosity passed to absl's set_verbosity.
        fname: optional path; when given, log records are also written there.
    """
    import logging as _logging
    fmt = _logging.Formatter('%(asctime)s - %(filename)s - %(message)s')
    logging.get_absl_handler().setFormatter(fmt)
    logging.set_verbosity(log_level)
    if fname is not None:
        # Tee records to a file with the same format.
        file_handler = _logging.FileHandler(fname)
        file_handler.setFormatter(fmt)
        logging.get_absl_logger().addHandler(file_handler)
def merge_with_parent(dc: FairseqDataclass, cfg: FairseqDataclass):
    """Merge dataclass defaults *dc* with overrides from *cfg* via OmegaConf.

    The merged node's internal ``_parent`` pointer is restored from *cfg* so
    the result stays attached to cfg's original config tree (merge re-roots
    the node), and struct mode is enabled so unknown keys raise.
    """
    merged_cfg = OmegaConf.merge(dc, cfg)
    # Re-attach to cfg's parent; goes through __dict__ to bypass OmegaConf's
    # attribute interception.
    merged_cfg.__dict__['_parent'] = cfg.__dict__['_parent']
    OmegaConf.set_struct(merged_cfg, True)
    return merged_cfg
def get_tensors():
    """Collect all live, contiguous torch tensors tracked by the GC.

    Tensors sharing storage with an already-collected tensor (same data
    pointer) and non-contiguous tensors are skipped.
    """
    seen_ptrs = set()
    tensors = []
    for candidate in gc.get_objects():
        if not torch.is_tensor(candidate):
            continue
        if (not candidate.is_contiguous()) or (candidate.data_ptr() in seen_ptrs):
            continue
        tensors.append(candidate)
        seen_ptrs.add(candidate.data_ptr())
    return tensors
def update_moment(updates, moments, decay, order):
    """Exponential moving average of the `order`-th moment of the updates.

    Computes ``(1 - decay) * g**order + decay * t`` leafwise over the two
    pytrees. Fix: ``jax.tree_multimap`` was deprecated and removed from JAX;
    ``jax.tree_util.tree_map`` is the drop-in multi-tree replacement.
    """
    return jax.tree_util.tree_map((lambda g, t: (((1 - decay) * (g ** order)) + (decay * t))), updates, moments)
class TextRank(KeywordExtractor):
    """TextRank keyword extraction: PageRank over a word co-occurrence graph,
    with candidate phrases scored from their word ranks."""

    # pos: POS tags eligible as graph nodes; window: co-occurrence window;
    # alpha/tol: PageRank damping and convergence tolerance.
    defaults: Dict[(str, Any)] = {'pos': frozenset({'ADJ', 'NOUN', 'PROPN', 'VERB'}), 'window': 3, 'alpha': 0.85, 'tol': 1e-06, 'candidate_selection': 'chunk'}

    def candidate_weighting(self, doc: Doc) -> List[Tuple[(Candidate, float)]]:
        """Score each candidate phrase and return (candidate, weight) pairs,
        sorted by weight, highest first."""
        res = []
        G = self.build_graph(doc)
        # NOTE(review): nx.pagerank_scipy was removed in networkx 3.0 -- if the
        # pinned networkx is >= 3.0 this call fails; consider nx.pagerank.
        W = nx.pagerank_scipy(G, alpha=self.cfg['alpha'], tol=self.cfg['tol'])
        for candidate in doc._.kw_candidates:
            chunk_len = len(candidate.lexical_form)
            non_lemma = 0
            rank = 0.0
            # Sum ranks of the candidate's lemmas; count those absent from the graph.
            for t in candidate.lexical_form:
                if (t in W):
                    rank += W[t]
                else:
                    non_lemma += 1
            # Penalize candidates containing words that never entered the graph.
            non_lemma_discount = (chunk_len / ((chunk_len + (2.0 * non_lemma)) + 1.0))
            candidate_w = (np.sqrt((rank / (chunk_len + non_lemma))) * non_lemma_discount)
            # Tiny offset-based term makes ties deterministic by position.
            candidate_w += (candidate.offsets[0] * 1e-08)
            res.append((candidate, candidate_w))
        res.sort(key=(lambda x: x[1]), reverse=True)
        return res

    def build_graph(self, doc: Doc):
        """Build an undirected co-occurrence graph over lowercased lemmas.

        Nodes are non-stopword tokens whose POS is in ``cfg['pos']``; edges
        connect words co-occurring within ``cfg['window']`` tokens of the same
        sentence, weighted by co-occurrence count.
        """
        G = nx.Graph()
        pos = self.cfg['pos']
        window_size = self.cfg['window']
        seen = set()
        for sent in doc.sents:
            for token in sent:
                if (token.is_stop or (token.pos_ not in pos)):
                    continue
                node0 = token.lemma_.lower()
                if (not G.has_node(node0)):
                    G.add_node(node0)
                # Link to eligible words in the preceding window (same sentence).
                for prev_token in sent[max(sent.start, (token.i - window_size)):token.i]:
                    node1 = prev_token.lemma_.lower()
                    if ((node0 != node1) and (node1 in seen)):
                        if G.has_edge(node0, node1):
                            G[node0][node1]['weight'] += 1
                        else:
                            G.add_edge(node0, node1, weight=1)
                seen.add(node0)
        return G
def nms(dets, thresh, force_cpu=False):
    """Non-maximum suppression dispatch.

    Returns [] for empty detections; otherwise delegates to the GPU
    implementation when enabled (and not forced to CPU), else the CPU one.
    """
    if dets.shape[0] == 0:
        return []
    use_gpu = cfg.USE_GPU_NMS and (not force_cpu)
    if use_gpu:
        return gpu_nms(dets, thresh, device_id=0)
    return cpu_nms(dets, thresh)
def sgm2raw(sgm, debug):
    """Convert an .sgm file to raw text via SGM_TOOL; returns the output path.

    Skips the conversion when the target file already exists. *debug*
    enables progress messages.
    """
    # Strip the '.sgm' suffix to get the destination path.
    to_file = sgm[:-len('.sgm')]
    if os.path.exists(to_file):
        if debug:
            print(f'{sgm} already converted to {to_file}; so skip')
        return to_file
    call(f'{SGM_TOOL} < {sgm} > {to_file}', debug)
    return to_file
def test_or_dlrne():
    """End-to-end OR-composition of two DL-inequality (DLNotEqual) statements.

    Drives the full interactive sigma-protocol by hand: precommitment,
    commitment, challenge, response, verification.
    """
    (secrets, secret_values, secret_dict) = get_secrets(4)
    generators = make_generators(4)
    # Public values y_i = x_i * g_i for each secret/generator pair.
    lhs_values = [(x * g) for (x, g) in zip(secret_values, generators)]
    # y3 deliberately reuses secret_values[2] with generators[3].
    y3 = (secret_values[2] * generators[3])
    # First clause binds its secret; second does not (bind defaults off).
    p1 = DLNotEqual([lhs_values[0], generators[0]], [lhs_values[1], generators[1]], secrets[0], bind=True)
    p2 = DLNotEqual([lhs_values[1], generators[1]], [y3, generators[3]], secrets[1])
    orp = OrProofStmt(p1, p2)
    prov = orp.get_prover(secret_dict)
    ver = orp.get_verifier()
    # Interactive protocol: precommit -> commit -> challenge -> response -> verify.
    precom = prov.precommit()
    ver.process_precommitment(precom)
    com = prov.commit()
    chal = ver.send_challenge(com)
    resp = prov.compute_response(chal)
    assert ver.verify(resp)
class CharNode(ConstNode):
    """Constant node representing a C char literal."""
    type = PyrexTypes.c_char_type

    def calculate_constant_result(self):
        """Record the character's ordinal as the constant-folded result."""
        self.constant_result = ord(self.value)

    def compile_time_value(self, denv):
        """A char constant evaluates at compile time to its ordinal value."""
        return ord(self.value)

    def calculate_result_code(self):
        """Emit the C char literal, escaping the character as needed."""
        escaped = StringEncoding.escape_char(self.value)
        return "'%s'" % escaped
def convert_lst20(paths, short_name, include_space_char=True):
    """Convert the Thai LST20 NER corpus into .bio files and then JSON.

    Reads the train/eval/test shards under paths['NERBASE'], normalizes the
    corpus' idiosyncratic tags into standard BIO form, optionally drops the
    '_' space tokens, and writes one word<TAB>tag line per token into
    paths['NER_DATA_DIR'] before delegating to convert_bio_to_json.
    """
    assert (short_name == 'th_lst20')
    SHARDS = ('train', 'eval', 'test')
    BASE_OUTPUT_PATH = paths['NER_DATA_DIR']
    input_split = [(os.path.join(paths['NERBASE'], 'thai', 'LST20_Corpus', x), x) for x in SHARDS]
    # Dropping space tokens changes the dataset, so mark it in the name.
    if (not include_space_char):
        short_name = (short_name + '_no_ws')
    for (input_folder, split_type) in input_split:
        # Corpus document files all start with 'T'.
        text_list = [text for text in os.listdir(input_folder) if (text[0] == 'T')]
        if (split_type == 'eval'):
            split_type = 'dev'
        output_path = os.path.join(BASE_OUTPUT_PATH, ('%s.%s.bio' % (short_name, split_type)))
        print(output_path)
        with open(output_path, 'w', encoding='utf-8') as fout:
            for text in text_list:
                lst = []
                with open(os.path.join(input_folder, text), 'r', encoding='utf-8') as fin:
                    lines = fin.readlines()
                for (line_idx, line) in enumerate(lines):
                    x = line.strip().split('\t')
                    if (len(x) > 1):
                        # '_' tokens stand in for spaces; skip when requested.
                        if ((x[0] == '_') and (not include_space_char)):
                            continue
                        else:
                            (word, tag) = (x[0], x[2])
                            # Normalize known malformed/transposed tags.
                            if (tag == 'MEA_BI'):
                                tag = 'B_MEA'
                            if (tag == 'OBRN_B'):
                                tag = 'B_BRN'
                            if (tag == 'ORG_I'):
                                tag = 'I_ORG'
                            if (tag == 'PER_I'):
                                tag = 'I_PER'
                            if (tag == 'LOC_I'):
                                tag = 'I_LOC'
                            # A bare 'B' inherits its entity type from the next
                            # token's I_/E_ tag; otherwise it is unusable -> O.
                            if ((tag == 'B') and ((line_idx + 1) < len(lines))):
                                x_next = lines[(line_idx + 1)].strip().split('\t')
                                if (len(x_next) > 1):
                                    tag_next = x_next[2]
                                    if (('I_' in tag_next) or ('E_' in tag_next)):
                                        tag = (tag + tag_next[1:])
                                    else:
                                        tag = 'O'
                                else:
                                    tag = 'O'
                            # Corpus uses '_' where BIO uses '-'.
                            if ('_' in tag):
                                tag = tag.replace('_', '-')
                            # Drop tag classes that are not proper entities.
                            if (('ABB' in tag) or (tag == 'DDEM') or (tag == 'I') or (tag == '__')):
                                tag = 'O'
                            fout.write('{}\t{}'.format(word, tag))
                            fout.write('\n')
                    else:
                        # Blank line: sentence boundary.
                        fout.write('\n')
    convert_bio_to_json(BASE_OUTPUT_PATH, BASE_OUTPUT_PATH, short_name)
def test_predictor():
    """Smoke-test tester(): run prediction and check its output directory exists.

    Success is defined as the creation of 'test_tester_outputs' in the
    current working directory.
    """
    tester(input_hdf5='../sampleData&Model/100samples.hdf5', input_testset='test_trainer_outputs/test.npy', input_model='test_trainer_outputs/models/test_trainer_001.h5', output_name='test_tester', detection_threshold=0.2, P_threshold=0.1, S_threshold=0.1, number_of_plots=3, estimate_uncertainty=True, number_of_sampling=2, input_dimention=(6000, 3), normalization_mode='std', mode='generator', batch_size=10, gpuid=None, gpu_limit=None)
    # Directories produced by runs all end in '_outputs'.
    out_dirs = [ev for ev in os.listdir('.') if ev.split('_')[-1] == 'outputs']
    # Idiom fix: assert membership directly instead of a redundant boolean
    # flag compared against True.
    assert 'test_tester_outputs' in out_dirs
def build_wikisql_zero_dataset(folder, template_files):
    """Build zero-shot WikiSQL prompt files, one JSON-lines file per template.

    For every template, each WikiSQL test example is rendered into a prompt
    (question + linearized, truncated table) paired with the TAPAS-computed
    gold answer, and written one JSON object per line to
    `{folder}/wikisql_{prompt_mode}.json`.
    """
    os.makedirs(folder, exist_ok=True)
    table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')

    def _convert_table_types(_table):
        # Cast the string cells to their declared column types (TAPAS needs
        # typed values); the raw rows are preserved under 'real_rows'.
        ret_table = deepcopy(_table)
        types = ret_table['types']
        ret_table['real_rows'] = ret_table['rows']
        typed_rows = []
        for row in ret_table['rows']:
            typed_row = []
            for (column, cell_value) in enumerate(row):
                typed_row.append(_TYPE_CONVERTER[types[column]](cell_value))
            typed_rows.append(typed_row)
        ret_table['rows'] = typed_rows
        return ret_table
    eval_dataset = load_dataset('wikisql', split='test')
    prompt_files = template_files
    for prompt_file in prompt_files:
        prompt_mode = prompt_file.split('.')[(- 1)]
        # Fix: read the template through a context manager — the original
        # leaked the handle from an inline open(...).read().
        with open(os.path.join(TEMPLATE_ROOT_DIR, prompt_file), 'r', encoding='utf8') as template_f:
            prompt_string = template_f.read().strip()
        # Fix: write through a context manager so the file is closed even if
        # an example raises mid-loop.
        with open(f'{folder}/wikisql_{prompt_mode}.json', 'w', encoding='utf8') as write_f:
            for (question, table, example_sql) in zip(eval_dataset['question'], eval_dataset['table'], eval_dataset['sql']):
                tapas_table = _convert_table_types(table)
                answer: List[str] = retrieve_wikisql_query_answer_tapas(tapas_table, example_sql)
                # Truncate the raw table in place to fit the model budget.
                for truncate_func in table_processor.table_truncate_funcs:
                    truncate_func.truncate_table(table, question, [])
                linear_table = table_processor.table_linearize_func.process_table(table)
                model_input = prompt_string.format(question=question.strip('?'), table=linear_table)
                ground_truth = table_processor.process_output(answer=answer)
                write_f.write((json.dumps({'input': model_input, 'output': ground_truth, 'metadata': {'question': question, 'table': table, 'answer': answer}}) + '\n'))
def compute_importance(config, model, parallel_model, updater, dataloaders, loss_type='l2'):
    """Accumulate parameter-importance statistics for continual learning.

    Runs one pass over ``dataloaders``, backpropagating either the task loss
    (when reg_type is 'ewc') or the distance between the softmaxed scores and
    an all-zeros target (l1/l2), and lets ``updater`` fold the resulting
    gradients into ``model.reg_params``.

    loss_type: 'l2', 'l1', or 'ewc' (the latter requires
    config.training.CL.reg_type == 'ewc').
    """
    softmax = torch.nn.Softmax(dim=(- 1))
    # Select how model outputs are penalized; 'ewc' reuses the task loss.
    if (loss_type == 'l2'):
        loss_fct = torch.nn.MSELoss(reduction='mean')
    elif (loss_type == 'l1'):
        loss_fct = torch.nn.L1Loss(reduction='mean')
    elif (loss_type == 'ewc'):
        assert (config.training.CL.reg_type == 'ewc'), 'loss type is ewc, pls set reg_type to be ewc'
        loss_fct = torch.nn.Identity()
    # Mirror the model's parallelization mode for the loss module.
    if registry.get('data_parallel', False):
        loss_fct = torch.nn.DataParallel(loss_fct)
    elif registry.get('distributed', False):
        loss_fct = torch.nn.parallel.DistributedDataParallel(loss_fct, device_ids=[config.trainer.local_rank], output_device=config.trainer.local_rank, find_unused_parameters=config.training.find_unused_parameters)
    else:
        loss_fct = loss_fct
    for (_, batch) in tqdm(enumerate(dataloaders), total=len(dataloaders)):
        updater.zero_grad()
        prepared_batch = to_device(batch, config.device)
        with torch.cuda.amp.autocast(enabled=config.training.fp16):
            model_output = parallel_model(prepared_batch)
            report = Report(prepared_batch, model_output)
        loss = None
        if (config.training.CL.reg_type == 'ewc'):
            loss = extract_loss(report, loss_divisor=1)
        else:
            # Pull softmaxed scores toward zero; the gradient magnitude then
            # reflects each parameter's influence on the output distribution.
            logits = report['scores']
            logits = softmax(logits)
            target_zeros = torch.zeros(logits.size()).to(config.device)
            loss = loss_fct(logits, target_zeros).mean()
        report.detach()
        # AMP-scaled backward; the custom scaler.step forwards reg_params and
        # batch size so the updater can accumulate importances.
        config.trainer.scaler.scale(loss).backward()
        config.trainer.scaler.step(updater, model.reg_params, report.batch_size)
        config.trainer.scaler.update()
('/getRandomImage')
# NOTE(review): the bare string expression above looks like a mangled route
# decorator (e.g. `@app.route('/getRandomImage')`) — confirm and restore.
def getRandomImage():
    """Return JSON with an image path and caption texts for a category.

    Reads the `category` query parameter; `get_caption` is assumed to return
    a (upper text, lower text) pair — confirm against its definition.
    """
    category = request.args.get('category')
    data = {}
    data['image_path'] = ('/static/images/' + category)
    random_caption = get_caption(category)
    data['upperText'] = random_caption[0]
    data['lowerText'] = random_caption[1]
    return jsonify(data)
class d_sunet7128(nn.Module):
    """Segmentation head on top of a (pretrained) stacked U-Net 7128 backbone.

    Wraps the backbone in DataParallel so multi-GPU checkpoints load, adjusts
    BatchNorm momentum throughout, and adds a 1x1 conv classifier plus a 2D
    cross-entropy loss for training.
    """
    def __init__(self, num_classes, pretrained=True, ignore_index=(- 1), weight=None, output_stride='16'):
        super(d_sunet7128, self).__init__()
        self.num_classes = num_classes
        sunet = stackedunet7128(output_stride=output_stride)
        # DataParallel wrapper makes the checkpoint's 'module.' keys match.
        sunet = torch.nn.DataParallel(sunet, device_ids=range(torch.cuda.device_count())).cuda()
        if pretrained:
            checkpoint = torch.load(sunet7128_path)
            sunet.load_state_dict(checkpoint['state_dict'])
        self.features = sunet.module.features
        # Override BatchNorm momentum across the backbone (mom_bn is a
        # module-level constant — defined elsewhere in this file).
        for (n, m) in self.features.named_modules():
            if ('bn' in n):
                m.momentum = mom_bn
        for (n, m) in self.features.residual1.conv.named_modules():
            if ('2' in n):
                m.momentum = mom_bn
        self.final = nn.Sequential(OrderedDict([('conv1', nn.Conv2d(2304, num_classes, kernel_size=1))]))
        self.mceloss = cross_entropy2d(ignore=ignore_index, size_average=False, weight=weight)
    def forward(self, x, labels=None, th=1.0):
        """Forward pass; when labels are given, also return loss and
        class-wise pixel statistics for evaluation."""
        x_size = x.size()
        x = self.features(x)
        x = F.relu(x, inplace=False)
        x = self.final(x)
        # Upsample logits back to the input spatial resolution.
        x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=False)
        if (labels is not None):
            (losses, total_valid_pixel) = self.mceloss(x, labels, th=th)
            (classwise_pixel_acc, classwise_gtpixels, classwise_predpixels) = prediction_stat([x], labels, self.num_classes)
            classwise_pixel_acc = Variable(torch.FloatTensor([classwise_pixel_acc]).cuda())
            classwise_gtpixels = Variable(torch.FloatTensor([classwise_gtpixels]).cuda())
            classwise_predpixels = Variable(torch.FloatTensor([classwise_predpixels]).cuda())
            return (x, losses, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels, total_valid_pixel)
        else:
            return x
def set_temperature(conditional_strategy, tempering_type, start_temperature, end_temperature, step_count, tempering_step, total_step):
    """Return the contrastive-loss temperature for the current step.

    For 'ContraGAN'/'ECGAN': 'continuous' interpolates linearly from start to
    end over total_step; 'discrete' raises the temperature in tempering_step
    jumps at equal intervals; any other type keeps start_temperature.
    Other strategies get the literal string 'no' (temperature unused).
    """
    if conditional_strategy not in ('ContraGAN', 'ECGAN'):
        return 'no'
    span = end_temperature - start_temperature
    if tempering_type == 'continuous':
        return start_temperature + (step_count * span) / total_step
    if tempering_type == 'discrete':
        # One plateau per tempering step, equally spaced across training.
        tempering_interval = total_step // (tempering_step + 1)
        return start_temperature + ((step_count // tempering_interval) * span) / tempering_step
    return start_temperature
def show_topics(doc_raw, ldamodel, cleaning=False, combine=False):
    """Print and return the LDA topic distribution of a test corpus.

    Optionally cleans/tokenizes the raw documents and/or merges them into a
    single document, converts them to bag-of-words via the module-level
    `dictionary`, prints per-document topics, and returns the dense
    topic-density matrix.
    """
    if cleaning:
        docs = [clean(doc).split() for doc in doc_raw]
    else:
        docs = doc_raw
    if combine:
        # Flatten every document into one combined document.
        docs = [[item for sublist in docs for item in sublist]]
    corpus = [dictionary.doc2bow(doc) for doc in docs]
    print('Working on a test corpus of length ' + str(len(corpus)))
    print('****** Topics found in each document of the test corpus ******')
    doc_by_topic = [ldamodel.get_document_topics(bow) for bow in corpus]
    for topics in doc_by_topic:
        print(topics)
        print('')
    topic_densities = convertToDense(doc_by_topic, len(ldamodel.get_topics()))
    return topic_densities
class _SCVI_HUB_NT(NamedTuple):
    """Constants for the scvi-tools Hugging Face Hub integration.

    Tag fields ending in `_TAG` are format templates filled in with
    `str.format` when building model-card tags.
    """
    HF_LIBRARY_NAME: str = 'scvi-tools'
    # NOTE(review): a float 0.0 for an int-annotated upload-size limit looks
    # like a mangled constant — confirm the intended maximum upload size.
    MAX_HF_UPLOAD_SIZE: int = .0
    METADATA_FILE_NAME: str = '_scvi_required_metadata.json'
    MODEL_CARD_FILE_NAME: str = 'README.md'
    # Placeholder texts for model-card fields.
    DEFAULT_MISSING_FIELD: str = 'To be added...'
    DEFAULT_NA_FIELD: str = 'N/A'
    DEFAULT_PARENT_MODULE: str = 'scvi.model'
    # Tag templates (filled via .format).
    MODEL_CLS_NAME_TAG: str = 'model_cls_name:{}'
    SCVI_VERSION_TAG: str = 'scvi_version:{}'
    ANNDATA_VERSION_TAG: str = 'anndata_version:{}'
    MODALITY_TAG: str = 'modality:{}'
    TISSUE_TAG: str = 'tissue:{}'
    ANNOTATED_TAG: str = 'annotated:{}'
_LAYERS.register_module(name='MMSyncBN')
# NOTE(review): the call above looks like a mangled decorator
# (`@_LAYERS.register_module(name='MMSyncBN')`) — as written it does not
# register the class below; confirm against the original source.
class SyncBatchNorm(Module):
    """Batch normalization synchronized across a distributed process group.

    In training (or when not tracking running stats) the custom
    SyncBatchNormFunction computes statistics across `group`; in eval mode it
    falls back to torch's F.batch_norm with the stored running stats.

    stats_mode: 'default' or 'N' — forwarded to SyncBatchNormFunction; its
    exact semantics are defined there (not visible here).
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, group=None, stats_mode='default'):
        super(SyncBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # Default to the global process group.
        group = (dist.group.WORLD if (group is None) else group)
        self.group = group
        self.group_size = dist.get_world_size(group)
        assert (stats_mode in ['default', 'N']), f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
        self.stats_mode = stats_mode
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
            self.register_buffer('num_batches_tracked', None)
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running mean/var and the batch counter to their initial values."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()
    def reset_parameters(self):
        """Reset running stats and re-initialize the affine parameters."""
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()
    def forward(self, input):
        if (input.dim() < 2):
            raise ValueError(f'expected at least 2D input, got {input.dim()}D input')
        # momentum=None means cumulative moving average (factor 1/num_batches).
        if (self.momentum is None):
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if (self.training and self.track_running_stats):
            if (self.num_batches_tracked is not None):
                self.num_batches_tracked += 1
                if (self.momentum is None):
                    exponential_average_factor = (1.0 / float(self.num_batches_tracked))
                else:
                    exponential_average_factor = self.momentum
        if (self.training or (not self.track_running_stats)):
            # Cross-process synchronized statistics path.
            return SyncBatchNormFunction.apply(input, self.running_mean, self.running_var, self.weight, self.bias, exponential_average_factor, self.eps, self.group, self.group_size, self.stats_mode)
        else:
            # Eval with tracked stats: plain (local) batch norm.
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, exponential_average_factor, self.eps)
    def __repr__(self):
        s = self.__class__.__name__
        s += f'({self.num_features}, '
        s += f'eps={self.eps}, '
        s += f'momentum={self.momentum}, '
        s += f'affine={self.affine}, '
        s += f'track_running_stats={self.track_running_stats}, '
        s += f'group_size={self.group_size},'
        s += f'stats_mode={self.stats_mode})'
        return s
def CW(model, img, dataset='imagenet', allstep=30, lr=0.03, radius=0.1, margin=20.0, lbd=2, setting='white', noise_radius=0.1, targeted_lr=0.005, targeted_radius=0.03, untargeted_lr=0.1, untargeted_radius=0.03):
    """Carlini-Wagner-style targeted attack with optional detector-evasion terms.

    Optimizes a perturbation of `img` (projected to an L-inf ball of `radius`
    and to [0, 1]) toward a random target label. In the 'white' setting, extra
    loss terms penalize detectability (noise sensitivity and re-attack
    behavior); 'gray' uses only the classification margin loss.

    Returns the adversarial image variable (on GPU).
    Raises ValueError for an unknown `setting`.
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    optimizer = optim.Adam([x_var], lr=lr)
    target_label = random_label(true_label, dataset=dataset)
    for step in range(allstep):
        optimizer.zero_grad()
        total_loss = 0
        output_ori = model(transform(x_var, dataset=dataset))
        (_, top2_1) = output_ori.data.cpu().topk(2)
        # Strongest competitor class (runner-up when the top class is the target).
        argmax11 = top2_1[0][0]
        if (argmax11 == target_label):
            argmax11 = top2_1[0][1]
        # Hinge margin loss pushing the target class above the competitor.
        loss1 = ((output_ori[0][argmax11] - output_ori[0][target_label]) + margin).clamp(min=0)
        if (setting == 'white'):
            total_loss += (lbd * loss1)
            # Evasion term 1: output stability under random input noise.
            noise_var = noisy(x_var, noise_radius)
            output_noise = model(transform(noise_var, dataset=dataset))
            loss2 = torch.norm((F.softmax(output_noise) - F.softmax(output_ori)), 1)
            total_loss += loss2
            # Evasion term 2: a fresh targeted attack from x should still succeed.
            new_tl = random_label(target_label, dataset=dataset)
            new_target = torch.LongTensor([new_tl]).cuda()
            t_attack_var = t_attack(model, x_var, new_target, dataset, 1, targeted_lr, targeted_radius)
            output_t_attack = model(transform(t_attack_var, dataset=dataset))
            (_, top2_3) = output_t_attack.data.cpu().topk(2)
            argmax13 = top2_3[0][0]
            if (argmax13 == new_tl):
                argmax13 = top2_3[0][1]
            loss3 = ((output_t_attack[0][argmax13] - output_t_attack[0][new_tl]) + margin).clamp(min=0)
            total_loss += loss3
            # Evasion term 3: an untargeted attack should move away from the
            # target label (hence subtracted from the total loss).
            u_attack_var = u_attack(model, x_var, dataset, 1, untargeted_lr, untargeted_radius)
            output_u_attack = model(transform(u_attack_var, dataset=dataset))
            (_, top2_4) = output_u_attack.data.cpu().topk(2)
            argmax14 = top2_4[0][1]
            if (argmax14 == target_label):
                argmax14 = top2_4[0][0]
            loss4 = ((output_u_attack[0][argmax14] - output_u_attack[0][target_label]) + margin).clamp(min=0)
            total_loss -= loss4
        elif (setting == 'gray'):
            total_loss += loss1
        else:
            # Fix: the original raised a plain string, which itself raises
            # TypeError in Python 3; raise a proper exception instead.
            raise ValueError('attack setting is not supported')
        total_loss.backward()
        optimizer.step()
        # Project back into the L-inf ball around img and into [0, 1].
        x_var.data = (torch.clamp((torch.clamp(x_var, min=0, max=1) - img), min=(- radius), max=radius) + img)
    return x_var
def recall(predicted_scores, query2target_idx, k: int) -> float:
    """Compute retrieval Recall@k for the given scores and relevance mapping."""
    metric = retrieval_metrics.RetrievalRecall(k=k)
    return _call_torchmetrics(metric, predicted_scores, query2target_idx)
def test_pandas_automatic_categories():
    """Check generate_categories against ACSIncome's pandas conversion.

    Downloads 2018 1-Year person data for CA, derives category mappings from
    the official variable definitions, and verifies the categorized pandas
    output is shape- and value-consistent with the uncategorized one.
    """
    data_source = ACSDataSource(survey_year='2018', horizon='1-Year', survey='person')
    ca_data = data_source.get_data(states=['CA'], download=True)
    definition_df = data_source.get_definitions(download=True)
    features = ACSIncome.features
    categories = generate_categories(features=features, definition_df=definition_df)
    (pan_features, pan_labels, pan_group) = ACSIncome.df_to_pandas(ca_data)
    # Every generated category must correspond to a known feature.
    assert np.isin(list(categories.keys()), features).all()
    # Observed raw values must all be covered by the category mapping.
    for feature in categories.keys():
        assert np.isin(pan_features[feature].unique(), list(categories[feature].keys())).all()
    (pan2_features, pan2_labels, pan2_group) = ACSIncome.df_to_pandas(ca_data, categories=categories)
    # Categorization must not change shapes or the number of distinct values.
    assert (pan_features.to_numpy().shape == pan2_features.to_numpy().shape)
    assert (pan_labels.to_numpy().squeeze().shape == pan2_labels.to_numpy().squeeze().shape)
    assert (pan_group.to_numpy().squeeze().shape == pan2_group.to_numpy().squeeze().shape)
    for feature in pan_features.columns:
        assert (len(pan_features[feature].unique()) == len(pan2_features[feature].unique()))
class SlurmRuntime(Runtime):
    """Runtime that executes queued runs as SLURM batch jobs.

    Each run is pickled to disk, wrapped in a generated sbatch script, and
    submitted via `sbatch`; inter-run dependencies are expressed with
    `--dependency=afterok:<jobid>`. Cancelling start() scancels everything
    already submitted.
    """

    def __init__(self, slurmdir, args, verbose=False, cleanup=True) -> None:
        super().__init__()
        self.runnable: tp.List[Run] = []
        self.slurmdir = slurmdir
        self.args = args
        self.verbose = verbose
        self.cleanup = cleanup
        self._start_task: asyncio.Task

    def add_run(self, run: Run) -> None:
        """Queue a run for submission."""
        self.runnable.append(run)

    def prep_run(self, run: Run) -> str:
        """Pickle `run` and generate its sbatch script; return the script path."""
        exp = run.experiment
        e_idx = ((exp.name + f'-{run.index}') + '.exp')
        exp_path = os.path.join(self.slurmdir, e_idx)
        log_idx = ((exp.name + f'-{run.index}') + '.log')
        exp_log = os.path.join(self.slurmdir, log_idx)
        sc_idx = ((exp.name + f'-{run.index}') + '.sh')
        exp_script = os.path.join(self.slurmdir, sc_idx)
        print(exp_path)
        print(exp_log)
        print(exp_script)
        # Fix: pickle requires a binary file, and passing encoding= together
        # with mode 'wb' raises ValueError ("binary mode doesn't take an
        # encoding argument") — drop the encoding.
        with open(exp_path, 'wb') as f:
            # The prerequisite is resolved through SLURM dependencies, so it
            # must not be pickled along with the run.
            run.prereq = None
            pickle.dump(run, f)
        with open(exp_script, 'w', encoding='utf-8') as f:
            f.write('#!/bin/sh\n')
            f.write(f'''#SBATCH -o {exp_log} -e {exp_log}
''')
            f.write(f'''#SBATCH --mem={exp.resreq_mem()}M
''')
            f.write(f'''#SBATCH --job-name="{run.name()}"
''')
            f.write('#SBATCH --exclude=spyder[01-05],spyder16\n')
            f.write('#SBATCH -c 32\n')
            f.write('#SBATCH --nodes=1\n')
            if (exp.timeout is not None):
                # Convert the timeout in seconds to HH:MM:SS for SLURM.
                h = int((exp.timeout / 3600))
                m = int(((exp.timeout % 3600) / 60))
                s = int((exp.timeout % 60))
                f.write(f'''#SBATCH --time={h:02d}:{m:02d}:{s:02d}
''')
            extra = ''
            if self.verbose:
                extra = '--verbose'
            f.write(f'''python3 run.py {extra} --pickled {exp_path}
''')
            # Preserve the run's exit status across the cleanup step.
            f.write('status=$?\n')
            if self.cleanup:
                f.write(f'''rm -rf {run.env.workdir}
''')
            f.write('exit $status\n')
        return exp_script

    async def _do_start(self) -> None:
        """Submit every queued run with sbatch, wiring up job dependencies."""
        pathlib.Path(self.slurmdir).mkdir(parents=True, exist_ok=True)
        jid_re = re.compile('Submitted batch job ([0-9]+)')
        for run in self.runnable:
            if (run.prereq is None):
                dep_cmd = ''
            else:
                dep_cmd = ('--dependency=afterok:' + str(run.prereq.job_id))
            script = self.prep_run(run)
            stream = os.popen(f'sbatch {dep_cmd} {script}')
            output = stream.read()
            result = stream.close()
            if (result is not None):
                raise RuntimeError('running sbatch failed')
            # Parse the job id from sbatch's confirmation line.
            m = jid_re.search(output)
            if (m is None):
                raise RuntimeError('cannot retrieve id of submitted job')
            run.job_id = int(m.group(1))

    async def start(self) -> None:
        """Run the submission task; on cancellation, scancel submitted jobs."""
        self._start_task = asyncio.create_task(self._do_start())
        try:
            (await self._start_task)
        except asyncio.CancelledError:
            job_ids = []
            for run in self.runnable:
                if run.job_id:
                    job_ids.append(str(run.job_id))
            scancel_process = (await asyncio.create_subprocess_shell(f"scancel {' '.join(job_ids)}"))
            (await scancel_process.wait())

    def interrupt_handler(self) -> None:
        """Cancel the in-flight submission task (triggers scancel in start())."""
        self._start_task.cancel()
class MultiheadAttention(Module):
    """Multi-head attention with optional separate K/V dims and ALiBi support.

    Mirrors torch.nn.MultiheadAttention: when kdim/vdim equal embed_dim a
    single fused in-projection weight is used, otherwise separate q/k/v
    projection weights. `use_alibi` is forwarded to the (module-level)
    multi_head_attention_forward implementation.
    """
    __annotations__ = {'bias_k': torch._jit_internal.Optional[torch.Tensor], 'bias_v': torch._jit_internal.Optional[torch.Tensor]}
    __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']
    def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, use_alibi=False):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = (kdim if (kdim is not None) else embed_dim)
        self.vdim = (vdim if (vdim is not None) else embed_dim)
        self._qkv_same_embed_dim = ((self.kdim == embed_dim) and (self.vdim == embed_dim))
        self.use_alibi = use_alibi
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = (embed_dim // num_heads)
        assert ((self.head_dim * num_heads) == self.embed_dim), 'embed_dim must be divisible by num_heads'
        if (self._qkv_same_embed_dim is False):
            # Distinct projection weights when K/V dims differ from embed_dim.
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter('in_proj_weight', None)
        else:
            # Fused QKV projection (rows stacked: q, k, v).
            self.in_proj_weight = Parameter(torch.empty((3 * embed_dim), embed_dim))
            self.register_parameter('q_proj_weight', None)
            self.register_parameter('k_proj_weight', None)
            self.register_parameter('v_proj_weight', None)
        if bias:
            self.in_proj_bias = Parameter(torch.empty((3 * embed_dim)))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()
    def _reset_parameters(self):
        """Xavier-initialize projections; zero biases; normal-init bias_k/v."""
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)
        if (self.in_proj_bias is not None):
            constant_(self.in_proj_bias, 0.0)
            constant_(self.out_proj.bias, 0.0)
        if (self.bias_k is not None):
            xavier_normal_(self.bias_k)
        if (self.bias_v is not None):
            xavier_normal_(self.bias_v)
    def __setstate__(self, state):
        # Backward compatibility with checkpoints pickled before the
        # _qkv_same_embed_dim attribute existed.
        if ('_qkv_same_embed_dim' not in state):
            state['_qkv_same_embed_dim'] = True
        super(MultiheadAttention, self).__setstate__(state)
    def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None):
        """Delegate to multi_head_attention_forward, choosing fused or
        separate projection weights based on _qkv_same_embed_dim."""
        if (not self._qkv_same_embed_dim):
            return multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight, use_alibi=self.use_alibi)
        else:
            return multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_alibi=self.use_alibi)
def get_male_dominant_sources(topicsDF, delta=1):
    """Return rows where male sources outnumber female sources by >= delta.

    Drops the 'topicDistribution' column before filtering.
    """
    condition = 'sourcesMaleCount - sourcesFemaleCount >= {}'.format(delta)
    return topicsDF.drop('topicDistribution').filter(condition)
def _filter_duplicated(tuples: List[tuple]):
filtered_tuples = []
for tp in tuples:
if (tp not in filtered_tuples):
filtered_tuples.append(tp)
return filtered_tuples |
def _get_model_config(config_file):
    """Load a DensePose model config by name, forcing CPU when CUDA is absent.

    Extends the base detectron2 config with the dataset-category, bootstrap,
    densepose and HRNet options before merging the named file.
    """
    cfg = get_cfg()
    add_dataset_category_config(cfg)
    add_bootstrap_config(cfg)
    add_densepose_config(cfg)
    add_hrnet_config(cfg)
    path = os.path.join(_get_base_config_dir(), config_file)
    cfg.merge_from_file(path)
    if (not torch.cuda.is_available()):
        # Fix: the device key is cfg.MODEL.DEVICE in detectron2 configs;
        # assigning cfg.MODEL_DEVICE created an unused attribute and left
        # the configured (GPU) device in place.
        cfg.MODEL.DEVICE = 'cpu'
    return cfg
def GravSphereFreeSpace(x, y, z, R, xc, yc, zc, rho):
    """Analytic free-space gravity (gx, gy, gz) of a uniform sphere.

    The sphere has radius R, center (xc, yc, zc) and density rho; the field is
    evaluated at points (x, y, z). Outside the sphere the full mass acts from
    the center; inside, only the mass enclosed within radius r contributes.
    """
    # Fix: the original guard compared `~np.size(x)` (bitwise NOT of the size)
    # against the other sizes, which can never be equal for a positive size —
    # mismatched inputs were silently accepted.
    if (not (np.size(x) == np.size(y) == np.size(z))):
        print('Specify same size of x, y, z')
        return
    # NOTE(review): 0.0 zeroes the entire output; this looks like a mangled
    # unit-conversion constant (SI to survey units) — confirm intended value.
    unit_conv = .0
    x = mkvc(x)
    y = mkvc(y)
    z = mkvc(z)
    # Offsets and distance from the sphere center.
    rx = (x - xc)
    ry = (y - yc)
    rz = (z - zc)
    r = np.sqrt((((rx ** 2) + (ry ** 2)) + (rz ** 2)))
    # Effective attracting mass at each observation point.
    M = np.empty_like(x)
    M[(r >= R)] = (((((R ** 3) * 4.0) / 3.0) * np.pi) * rho)
    M[(r < R)] = (((((r[(r < R)] ** 3) * 4.0) / 3.0) * np.pi) * rho)
    # Radial field magnitude (negative: attraction), then Cartesian components.
    g = ((((- G) * (1.0 / (r ** 2))) * M) * unit_conv)
    gx = (g * (rx / r))
    gy = (g * (ry / r))
    gz = (g * (rz / r))
    return (gx, gy, gz)
class showyourwork():
    """Locates the package's bundled workflow resources on disk."""

    def __init__(self):
        # Directory containing this module, with symlinks resolved.
        package_dir = Path(realpath(__file__)).absolute().parents[0]
        self.module = package_dir
        self.workflow = package_dir / 'workflow'
        self.rules = self.workflow / 'rules'
        self.resources = self.workflow / 'resources'
        self.envs = self.workflow / 'envs'
        self.cookiecutter = package_dir / 'cookiecutter-showyourwork'
def run_decoder(batch_size, max_seq_len, embed_size, num_heads, act='relu', num_iters=100):
    """Train a single GPT-2 TFBlock decoder layer on random data.

    Uses mixed-precision loss scaling and MSE loss against random targets.
    NOTE(review): `act` is accepted but unused in the visible body — confirm.
    """
    config = GPT2Config(n_positions=max_seq_len, n_ctx=max_seq_len, n_embd=embed_size, n_head=num_heads)
    class DecoderModel(tf.keras.models.Model):
        # Thin Keras wrapper around one transformer block.
        def __init__(self):
            super().__init__()
            # Standard 1/sqrt(head_dim) attention scaling.
            scale = (1.0 / math.sqrt((embed_size // num_heads)))
            self.decoder = TFBlock(max_seq_len, config, scale=scale)
        def call(self, inputs):
            # TFBlock returns a tuple; keep only the hidden states.
            return self.decoder([inputs, None, None, None], training=True)[0]
    decoder_model = DecoderModel()
    decoder_model.compile(optimizer=tf.keras.mixed_precision.experimental.LossScaleOptimizer(tf.keras.optimizers.Adam(learning_rate=3e-05, epsilon=1e-08), 'dynamic'), loss=tf.keras.losses.MeanSquaredError())
    (x, y) = generate_xy(batch_size, max_seq_len, embed_size)
    decoder_model.fit(x, y, batch_size=batch_size, epochs=num_iters)
def get_layer(layer: Union[(BaseLayer, str)]='conv', **kwargs) -> BaseLayer:
    """Resolve `layer` into a BaseLayer instance.

    Accepts an already-constructed BaseLayer (returned unchanged) or a string
    containing 'sage' or 'conv' (case-insensitive); kwargs are forwarded to
    the Convolution constructor.

    Raises ValueError for an unrecognized layer name and TypeError for an
    unsupported argument type.
    """
    # Idiom fix: isinstance() instead of issubclass(type(...), ...) and
    # type(...) == str — also accepts str subclasses correctly.
    if isinstance(layer, BaseLayer):
        return layer
    elif isinstance(layer, str):
        layer = layer.lower()
        if ('sage' in layer):
            # GraphSAGE-style convolution: left normalization + self embeddings.
            kwargs['normalization'] = 'left'
            kwargs['self_embeddings'] = True
            return Convolution('sage', **kwargs)
        elif ('conv' in layer):
            return Convolution('conv', **kwargs)
        else:
            raise ValueError('Layer name must be "Conv" or "Sage".')
    else:
        raise TypeError('Layer must be a string or a "BaseLayer" object.')
def AbstractSimplex(dim, degeneracies=(), underlying=None, name=None, latex_name=None):
    """Construct an abstract simplex of dimension `dim`.

    Without degeneracies this is simply a non-degenerate simplex; with
    degeneracies, a degenerate simplex wrapping `underlying` (created fresh
    when not supplied) is returned.
    """
    if not degeneracies:
        return NonDegenerateSimplex(dim, name=name, latex_name=latex_name)
    if underlying is None:
        underlying = NonDegenerateSimplex(dim)
    return AbstractSimplex_class(dim, degeneracies=degeneracies, underlying=underlying, name=name, latex_name=latex_name)
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """Rotate each point cloud in a batch about the Y (up) axis.

    batch_data: float array of shape (B, N, 3); rotation_angle: radians.
    Returns a new float32 array of the same shape.
    """
    # Perf fix: the rotation matrix is the same for every batch element, so
    # build it once instead of recomputing cos/sin inside the loop.
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]])
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        shape_pc = batch_data[(k, ...)]
        rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix)
    return rotated_data
class Combiner(object):
    """Client-side stubs for the gRPC 'Combiner' service (grpcio
    experimental single-call API).

    NOTE(review): these look like grpc-generated @staticmethod stubs whose
    decorators were lost during extraction — none take `self`; confirm
    against the generated *_pb2_grpc module before calling via an instance.
    """
    # Server-streaming: subscribe to model update requests.
    def ModelUpdateRequestStream(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelUpdateRequestStream', fedn__pb2.ClientAvailableMessage.SerializeToString, fedn__pb2.ModelUpdateRequest.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Server-streaming: subscribe to model updates.
    def ModelUpdateStream(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelUpdateStream', fedn__pb2.ClientAvailableMessage.SerializeToString, fedn__pb2.ModelUpdate.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Server-streaming: subscribe to validation requests.
    def ModelValidationRequestStream(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelValidationRequestStream', fedn__pb2.ClientAvailableMessage.SerializeToString, fedn__pb2.ModelValidationRequest.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Server-streaming: subscribe to validation results.
    def ModelValidationStream(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelValidationStream', fedn__pb2.ClientAvailableMessage.SerializeToString, fedn__pb2.ModelValidation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Unary: push a model update.
    def SendModelUpdate(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelUpdate', fedn__pb2.ModelUpdate.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Unary: push a model validation result.
    def SendModelValidation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelValidation', fedn__pb2.ModelValidation.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
# NOTE(review): the call above looks like a mangled decorator
# (`@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)`) — confirm and restore.
class CustomDPRReaderTokenizerMixin():
    """Tokenizer mixin for DPR readers.

    Encodes (question, title, passage) triples into reader inputs and decodes
    the best answer spans from reader outputs.
    """
    def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[(bool, str)]=False, truncation: Union[(bool, str)]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding:
        """Encode questions with optional titles/texts into reader inputs.

        With neither titles nor texts, behaves like the base tokenizer; with
        only one of them, encodes question/text pairs; with both, builds
        [question + title] + [passage] sequences (one per passage).
        """
        if ((titles is None) and (texts is None)):
            return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif ((titles is None) or (texts is None)):
            # Only one of titles/texts given: treat it as the text pair.
            text_pair = (titles if (texts is None) else texts)
            return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = (titles if (not isinstance(titles, str)) else [titles])
        texts = (texts if (not isinstance(texts, str)) else [texts])
        n_passages = len(titles)
        # A single question is broadcast across all passages.
        questions = (questions if (not isinstance(questions, str)) else ([questions] * n_passages))
        assert (len(titles) == len(texts)), 'There should be as many titles than texts but got {} titles and {} texts.'.format(len(titles), len(texts))
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        # Concatenate question+title with the passage, truncating if requested.
        encoded_inputs = {'input_ids': [((encoded_question_and_title + encoded_text)[:max_length] if ((max_length is not None) and truncation) else (encoded_question_and_title + encoded_text)) for (encoded_question_and_title, encoded_text) in zip(encoded_question_and_titles, encoded_texts)]}
        if (return_attention_mask is not False):
            attention_mask = [(input_ids != self.pad_token_id) for input_ids in encoded_inputs['input_ids']]
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> List[DPRSpanPrediction]:
        """Pick the best answer spans across passages, ordered by relevance.

        Passages are visited in decreasing relevance; within each, up to
        num_spans_per_passage non-overlapping spans are scored; at most
        num_spans predictions are returned overall.
        """
        input_ids = reader_input['input_ids']
        (start_logits, end_logits, relevance_logits) = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # The passage begins after the second [SEP] (question [SEP] title [SEP]).
            passage_offset = (sequence_ids.index(self.sep_token_id, 2) + 1)
            # Trim padding, if any, off the end of the sequence.
            if (sequence_ids[(- 1)] == self.pad_token_id):
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for (start_index, end_index) in best_spans:
                # Shift passage-relative indices back to full-sequence indices.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(DPRSpanPrediction(span_score=(start_logits[doc_id][start_index] + end_logits[doc_id][end_index]), relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:(end_index + 1)])))
            if (len(nbest_spans_predictions) >= num_spans):
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        """Return up to top_spans non-overlapping (start, end) index pairs,
        scored by start_logit + end_logit, within max_answer_length."""
        scores = []
        for (start_index, start_score) in enumerate(start_logits):
            for (answer_length, end_score) in enumerate(end_logits[start_index:(start_index + max_answer_length)]):
                scores.append(((start_index, (start_index + answer_length)), (start_score + end_score)))
        scores = sorted(scores, key=(lambda x: x[1]), reverse=True)
        chosen_span_intervals = []
        for ((start_index, end_index), score) in scores:
            assert (start_index <= end_index), 'Wrong span indices: [{}:{}]'.format(start_index, end_index)
            length = ((end_index - start_index) + 1)
            assert (length <= max_answer_length), 'Span is too long: {} > {}'.format(length, max_answer_length)
            # Skip spans that contain or are contained in an already-chosen one.
            if any([((start_index <= prev_start_index <= prev_end_index <= end_index) or (prev_start_index <= start_index <= end_index <= prev_end_index)) for (prev_start_index, prev_end_index) in chosen_span_intervals]):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if (len(chosen_span_intervals) == top_spans):
                break
        return chosen_span_intervals
def GetNodeInDegV_PDirNet(Graph, NIdInDegV):
    """Thin wrapper delegating to the SWIG-generated ``_snap`` binding.

    Presumably populates ``NIdInDegV`` with (node id, in-degree) pairs for a
    directed network, per SNAP's ``GetNodeInDegV`` — confirm against the SNAP
    documentation; this module only forwards the call.
    """
    return _snap.GetNodeInDegV_PDirNet(Graph, NIdInDegV)
def test_count_nonzeroaxis_None():
    """count_nonzero with the default axis counts every entry; all nine
    datetime64 values in the ragged layout below are non-zero."""
    dt = np.datetime64
    layout = [
        [[dt('2022'), dt('2023'), dt('2025')], [], [dt('2027'), dt('2011')], [dt('2013')]],
        [],
        [[dt('2017'), dt('2019')], [dt('2023')]],
    ]
    array = ak.highlevel.Array(layout, check_valid=True)
    assert (ak.operations.count_nonzero(array) == 9)
(scope='module')
def geodf() -> dd.DataFrame:
df = df = load_dataset('countries')
ddf = to_dask(df)
return ddf |
def get_graph(influences_collections_list: List[List[Dict[(int, float)]]], train_vertex_color_map_fn: Optional[Callable[([int], int)]]=None, train_vertex_radius_map_fn: Optional[Callable[([int], int)]]=None, eval_vertex_radius: Optional[int]=None, eval_vertex_color_base: Optional[int]=None) -> gt_Graph_t:
    """Build a graph-tool graph visualizing training→evaluation influence edges.

    Each inner dict maps a training-datapoint index to its influence score on
    one evaluation example. Training points become vertices positioned by an
    optimization over their incident edges; evaluation points are placed on
    circles (one arc segment per collection).

    :param influences_collections_list: one list of per-eval-example influence
        dicts for each experiment/collection.
    :param train_vertex_color_map_fn: optional color for each training vertex.
    :param train_vertex_radius_map_fn: optional radius for each training vertex.
    :param eval_vertex_radius: radius of the circle eval vertices sit on.
    :param eval_vertex_color_base: color offset; collection ``i`` gets
        ``base + i``.
    :return: ``(graph, {'train_vertices': ..., 'eval_vertices_collections': ...})``.
        NOTE(review): the return annotation says ``gt_Graph_t`` but the function
        returns a 2-tuple — the annotation looks stale; confirm with callers.
    """
    # Fill in defaults for any option not supplied by the caller.
    if (train_vertex_color_map_fn is None):
        def train_vertex_color_map_fn(index: int) -> int:
            return DEFAULT_TRAIN_VERTEX_COLOR
    if (train_vertex_radius_map_fn is None):
        def train_vertex_radius_map_fn(index: int) -> int:
            return DEFAULT_TRAIN_VERTEX_RADIUS
    if (eval_vertex_radius is None):
        eval_vertex_radius = DEFAULT_EVAL_VERTEX_RADIUS
    if (eval_vertex_color_base is None):
        eval_vertex_color_base = DEFAULT_EVAL_VERTEX_COLORS_BASE
    # NOTE(review): these two checks are unreachable — the functions were just
    # defaulted above — presumably defensive leftovers.
    if (train_vertex_color_map_fn is None):
        raise ValueError
    if (train_vertex_radius_map_fn is None):
        raise ValueError
    NUM_INFLUENCE_COLLECTIONS = len(influences_collections_list)
    influences_collections_list_flatten = []
    for influences_collections in influences_collections_list:
        # All collections must track the same number of training influences
        # per eval example (compared against the first collection's first dict).
        if (len(influences_collections_list[0][0]) != len(influences_collections[0])):
            raise ValueError
        influences_collections_list_flatten.extend(influences_collections)
    (possible_datapoints, datapoints_map) = get_datapoints_map(influences_collections=influences_collections_list_flatten)
    g = gt.Graph(directed=True)
    # Edge/vertex property maps used for styling and later layout.
    e_colors = g.new_edge_property('int')
    e_weights = g.new_edge_property('double')
    e_signed_influences = g.new_edge_property('double')
    e_unsigned_influences = g.new_edge_property('double')
    v_sizes = g.new_vertex_property('int')
    v_colors = g.new_vertex_property('int')
    v_radius = g.new_vertex_property('int')
    v_data_indices = g.new_vertex_property('string')
    v_positions = g.new_vertex_property('vector<double>')
    v_positive_positions = g.new_vertex_property('vector<double>')
    v_negative_positions = g.new_vertex_property('vector<double>')
    train_vertices = []
    eval_vertices_collections = []
    # One vertex per unique training datapoint seen in any collection.
    for datapoint_index in trange(len(possible_datapoints)):
        v = g.add_vertex()
        v_sizes[v] = DEFAULT_TRAIN_VERTEX_SIZE
        v_colors[v] = train_vertex_color_map_fn(possible_datapoints[datapoint_index])
        v_radius[v] = train_vertex_radius_map_fn(possible_datapoints[datapoint_index])
        v_data_indices[v] = f'train-{possible_datapoints[datapoint_index]}'
        train_vertices.append(v)
    # Eval vertices: each collection occupies a 360/N-degree arc; points are
    # spread evenly along it with small Gaussian jitter.
    for (i, influences_collections) in enumerate(influences_collections_list):
        eval_vertices = []
        for datapoint_index in trange(len(influences_collections)):
            v = g.add_vertex()
            v_sizes[v] = 10
            v_colors[v] = (eval_vertex_color_base + i)
            v_radius[v] = eval_vertex_radius
            v_data_indices[v] = f'eval-{i}-{datapoint_index}'
            base_degree = ((360 / NUM_INFLUENCE_COLLECTIONS) * i)
            fine_degree = (((360 / NUM_INFLUENCE_COLLECTIONS) / len(influences_collections)) * datapoint_index)
            x_y_coordinate = get_circle_coordinates(r=eval_vertex_radius, degree=(base_degree + fine_degree))
            position = np.random.normal(x_y_coordinate, 0.1)
            v_positions[v] = position
            v_positive_positions[v] = position
            v_negative_positions[v] = position
            eval_vertices.append(v)
        eval_vertices_collections.append(eval_vertices)
    def add_edges(influences_collections: List[Dict[(int, float)]], eval_vertices: List[gt.Vertex]) -> None:
        """Add one train→eval edge per influence score in the collection."""
        for (eval_index, influences) in enumerate(tqdm(influences_collections)):
            for (train_index, train_influence) in influences.items():
                # NOTE(review): negative influence is rendered with the
                # "helpful" color — presumably the convention that negative
                # influence reduces loss; confirm against the project docs.
                if (train_influence < 0.0):
                    train_vertex = train_vertices[datapoints_map[train_index]]
                    eval_vertex = eval_vertices[eval_index]
                    e = g.add_edge(train_vertex, eval_vertex)
                    e_colors[e] = DEFAULT_HELPFUL_EDGE_COLOR
                    e_weights[e] = np.abs(train_influence)
                    e_signed_influences[e] = train_influence
                    e_unsigned_influences[e] = np.abs(train_influence)
                else:
                    train_vertex = train_vertices[datapoints_map[train_index]]
                    eval_vertex = eval_vertices[eval_index]
                    e = g.add_edge(train_vertex, eval_vertex)
                    e_colors[e] = DEFAULT_HARMFUL_EDGE_COLOR
                    e_weights[e] = np.abs(train_influence)
                    e_signed_influences[e] = train_influence
                    e_unsigned_influences[e] = np.abs(train_influence)
    for (i, influences_collections) in enumerate(influences_collections_list):
        add_edges(influences_collections, eval_vertices_collections[i])
    def _calculate_position(train_vertex: gt.Vertex) -> None:
        """Place a training vertex between two optimized anchor points.

        One anchor minimizes (influence-weighted) distance to the eval
        vertices it helps, the other to those it harms; the final position is
        their midpoint, jittered slightly to avoid exact overlaps.
        """
        _positive_points = []
        _negative_points = []
        _positive_influences = []
        _negative_influences = []
        for e in train_vertex.all_edges():
            target = e.target()
            if (e_signed_influences[e] > 0):
                _positive_points.append(v_positions[target])
                _positive_influences.append(e_unsigned_influences[e])
            else:
                _negative_points.append(v_positions[target])
                _negative_influences.append(e_unsigned_influences[e])
        # Search box slightly larger than the vertex's circle constraint.
        bound = (1.4 * v_radius[train_vertex])
        constraints = {'type': 'ineq', 'fun': get_within_circle_constraint(v_radius[train_vertex])}
        if (len(_positive_influences) == 0):
            _positive_xval = 0.0
            _positive_yval = 0.0
        else:
            _positive_points_stacked = np.stack(_positive_points, axis=0)
            _positive_influences_stacked = np.stack(_positive_influences, axis=0)
            _positive_optimize_result = minimize(distance_to_points_within_circle_vectorized, x0=(0, 0), constraints=constraints, bounds=(((- bound), bound), ((- bound), bound)), args=(_positive_influences_stacked, _positive_points_stacked))
            (_positive_xval, _positive_yval) = _positive_optimize_result.x
        if (len(_negative_influences) == 0):
            _negative_xval = 0.0
            _negative_yval = 0.0
        else:
            _negative_points_stacked = np.stack(_negative_points, axis=0)
            _negative_influences_stacked = np.stack(_negative_influences, axis=0)
            _negative_optimize_result = minimize(distance_to_points_within_circle_vectorized, x0=(0, 0), constraints=constraints, bounds=(((- bound), bound), ((- bound), bound)), args=(_negative_influences_stacked, _negative_points_stacked))
            (_negative_xval, _negative_yval) = _negative_optimize_result.x
        # Small jitter so coincident optima don't overlap visually.
        _positive_xval = np.random.normal(_positive_xval, 0.01)
        _negative_xval = np.random.normal(_negative_xval, 0.01)
        _positive_yval = np.random.normal(_positive_yval, 0.01)
        _negative_yval = np.random.normal(_negative_yval, 0.01)
        v_positive_positions[train_vertex] = np.array([_positive_xval, _positive_yval])
        v_negative_positions[train_vertex] = np.array([_negative_xval, _negative_yval])
        v_positions[train_vertex] = np.array([((_positive_xval + _negative_xval) / 2), ((_positive_yval + _negative_yval) / 2)])
    for train_vertex in tqdm(train_vertices):
        _calculate_position(train_vertex)
    # Register the property maps on the graph so they survive serialization.
    g.edge_properties['colors'] = e_colors
    g.edge_properties['weights'] = e_weights
    g.edge_properties['signed_influences'] = e_signed_influences
    g.edge_properties['unsigned_influences'] = e_unsigned_influences
    g.vertex_properties['sizes'] = v_sizes
    g.vertex_properties['colors'] = v_colors
    g.vertex_properties['radius'] = v_radius
    g.vertex_properties['data_indices'] = v_data_indices
    g.vertex_properties['positions'] = v_positions
    g.vertex_properties['positive_positions'] = v_positive_positions
    g.vertex_properties['negative_positions'] = v_negative_positions
    return (g, {'train_vertices': train_vertices, 'eval_vertices_collections': eval_vertices_collections})
class JSONMixin(_JSONMixin):
    """Mixin that decodes request JSON with the stdlib ``json`` module and
    turns decode failures into ``BadRequest`` responses."""

    json_module = json

    def on_json_loading_failed(self, e):
        """Raise ``BadRequest`` on invalid JSON; include the decode error
        only when the app is running in debug mode."""
        debugging = bool(current_app and current_app.debug)
        if not debugging:
            raise BadRequest()
        raise BadRequest('Failed to decode JSON object: {0}'.format(e))
class DynamicRNN(nn.Module):
    """Wraps an RNN (LSTM-style: returns ``(output, (h_n, c_n))``) so it can
    consume batches of variable-length, padded sequences.

    Sequences are sorted by length, packed, run through the wrapped model,
    then unpacked and restored to the original batch order.
    """

    def __init__(self, rnn_model):
        super().__init__()
        self.rnn_model = rnn_model

    def forward(self, seq_input, seq_lens, initial_state=None):
        """Run the wrapped RNN over padded ``seq_input``.

        Args:
            seq_input: (batch, max_len, feat) padded input batch.
            seq_lens: per-example valid lengths, shape (batch,).
            initial_state: optional (h_0, c_0) tuple, each laid out
                (num_layers, batch, hidden).

        Returns:
            (outputs, (h_n, c_n)) with ``outputs`` padded back to
            ``max_len`` and everything in the original batch order; the
            hidden/cell states are those of the last layer.
        """
        max_sequence_length = seq_input.size(1)
        (sorted_len, fwd_order, bwd_order) = self._get_sorted_order(seq_lens)
        sorted_seq_input = seq_input.index_select(0, fwd_order)
        packed_seq_input = pack_padded_sequence(sorted_seq_input, lengths=sorted_len, batch_first=True)
        if initial_state is not None:
            hx = initial_state
            assert hx[0].size(0) == self.rnn_model.num_layers
            # BUGFIX: ``sorted_hx`` was never assigned on this branch, causing
            # a NameError whenever an initial state was supplied. The states
            # are (num_layers, batch, hidden), so the batch dimension (dim 1)
            # must be permuted into the same sorted order as the inputs.
            sorted_hx = tuple(x.index_select(1, fwd_order) for x in hx)
        else:
            sorted_hx = None
        self.rnn_model.flatten_parameters()
        (outputs, (h_n, c_n)) = self.rnn_model(packed_seq_input, sorted_hx)
        # Keep only the last layer's states and undo the length sort.
        h_n = h_n[-1].index_select(dim=0, index=bwd_order)
        c_n = c_n[-1].index_select(dim=0, index=bwd_order)
        outputs = pad_packed_sequence(outputs, batch_first=True, total_length=max_sequence_length)[0].index_select(dim=0, index=bwd_order)
        return (outputs, (h_n, c_n))

    @staticmethod
    def _get_sorted_order(lens):
        """Return lengths sorted descending plus the forward permutation and
        its inverse (for restoring the original order).

        BUGFIX: this was an instance-level ``def`` without ``self``, so
        calling it as ``self._get_sorted_order(...)`` raised a TypeError;
        it uses no instance state, hence ``@staticmethod``.
        """
        (sorted_len, fwd_order) = torch.sort(lens.contiguous().view(-1), 0, descending=True)
        (_, bwd_order) = torch.sort(fwd_order)
        sorted_len = list(sorted_len)
        return (sorted_len, fwd_order, bwd_order)
def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True, fps_source='tbr'):
    """Probe a media file with ffmpeg and parse its stderr banner.

    Runs ``ffmpeg -i filename`` (plus a null output for GIFs, whose duration
    only appears after decoding) and scrapes duration, video size/fps/rotation
    and audio sample rate from the text ffmpeg prints.

    Args:
        filename: path to the media file.
        print_infos: if True, echo ffmpeg's raw output.
        check_duration: if True, parse duration and derive frame count.
        fps_source: 'tbr' or 'fps' — which ffmpeg field to prefer; the other
            is used as fallback.

    Returns:
        dict with keys ``duration``, ``video_found``, ``video_size``,
        ``video_fps``, ``video_nframes``, ``video_duration``,
        ``video_rotation``, ``audio_found``, ``audio_fps``.

    Raises:
        IOError: if the file is missing or a required field cannot be parsed.
    """
    is_GIF = filename.endswith('.gif')
    cmd = [get_setting('FFMPEG_BINARY'), '-i', filename]
    if is_GIF:
        # GIFs report no duration unless fully decoded to a null sink.
        cmd += ['-f', 'null', '/dev/null']
    popen_params = {'bufsize': (10 ** 5), 'stdout': sp.PIPE, 'stderr': sp.PIPE, 'stdin': DEVNULL}
    if (os.name == 'nt'):
        # BUGFIX: the assigned value was missing entirely (syntax error).
        # 0x08000000 is CREATE_NO_WINDOW — prevents a console window from
        # flashing up on Windows.
        popen_params['creationflags'] = 0x08000000
    proc = sp.Popen(cmd, **popen_params)
    (output, error) = proc.communicate()
    # ffmpeg writes the media banner to stderr, not stdout.
    infos = error.decode('utf8')
    del proc
    if print_infos:
        print(infos)
    lines = infos.splitlines()
    if ('No such file or directory' in lines[(- 1)]):
        raise IOError(('MoviePy error: the file %s could not be found!\nPlease check that you entered the correct path.' % filename))
    result = dict()
    result['duration'] = None
    if check_duration:
        try:
            # For GIFs the last 'frame=' progress line holds the final time;
            # otherwise the first 'Duration:' line does.
            keyword = ('frame=' if is_GIF else 'Duration: ')
            index = ((- 1) if is_GIF else 0)
            line = [l for l in lines if (keyword in l)][index]
            match = re.findall('([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])', line)[0]
            result['duration'] = cvsecs(match)
        except Exception:
            raise IOError(('MoviePy error: failed to read the duration of file %s.\nHere are the file infos returned by ffmpeg:\n\n%s' % (filename, infos)))
    lines_video = [l for l in lines if ((' Video: ' in l) and re.search('\\d+x\\d+', l))]
    result['video_found'] = (lines_video != [])
    if result['video_found']:
        try:
            line = lines_video[0]
            match = re.search(' [0-9]*x[0-9]*(,| )', line)
            s = list(map(int, line[match.start():(match.end() - 1)].split('x')))
            result['video_size'] = s
        except Exception:
            raise IOError(('MoviePy error: failed to read video dimensions in file %s.\nHere are the file infos returned by ffmpeg:\n\n%s' % (filename, infos)))
        def get_tbr():
            # tbr can be suffixed with 'k' (e.g. '90k tbr').
            match = re.search('( [0-9]*.| )[0-9]* tbr', line)
            s_tbr = line[match.start():match.end()].split(' ')[1]
            if ('k' in s_tbr):
                tbr = (float(s_tbr.replace('k', '')) * 1000)
            else:
                tbr = float(s_tbr)
            return tbr
        def get_fps():
            match = re.search('( [0-9]*.| )[0-9]* fps', line)
            fps = float(line[match.start():match.end()].split(' ')[1])
            return fps
        # Prefer the requested field, falling back to the other one.
        if (fps_source == 'tbr'):
            try:
                result['video_fps'] = get_tbr()
            except Exception:
                result['video_fps'] = get_fps()
        elif (fps_source == 'fps'):
            try:
                result['video_fps'] = get_fps()
            except Exception:
                result['video_fps'] = get_tbr()
        # Snap NTSC-style rates (23.976, 29.97, ...) that ffmpeg rounds.
        coef = (1000.0 / 1001.0)
        fps = result['video_fps']
        for x in [23, 24, 25, 30, 50]:
            if ((fps != x) and (abs((fps - (x * coef))) < 0.01)):
                result['video_fps'] = (x * coef)
        if check_duration:
            result['video_nframes'] = (int((result['duration'] * result['video_fps'])) + 1)
            result['video_duration'] = result['duration']
        else:
            result['video_nframes'] = 1
            result['video_duration'] = None
        try:
            # Rotation metadata line looks like 'rotate : 90'.
            rotation_lines = [l for l in lines if (('rotate :' in l) and re.search('\\d+$', l))]
            if len(rotation_lines):
                rotation_line = rotation_lines[0]
                match = re.search('\\d+$', rotation_line)
                result['video_rotation'] = int(rotation_line[match.start():match.end()])
            else:
                result['video_rotation'] = 0
        except Exception:
            raise IOError(('MoviePy error: failed to read video rotation in file %s.\nHere are the file infos returned by ffmpeg:\n\n%s' % (filename, infos)))
    lines_audio = [l for l in lines if (' Audio: ' in l)]
    result['audio_found'] = (lines_audio != [])
    if result['audio_found']:
        line = lines_audio[0]
        try:
            match = re.search(' [0-9]* Hz', line)
            hz_string = line[(match.start() + 1):(match.end() - 3)]
            result['audio_fps'] = int(hz_string)
        except Exception:
            # Best-effort: keep going even if the sample rate is unreadable.
            result['audio_fps'] = 'unknown'
    return result
class Function_lambert_w(BuiltinFunction):
    """The Lambert W function: ``W(n, z)`` is the ``n``-th branch of the
    inverse of ``z = W * exp(W)``; the one-argument form means the principal
    branch ``W(0, z)``."""

    def __init__(self):
        # Register the symbolic function together with its names in the
        # supported interfaces (Mathematica, Maple, Maxima, FriCAS, SymPy, ...).
        BuiltinFunction.__init__(self, 'lambert_w', nargs=2, conversions={'mathematica': 'ProductLog', 'maple': 'LambertW', 'matlab': 'lambertw', 'maxima': 'generalized_lambert_w', 'fricas': "((n,z)+->(if n=0 then lambertW(z) else operator('generalizedLambertW)(n,z)))", 'sympy': 'LambertW'})

    def __call__(self, *args, **kwds):
        """Accept one argument (principal branch) or two (branch, argument)."""
        if (len(args) == 2):
            return BuiltinFunction.__call__(self, *args, **kwds)
        elif (len(args) == 1):
            # Single-argument call defaults to branch n = 0.
            return BuiltinFunction.__call__(self, 0, args[0], **kwds)
        else:
            raise TypeError('lambert_w takes either one or two arguments.')

    def _method_arguments(self, n, z):
        # Argument order expected by element methods: z alone for the
        # principal branch, otherwise (z, n).
        if (n == 0):
            return [z]
        else:
            return [z, n]

    def _eval_(self, n, z):
        """Return an exact value for special points, or None to stay symbolic.

        Known exact values on the principal branch: W(0) = 0, W(e) = 1,
        W(-1/e) = -1.
        """
        if (not isinstance(z, Expression)):
            if ((n == 0) and (z == 0)):
                return s_parent(z)(0)
        elif (n == 0):
            if z.is_trivial_zero():
                return s_parent(z)(Integer(0))
            elif (z - const_e).is_trivial_zero():
                return s_parent(z)(Integer(1))
            elif (z + (1 / const_e)).is_trivial_zero():
                return s_parent(z)(Integer((- 1)))

    def _evalf_(self, n, z, parent=None, algorithm=None):
        """Numerical evaluation: scipy for float/RDF/complex/CDF, mpmath otherwise."""
        R = (parent or s_parent(z))
        if ((R is float) or (R is RDF)):
            res = _scipy_lambertw(z, n)
            # scipy always returns a complex value; hand back a real number
            # when the imaginary part vanishes.
            if (not res.imag):
                return R(res.real)
            elif (R is float):
                return complex(res)
            else:
                return CDF(res)
        elif ((R is complex) or (R is CDF)):
            return R(_scipy_lambertw(z, n))
        else:
            # Arbitrary precision via mpmath.
            return _mpmath_utils_call(_mpmath_lambertw, z, n, parent=R)

    def _derivative_(self, n, z, diff_param=None):
        """d/dz W(n, z) = W(n, z) / (z * W(n, z) + z); the branch index is
        discrete, so differentiating in it is an error."""
        if (diff_param == 0):
            raise ValueError('cannot differentiate lambert_w in the first parameter')
        return (lambert_w(n, z) / ((z * lambert_w(n, z)) + z))

    def _maxima_init_evaled_(self, n, z):
        """Maxima conversion: ``lambert_w`` for the principal branch,
        ``generalized_lambert_w`` otherwise."""
        if isinstance(z, str):
            maxima_z = z
        elif hasattr(z, '_maxima_init_'):
            maxima_z = z._maxima_init_()
        else:
            maxima_z = str(z)
        if (n == 0):
            return ('lambert_w(%s)' % maxima_z)
        else:
            return ('generalized_lambert_w(%s,%s)' % (n, maxima_z))

    def _print_(self, n, z):
        # Hide the branch index when printing the principal branch.
        if (n == 0):
            return ('lambert_w(%s)' % z)
        else:
            return ('lambert_w(%s, %s)' % (n, z))

    def _print_latex_(self, n, z):
        # LaTeX: W(z) for the principal branch, W_n(z) otherwise.
        if (n == 0):
            return ('\\operatorname{W}({%s})' % z._latex_())
        else:
            return ('\\operatorname{W_{%s}}({%s})' % (n, z._latex_()))
def top_similar_vectors(key_vector: np.array, candidate_vectors: List[np.array]) -> List[tuple]:
    """Rank all candidate vectors by cosine similarity to ``key_vector``.

    Returns a list of (score, candidate_index) pairs sorted from most to
    least similar; every candidate is included.
    """
    similarities = util.cos_sim(key_vector, np.asarray(candidate_vectors))[0]
    ranked = torch.topk(similarities, k=len(candidate_vectors))
    scores = ranked[0].detach().cpu().numpy()
    indices = ranked[1].detach().cpu().numpy()
    return list(zip(scores, indices))
def compute_fitness(chromesome, words_2, codebert_tgt, tokenizer_tgt, orig_prob, orig_label, true_label, code, names_positions_dict, args):
    """Score a candidate variable-renaming chromosome against the target model.

    The chromosome is applied to ``code``, the mutated Java source is
    re-tokenized and re-classified, and fitness is how much the model's
    confidence in the original label dropped. Returns
    (fitness_value, predicted_label).
    """
    mutated = map_chromesome(chromesome, code, 'java')
    # Normalize whitespace before tokenizing.
    mutated = ' '.join(mutated.split())
    tokens = tokenizer_tgt.tokenize(mutated)
    feature = convert_examples_to_features(tokens, words_2, true_label, None, None, tokenizer_tgt, args, None)
    dataset = CodeDataset([feature])
    (logits, predictions) = codebert_tgt.get_results(dataset, args.eval_batch_size)
    fitness_value = orig_prob - logits[0][orig_label]
    return (fitness_value, predictions[0])
def rewrite_with_label(char, label, apply_rewrites):
    """Apply a per-character segmentation/rewrite label.

    ``BEGIN`` prefixes the segment marker, ``CONT`` passes the character
    through, and ``REW`` rewrites it (optionally applying extra rewrite
    rules); any other label is a programming error.

    NOTE(review): several unicode literals below are empty strings — they
    look like non-ASCII characters stripped during an encoding conversion,
    so the REW comparisons as written can never match. Confirm the intended
    characters against the original source before trusting this branch.
    """
    if (label == 'BEGIN'):
        # Start of a new segment: prepend the marker.
        return (SEG_MARKER + char)
    elif (label == 'CONT'):
        # Continuation: character unchanged.
        return char
    elif (label == 'REW'):
        if (char == u''):
            return u':'
        elif apply_rewrites:
            # Optional normalization rewrites (literals lost — see note above).
            if (char in u''):
                return u''
            elif (char == u''):
                return u''
            elif (char == u''):
                return u''
            elif (char == u''):
                return u''
            else:
                return char
        else:
            return char
    else:
        assert False, ('unrecognized label: ' + label)
def test_get_random_object_all(simple_test_case):
    """Drawing a random int-typed object from the full test case must yield
    one of the two statements' return values."""
    cluster = simple_test_case.test_cluster
    int_type = cluster.type_system.convert_type_hint(int)
    picked = simple_test_case.get_random_object(int_type, simple_test_case.size())
    expected = [simple_test_case.statements[0].ret_val, simple_test_case.statements[1].ret_val]
    assert picked in expected
class ResidualExplanation(ExplanationBase):
    """Explanation holding regression predictions and their residuals,
    with matplotlib/plotly visualizations of residual vs. prediction."""

    def __init__(self, predictions, residuals, residual_type):
        """
        :param predictions: array of model predictions.
        :param residuals: array of residuals, aligned with ``predictions``.
        :param residual_type: how residuals were computed, e.g. 'ratio'
            (baseline 1) or a difference (baseline 0).
        """
        super().__init__()
        self.predictions = predictions
        self.residuals = residuals
        self.residual_type = residual_type

    def get_explanations(self):
        """Return the raw explanation data as a dict."""
        return {'prediction': self.predictions, 'residual': self.residuals}

    def plot(self, markersize=5, linewidth=2, **kwargs):
        """Matplotlib scatter of residuals sorted by prediction, with the
        appropriate baseline (1 for ratio residuals, 0 otherwise)."""
        import matplotlib.pyplot as plt
        indices = np.argsort(self.predictions)
        predictions = self.predictions[indices]
        residuals = self.residuals[indices]
        fig = plt.figure()
        plt.plot(predictions, residuals, 'o', markersize=markersize, label=f'Residuals ({self.residual_type})')
        if (self.residual_type == 'ratio'):
            plt.plot(predictions, np.ones(predictions.shape), color='orange', linewidth=linewidth, label='Baseline')
        else:
            plt.plot(predictions, np.zeros(predictions.shape), color='orange', linewidth=linewidth, label='Baseline')
        plt.xlabel('Prediction')
        plt.ylabel('Residual')
        plt.title('Regression Residuals')
        plt.legend(loc='upper right')
        plt.grid()
        return fig

    def _plotly_figure(self, markersize=5, linewidth=2, **kwargs):
        """Plotly counterpart of :meth:`plot`."""
        import plotly.graph_objects as go
        indices = np.argsort(self.predictions)
        predictions = self.predictions[indices]
        residuals = self.residuals[indices]
        fig = go.Figure()
        fig.add_trace(go.Scatter(mode='markers', x=predictions, y=residuals, marker=dict(color='#1f77b4', size=markersize), name=f'Residuals ({self.residual_type})'))
        if (self.residual_type == 'ratio'):
            fig.add_trace(go.Scatter(x=predictions, y=np.ones(predictions.shape), line=dict(color='#ff7f0e', width=linewidth), name='Baseline'))
        else:
            fig.add_trace(go.Scatter(x=predictions, y=np.zeros(predictions.shape), line=dict(color='#ff7f0e', width=linewidth), name='Baseline'))
        fig.update_layout(xaxis_title='Prediction', yaxis_title='Residual', title={'text': 'Regression Residuals'})
        return fig

    def plotly_plot(self, **kwargs):
        """Return the plotly figure wrapped for dashboard embedding."""
        return DashFigure(self._plotly_figure(**kwargs))

    def ipython_plot(self, **kwargs):
        """Render the plotly figure inline in a notebook."""
        import plotly
        plotly.offline.iplot(self._plotly_figure(**kwargs))

    @classmethod
    def from_dict(cls, d):
        """Reconstruct a ResidualExplanation from its dict representation.

        BUGFIX: the ``@classmethod`` decorator was missing — the method takes
        ``cls`` and is clearly an alternate constructor, but without the
        decorator ``ResidualExplanation.from_dict(d)`` bound ``d`` to ``cls``
        and raised a TypeError.
        """
        return ResidualExplanation(predictions=np.array(d['predictions']), residuals=np.array(d['residuals']), residual_type=d['residual_type'])
def pload(model, filename):
    """Load an object pickled next to *model* by the matching dump helper.

    :param model: object exposing a ``dirname`` attribute (directory where
        dumps are stored).
    :param filename: base name; the file read is ``<filename>_pdump.pkl``.
    :return: the unpickled object.
    :raises FileNotFoundError: if the dump file does not exist.
    """
    path = os.path.join(model.dirname, (filename + '_pdump.pkl'))
    if not os.path.isfile(path):
        raise FileNotFoundError(path + " doesn't exist")
    # BUGFIX: use a context manager so the file handle is closed promptly;
    # the original opened the file inline and leaked the handle.
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def separate_independent_kernel_two_layer_dgp_model(x: TensorType) -> DeepGP:
    """Build a two-layer deep GP whose first layer uses one independent
    SquaredExponential kernel per input dimension (random log-normal
    hyperparameters) and whose second layer is a single-output GP layer
    with a zero mean function; the input points themselves serve as
    inducing locations."""
    x = to_numpy(x)
    input_dim = x.shape[(- 1)]
    n_points = len(x)
    inducing_locations = x.copy()
    # One RBF kernel per input dimension; the loop preserves the original
    # variance-then-lengthscale random draw order per kernel.
    base_kernels = []
    for _ in range(input_dim):
        base_kernels.append(gpflow.kernels.SquaredExponential(variance=tf.exp(tf.random.normal([], dtype=gpflow.default_float())), lengthscales=tf.exp(tf.random.normal([], dtype=gpflow.default_float()))))
    layer1_kernel = construct_basic_kernel(base_kernels)
    layer1_inducing = gpflow.inducing_variables.SharedIndependentInducingVariables(gpflow.inducing_variables.InducingPoints(inducing_locations.copy()))
    layer1 = GPLayer(layer1_kernel, layer1_inducing, num_data=n_points, num_latent_gps=input_dim)
    layer2_kernel = gpflow.kernels.SquaredExponential()
    layer2_inducing = gpflow.inducing_variables.InducingPoints(inducing_locations.copy())
    layer2 = GPLayer(layer2_kernel, layer2_inducing, num_data=n_points, num_latent_gps=1, mean_function=gpflow.mean_functions.Zero())
    return DeepGP([layer1, layer2], gpflow.likelihoods.Gaussian(0.01))
def prep_model():
    """Create an SCVI model on synthetic IID data and train it for a single
    epoch; returns the trained model (intended for quick tests)."""
    synthetic = scvi.data.synthetic_iid()
    scvi.model.SCVI.setup_anndata(synthetic)
    trained = scvi.model.SCVI(synthetic)
    trained.train(1)
    return trained
# NOTE(review): the bare string expression below looks like a stripped
# registration decorator — presumably ``@TokenIndexer.register('dependency_label')``;
# confirm against the original source.
('dependency_label')
class DepLabelIndexer(TokenIndexer[int]):
    """Indexes tokens by their syntactic dependency label (``token.dep_``).

    Tokens with no dependency label are mapped to the sentinel 'NONE' and a
    warning is logged once per distinct token text.
    """

    def __init__(self, namespace: str='dep_labels') -> None:
        # Vocabulary namespace under which dependency labels are stored.
        self.namespace = namespace
        # Token texts already warned about, to avoid log spam.
        self._logged_errors: Set[str] = set()

    def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
        """Count this token's dependency label into the vocabulary counter."""
        dep_label = token.dep_
        if (not dep_label):
            if (token.text not in self._logged_errors):
                logger.warning('Token had no dependency label: %s', token.text)
                self._logged_errors.add(token.text)
            dep_label = 'NONE'
        counter[self.namespace][dep_label] += 1

    def token_to_indices(self, token: Token, vocabulary: Vocabulary) -> int:
        """Return the vocabulary index for this token's dependency label."""
        dep_label = (token.dep_ or 'NONE')
        return vocabulary.get_token_index(dep_label, self.namespace)

    def get_padding_token(self) -> int:
        return 0

    def get_padding_lengths(self, token: int) -> Dict[(str, int)]:
        # Single integers need no per-token padding dimensions.
        return {}

    def pad_token_sequence(self, tokens: List[int], desired_num_tokens: int, padding_lengths: Dict[(str, int)]) -> List[int]:
        return pad_sequence_to_length(tokens, desired_num_tokens)

    @classmethod
    def from_params(cls, params: Params) -> 'DepLabelIndexer':
        """Construct from a ``Params`` object (``namespace`` key optional).

        BUGFIX: the ``@classmethod`` decorator was missing — the body uses
        ``cls.__name__`` and ``cls(...)``, so calling it on the class bound
        ``params`` to ``cls`` and failed.
        """
        namespace = params.pop('namespace', 'dep_labels')
        params.assert_empty(cls.__name__)
        return cls(namespace=namespace)
def as_mask(shape, x_coord, y_coord, radii):
    """Rasterize a union of filled circles into a binary mask.

    :param shape: (height, width) of the output grid.
    :param x_coord: per-circle center x coordinates (along the width axis).
    :param y_coord: per-circle center y coordinates (along the height axis).
    :param radii: per-circle radii (inclusive boundary).
    :return: ``np.uint8`` array of ``shape`` — 1 inside any circle, else 0.
    """
    ygrid = np.arange(shape[0])
    xgrid = np.arange(shape[1])
    (xgrid, ygrid) = np.meshgrid(xgrid, ygrid, indexing='xy')
    # BUGFIX/robustness: accumulate as boolean OR instead of summing uint8
    # hit-counts and clipping — the uint8 sum wrapped around once 256+
    # circles covered the same pixel, which could zero out covered pixels.
    mask = np.zeros(shape, dtype=bool)
    for x, y, radius in zip(x_coord, y_coord, radii):
        d2 = (((xgrid - x) ** 2) + ((ygrid - y) ** 2))
        mask |= (d2 <= (radius ** 2))
    # Preserve the original uint8 0/1 return type.
    return mask.astype(np.uint8)
def run_diagnostic(real_data, synthetic_data, metadata, verbose=True):
    """Generate and return an SDV diagnostic report comparing real and
    synthetic data under the given metadata."""
    report = DiagnosticReport()
    report.generate(real_data, synthetic_data, metadata.to_dict(), verbose)
    return report
class DataIterator():
    """Streams (user, item, category, history, negatives) training batches
    from a tab-separated interaction file.

    NOTE(review): this code uses Python-2-era idioms (``xrange``, a ``next``
    method without ``__next__``) — presumably targets Python 2 / TF1; confirm
    before running under Python 3.
    """

    def __init__(self, source, buckets, uid_voc, mid_voc, cat_voc, batch_size=128, maxlen=100, skip_empty=False, shuffle_each_epoch=False, sort_by_length=True, max_batch_size=20, minlen=None):
        # Optionally reshuffle the source file into a temp copy each epoch.
        if shuffle_each_epoch:
            self.source_orig = source
            self.source = shuffle.main(self.source_orig, temporary=True)
        else:
            self.source = fopen(source, 'r')
        # Vocabularies: [0]=user ids, [1]=item ids, [2]=category ids.
        self.source_dicts = []
        for source_dict in [uid_voc, mid_voc, cat_voc]:
            self.source_dicts.append(load_dict(source_dict))
        # item-info maps raw item id -> raw category; build an index-level
        # item->category map (unknown ids fall back to index 0).
        f_meta = file_io.FileIO((buckets + 'item-info'), 'r')
        meta_map = {}
        for line in f_meta:
            arr = line.strip().split('\t')
            if (arr[0] not in meta_map):
                meta_map[arr[0]] = arr[1]
        self.meta_id_map = {}
        for key in meta_map:
            val = meta_map[key]
            if (key in self.source_dicts[1]):
                mid_idx = self.source_dicts[1][key]
            else:
                mid_idx = 0
            if (val in self.source_dicts[2]):
                cat_idx = self.source_dicts[2][val]
            else:
                cat_idx = 0
            self.meta_id_map[mid_idx] = cat_idx
        # Pool of item indices used to sample negative ("no-click") items.
        f_review = file_io.FileIO((buckets + 'reviews-info'), 'r')
        self.mid_list_for_random = []
        for line in f_review:
            arr = line.strip().split('\t')
            tmp_idx = 0
            if (arr[1] in self.source_dicts[1]):
                tmp_idx = self.source_dicts[1][arr[1]]
            self.mid_list_for_random.append(tmp_idx)
        self.batch_size = batch_size
        self.maxlen = maxlen
        self.minlen = minlen
        self.skip_empty = skip_empty
        self.n_uid = len(self.source_dicts[0])
        self.n_mid = len(self.source_dicts[1])
        self.n_cat = len(self.source_dicts[2])
        self.shuffle = shuffle_each_epoch
        self.sort_by_length = sort_by_length
        # Read-ahead buffer of raw lines; refilled k lines at a time.
        self.source_buffer = []
        self.k = (batch_size * max_batch_size)
        self.end_of_data = False

    def get_n(self):
        """Return vocabulary sizes: (num users, num items, num categories)."""
        return (self.n_uid, self.n_mid, self.n_cat)

    def __iter__(self):
        return self

    def reset(self):
        """Rewind (or reshuffle) the source file for the next epoch."""
        if self.shuffle:
            self.source = shuffle.main(self.source_orig, temporary=True)
        else:
            self.source.seek(0)

    def next(self):
        """Return the next (source, target) batch; raises StopIteration at
        the end of an epoch (after resetting for the next one)."""
        if self.end_of_data:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        source = []
        target = []
        if (len(self.source_buffer) == 0):
            # Refill the buffer with up to k raw lines.
            for k_ in xrange(self.k):
                ss = self.source.readline()
                if (ss == ''):
                    break
                self.source_buffer.append(ss.strip('\n').split('\t'))
            if self.sort_by_length:
                # Sort buffered lines by history length (field 4, '\x02'-
                # separated) so batches have similar lengths; lines are
                # consumed with pop(), i.e. longest-first.
                his_length = numpy.array([len(s[4].split('\x02')) for s in self.source_buffer])
                tidx = his_length.argsort()
                _sbuf = [self.source_buffer[i] for i in tidx]
                self.source_buffer = _sbuf
            else:
                # Keep original file order under pop() semantics.
                self.source_buffer.reverse()
        if (len(self.source_buffer) == 0):
            self.end_of_data = False
            self.reset()
            raise StopIteration
        try:
            while True:
                try:
                    ss = self.source_buffer.pop()
                except IndexError:
                    break
                # Fields: 0=label, 1=user, 2=item, 3=category,
                # 4=history items, 5=history categories. Unknown ids -> 0.
                uid = (self.source_dicts[0][ss[1]] if (ss[1] in self.source_dicts[0]) else 0)
                mid = (self.source_dicts[1][ss[2]] if (ss[2] in self.source_dicts[1]) else 0)
                cat = (self.source_dicts[2][ss[3]] if (ss[3] in self.source_dicts[2]) else 0)
                tmp = []
                for fea in ss[4].split('\x02'):
                    m = (self.source_dicts[1][fea] if (fea in self.source_dicts[1]) else 0)
                    tmp.append(m)
                mid_list = tmp
                tmp1 = []
                for fea in ss[5].split('\x02'):
                    c = (self.source_dicts[2][fea] if (fea in self.source_dicts[2]) else 0)
                    tmp1.append(c)
                cat_list = tmp1
                # Optionally skip short or empty histories.
                if (self.minlen != None):
                    if (len(mid_list) <= self.minlen):
                        continue
                if (self.skip_empty and (not mid_list)):
                    continue
                # Sample 5 negative items (different from the positive) for
                # every item in the click history.
                noclk_mid_list = []
                noclk_cat_list = []
                for pos_mid in mid_list:
                    noclk_tmp_mid = []
                    noclk_tmp_cat = []
                    noclk_index = 0
                    while True:
                        noclk_mid_indx = random.randint(0, (len(self.mid_list_for_random) - 1))
                        noclk_mid = self.mid_list_for_random[noclk_mid_indx]
                        if (noclk_mid == pos_mid):
                            continue
                        noclk_tmp_mid.append(noclk_mid)
                        noclk_tmp_cat.append(self.meta_id_map[noclk_mid])
                        noclk_index += 1
                        if (noclk_index >= 5):
                            break
                    noclk_mid_list.append(noclk_tmp_mid)
                    noclk_cat_list.append(noclk_tmp_cat)
                source.append([uid, mid, cat, mid_list, cat_list, noclk_mid_list, noclk_cat_list])
                # Target is a one-hot pair: [clicked, not-clicked].
                target.append([float(ss[0]), (1 - float(ss[0]))])
                if ((len(source) >= self.batch_size) or (len(target) >= self.batch_size)):
                    break
        except IOError:
            self.end_of_data = True
        if ((len(source) == 0) or (len(target) == 0)):
            # Everything was filtered out: recurse to produce a real batch.
            (source, target) = self.next()
        return (source, target)
class GRU_F(nn.Module):
    """Trimodal fusion head: encodes text/audio/vision with separate GRU
    encoders, max-pools each over time, concatenates and classifies."""

    def __init__(self, args):
        super(GRU_F, self).__init__()
        self.args = args
        # One GRU encoder per modality; hidden sizes double below because the
        # encoders appear to be bidirectional (the *2 factors) — confirm
        # against GRUencoder's definition.
        self.text_gru = GRUencoder(args.fusion_t_in, args.fusion_t_hid, num_layers=args.fusion_gru_layers)
        self.audio_gru = GRUencoder(args.fusion_a_in, args.fusion_a_hid, num_layers=args.fusion_gru_layers)
        self.vision_gru = GRUencoder(args.fusion_v_in, args.fusion_v_hid, num_layers=args.fusion_gru_layers)
        if args.use_linear:
            # Optional per-modality linear transforms applied before pooling.
            self.fusion_trans_t = nn.Linear((args.fusion_t_hid * 2), (args.fusion_t_hid * 2))
            self.fusion_trans_a = nn.Linear((args.fusion_a_hid * 2), (args.fusion_a_hid * 2))
            self.fusion_trans_v = nn.Linear((args.fusion_v_hid * 2), (args.fusion_v_hid * 2))
        self.fusion_dropout = nn.Dropout(args.fusion_drop)
        # Classifier over the concatenated modality representations,
        # producing a single regression/score output.
        self.classifier = nn.Sequential()
        self.classifier.add_module('linear_trans_norm', nn.BatchNorm1d((((args.fusion_t_hid + args.fusion_a_hid) + args.fusion_v_hid) * 2)))
        self.classifier.add_module('linear_trans_hidden', nn.Linear((((args.fusion_t_hid + args.fusion_a_hid) + args.fusion_v_hid) * 2), args.cls_hidden_dim))
        self.classifier.add_module('linear_trans_activation', nn.LeakyReLU())
        self.classifier.add_module('linear_trans_final', nn.Linear(args.cls_hidden_dim, 1))

    def forward(self, text_x, audio_x, vision_x):
        """Fuse the three modalities and return the classifier score.

        Each argument is a (features, mask) pair; masks are 1 for valid
        timesteps — argmin over a zero-extended mask recovers each
        sequence's true length.
        """
        (text_x, text_mask) = text_x
        (audio_x, audio_mask) = audio_x
        (vision_x, vision_mask) = vision_x
        # Append a zero column so argmin finds the first padding position
        # even for sequences that fill the whole mask.
        add_zero = torch.zeros(size=[text_x.shape[0], 1], requires_grad=False).type_as(text_mask).to(text_mask.device)
        text_mask_z = torch.cat((text_mask, add_zero), dim=1)
        audio_mask_z = torch.cat((audio_mask, add_zero), dim=1)
        vision_mask_z = torch.cat((vision_mask, add_zero), dim=1)
        text_len = torch.argmin(text_mask_z, dim=1)
        audio_len = torch.argmin(audio_mask_z, dim=1)
        vision_len = torch.argmin(vision_mask_z, dim=1)
        text_x = self.text_gru(text_x, text_len)
        audio_x = self.audio_gru(audio_x, audio_len)
        vision_x = self.vision_gru(vision_x, vision_len)
        # Max-pool over the time dimension after tanh (and an optional
        # linear transform per modality).
        if self.args.use_linear:
            text_rep = torch.max(torch.tanh(self.fusion_trans_t(text_x)), dim=1)[0]
            audio_rep = torch.max(torch.tanh(self.fusion_trans_a(audio_x)), dim=1)[0]
            vision_rep = torch.max(torch.tanh(self.fusion_trans_v(vision_x)), dim=1)[0]
        else:
            text_rep = torch.max(torch.tanh(text_x), dim=1)[0]
            audio_rep = torch.max(torch.tanh(audio_x), dim=1)[0]
            vision_rep = torch.max(torch.tanh(vision_x), dim=1)[0]
        utterance_rep = torch.cat((text_rep, audio_rep, vision_rep), dim=1)
        utterance_rep = self.fusion_dropout(utterance_rep)
        return self.classifier(utterance_rep)
class ResidualBlock(nn.Module):
    """Residual block computing ``x + F(x)`` where F is
    ReLU → 3x3 Conv → ReLU → 3x3 Conv with ``v`` channels throughout."""

    def __init__(self, v):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReLU(inplace=True),
            nn.Conv2d(v, v, kernel_size=3, padding=1, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(v, v, kernel_size=3, padding=1, bias=True),
        ]
        self.res = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the residual branch and add the skip connection."""
        return x + self.res(x)
def assigner(task_groups, authorized_cols):
    """Build a RandomGroupedAssigner over the given task groups and
    authorized collaborators, using the module-level training-round count."""
    assigner_cls = RandomGroupedAssigner
    return assigner_cls(task_groups, tasks=None, authorized_cols=authorized_cols, rounds_to_train=ROUNDS_TO_TRAIN)
def factorial(n, algorithm='gmp'):
    """Return ``n!`` for nonnegative ``n``.

    ``algorithm`` selects the backend: 'gmp' (via ZZ, the default) or
    'pari'; anything else raises ValueError.
    """
    if n < 0:
        raise ValueError('factorial -- must be nonnegative')
    if algorithm == 'gmp':
        return ZZ(n).factorial()
    if algorithm == 'pari':
        from sage.libs.pari.all import pari
        return pari.factorial(n)
    raise ValueError('unknown algorithm')
class CIFAR10(data.Dataset):
    """CIFAR-10 dataset reading the pre-extracted python pickle batches
    (``data_batch_1..5`` / ``test_batch``) directly from ``root``."""

    def __init__(self, root, train=True, transform=None, target_transform=None):
        # root: directory containing the CIFAR-10 batch files.
        # train: load the 5 training batches if True, else the test batch.
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.train = train
        self.train_data = []
        self.train_label = []
        self.test_data = []
        self.test_label = []
        if self.train:
            for i in range(5):
                file = os.path.join(self.root, ('data_batch_' + str((i + 1))))
                with open(file, 'rb') as fo:
                    # encoding='bytes': batches were pickled under Python 2.
                    dic = pickle.load(fo, encoding='bytes')
                    self.train_data.append(dic[b'data'])
                    self.train_label += dic[b'labels']
            # Reshape flat rows to NCHW, then transpose to HWC for PIL.
            self.train_data = np.vstack(self.train_data).reshape((- 1), 3, 32, 32)
            self.train_data = self.train_data.transpose((0, 2, 3, 1))
        else:
            file = os.path.join(self.root, 'test_batch')
            with open(file, 'rb') as fo:
                dic = pickle.load(fo, encoding='bytes')
                self.test_data.append(dic[b'data'])
                self.test_label += dic[b'labels']
            self.test_data = np.vstack(self.test_data).reshape((- 1), 3, 32, 32)
            self.test_data = self.test_data.transpose((0, 2, 3, 1))

    def __getitem__(self, index):
        """Return (image, label) at ``index`` with optional transforms applied."""
        if self.train:
            img = self.train_data[index]
            target = self.train_label[index]
        else:
            img = self.test_data[index]
            target = self.test_label[index]
        # Convert to PIL so torchvision-style transforms work.
        img = Image.fromarray(img)
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        if self.train:
            return len(self.train_label)
        else:
            return len(self.test_label)

    def __repr__(self):
        # Mirrors torchvision's dataset repr format.
        fmt_str = (('Dataset ' + self.__class__.__name__) + '\n')
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = ('train' if (self.train is True) else 'test')
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
def save_pc(PC, PC_color, filename):
    """Write a colored point cloud to a binary PLY file.

    ``PC`` holds xyz coordinates and ``PC_color`` the matching RGB values
    (one row per point); they are concatenated column-wise into PLY
    'vertex' records.
    """
    from plyfile import PlyElement, PlyData
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    combined = np.concatenate((PC, PC_color), axis=1)
    rows = [tuple(row) for row in combined]
    element = PlyElement.describe(np.array(rows, dtype=vertex_dtype), 'vertex')
    PlyData([element]).write(filename)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune *model* on *train_dataset*.

    Handles optional multi-GPU/distributed wrapping, resumption from a
    ``checkpoint-<step>`` directory, and periodic logging, evaluation and
    checkpointing on the main process.

    Returns:
        (global_step, tr_loss / global_step): total optimizer steps taken and
        the mean training loss per step.
    """
    # TensorBoard writer and output dir are created on the main process only.
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
        mkdir_p(args.output_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_collate_fn = partial(disamb_collate_fn, tokenizer=tokenizer)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=train_collate_fn)
    # Either train for a fixed number of optimizer steps, or derive the total
    # from the epoch count.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = (args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1
    else:
        t_total = (len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs
    # No weight decay for biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in model.named_parameters() if not any((nd in n) for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for (n, p) in model.named_parameters() if any((nd in n) for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # Warmup is the larger of an absolute step count and a ratio of t_total
    # (hoisted: the original computed this twice).
    warmup_steps = int(max(args.warmup_steps, t_total * args.warmup_ratio))
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
    # Resume optimizer/scheduler state when continuing from a checkpoint dir.
    if os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt')):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_dataset))
    logger.info(' Num Epochs = %d', args.num_train_epochs)
    logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Warmup steps = %d', warmup_steps)
    logger.info(' Total optimization steps = %d', t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # If model_name_or_path looks like 'checkpoint-<step>', fast-forward the
    # epoch/step counters so training resumes where it left off.
    if os.path.exists(args.model_name_or_path):
        try:
            checkpoint_suffix = args.model_name_or_path.split('-')[-1].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(' Continuing training from checkpoint, will skip to saved global_step')
            logger.info(' Continuing training from epoch %d', epochs_trained)
            logger.info(' Continuing training from global step %d', global_step)
            logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
        except ValueError:
            logger.info(' Starting fine-tuning.')
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [-1, 0]))
    set_seed(args)
    for epoch_index in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [-1, 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Skip batches already consumed in a resumed epoch.
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids': batch[0], 'token_type_ids': batch[1], 'attention_mask': batch[2], 'sample_mask': batch[3], 'labels': batch[4]}
            # These model families do not use token type ids.
            if args.model_type in ['roberta', 'distilbert', 'camembert', 'bart']:
                del inputs['token_type_ids']
            outputs = model(**inputs)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # average over DataParallel replicas
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if ((step + 1) % args.gradient_accumulation_steps) == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                if (args.local_rank in [-1, 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0):
                    logs = {}
                    logs['epoch'] = epoch_index + ((step + 1) / len(epoch_iterator))
                    logs['learning_rate'] = scheduler.get_last_lr()[0]
                    logs['loss'] = (tr_loss - logging_loss) / args.logging_steps
                    logs['step'] = global_step
                    # BUGFIX: was scheduler.get_lr()[0], which is deprecated and
                    # can report a different value than get_last_lr(); use the
                    # same accessor as the logs dict above.
                    tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logger.info('Training logs: {}'.format(logs))
                    logging_loss = tr_loss
                if (args.local_rank in [-1, 0]) and (args.eval_steps > 0) and ((global_step % args.eval_steps) == 0):
                    # Only evaluate on the single-GPU (non-distributed) path.
                    if (args.local_rank == -1) and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                        logger.info('Eval results: {}'.format(dict(results)))
                if (args.local_rank in [-1, 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = model.module if hasattr(model, 'module') else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if (args.max_steps > 0) and (global_step > args.max_steps):
                epoch_iterator.close()
                break
        if (args.max_steps > 0) and (global_step > args.max_steps):
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return (global_step, tr_loss / global_step)
class OpenConstituent(Transition):
    """Parser transition that opens a new constituent with the given label(s).

    ``label`` may be compound (several labels fused into one transition);
    ``top_label`` is the first/outermost of them.
    """
    def __init__(self, *label):
        self.label = tuple(label)
        self.top_label = self.label[0]
    def delta_opens(self):
        # Applying an Open increases the open-bracket count by one.
        return 1
    def update_state(self, state, model):
        # Push a dummy constituent carrying this label; the word position and
        # existing constituents are unchanged.
        return (state.word_position, state.constituents, model.dummy_constituent(Dummy(self.label)), None)
    def is_legal(self, state, model):
        """Return True iff this Open may be applied in *state* under *model*.

        The rules differ between top-down and in-order schemes; both guard
        against runaway sequences of opens.
        """
        # Hard cap: never allow far more opens than words in the sentence.
        if (state.num_opens > (state.sentence_length + 10)):
            return False
        if model.is_top_down():
            # Top-down: cannot open once the word buffer is exhausted.
            if state.empty_word_queue():
                return False
            if (not model.has_unary_transitions()):
                # Without unary transitions, a root-label Open must be the very
                # first transition, and only a root-label Open may come first.
                is_root = (self.top_label in model.get_root_labels())
                if is_root:
                    return state.empty_transitions()
                else:
                    return (not state.empty_transitions())
        else:
            # In-order: an Open needs at least one finished constituent below.
            if (state.num_constituents() == 0):
                return False
            if isinstance(model.get_top_transition(state.transitions), OpenConstituent):
                # Never two Opens in a row in the in-order scheme.
                return False
            if ((model.transition_scheme() is TransitionScheme.IN_ORDER_UNARY) or (model.transition_scheme() is TransitionScheme.IN_ORDER_COMPOUND)):
                return (not state.empty_word_queue())
            is_root = (self.top_label in model.get_root_labels())
            if is_root:
                # A root-label Open is only legal once the whole sentence is
                # consumed and no brackets remain open.
                return ((state.num_opens == 0) and state.empty_word_queue())
            else:
                # Limit stacked unary-like Opens (while inside another
                # constituent, or after the buffer is empty) to avoid
                # unbounded unary chains.
                if (((state.num_opens > 0) or state.empty_word_queue()) and too_many_unary_nodes(model.get_top_constituent(state.constituents), model.unary_limit())):
                    return False
                return True
        return True
    def components(self):
        # A compound Open decomposes into one single-label Open per label.
        return [OpenConstituent(label) for label in self.label]
    def short_name(self):
        return 'Open'
    def __repr__(self):
        return 'OpenConstituent({})'.format(self.label)
    def __eq__(self, other):
        # Equality is determined purely by the label tuple.
        if (self is other):
            return True
        if (not isinstance(other, OpenConstituent)):
            return False
        if (self.label == other.label):
            return True
        return False
    def __hash__(self):
        # Consistent with __eq__: hash on the label tuple.
        return hash(self.label)
def save_cache(output_dir, tokens, tokenizer, act_count_ft_tkns, model_info):
    """Persist tokens, their string forms, per-token byte offsets, activation
    counts, and model info into *output_dir* for later reuse."""
    out = pathlib.Path(output_dir)
    decoded_text = tokenizer.batch_decode(tokens, clean_up_tokenization_spaces=False)
    per_row_tokens = [tokenizer.convert_ids_to_tokens(tokens[i]) for i in range(tokens.shape[0])]
    per_row_strings = [[tokenizer.convert_tokens_to_string([tok]) for tok in row] for row in per_row_tokens]
    # Cumulative string lengths give the byte position of each token's end.
    lengths = np.array([[len(s) for s in row] for row in per_row_strings])
    byte_positions = np.cumsum(lengths, axis=1)
    np.save(out / 'tokens.npy', tokens)
    np.save(out / 'tokens_str.npy', per_row_strings)
    np.save(out / 'tokens_text.npy', decoded_text)
    np.save(out / 'token_byte_pos.npy', byte_positions)
    with open(out / 'act_count_ft_tkns.pkl', 'wb') as handle:
        pickle.dump(act_count_ft_tkns, handle)
    model_info.save(out / 'info.txt')
    print('Saved all data to', str(out))
class XLMProphetNetEncoder(metaclass=DummyObject):
    # Import-guard stub: stands in for the real encoder when torch is not
    # installed; any instantiation raises via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def z_tilde(z_list, z_hat_list, nNodes=1, measDim=2):
    """Return per-node measurement residuals z - z_hat.

    The measurements in z_list are replicated once per node, and each node's
    prediction (reshaped to (nNodes, 1, measDim)) is broadcast-subtracted.
    """
    replicated = np.array([z_list for _ in range(nNodes)])
    predictions = z_hat_list.reshape(nNodes, 1, measDim)
    return replicated - predictions
def _fix_a_slash_b(string: str) -> str:
if (len(string.split('/')) != 2):
return string
a_str = string.split('/')[0]
b_str = string.split('/')[1]
try:
a = int(a_str)
b = int(b_str)
assert (string == '{}/{}'.format(a, b))
new_string = (((('\\frac{' + str(a)) + '}{') + str(b)) + '}')
return new_string
except Exception:
return string |
class Non_local(nn.Module):
    """Non-local (self-attention) block: every spatial position attends to all
    others, and the attended features are added residually to the input."""
    def __init__(self, in_channels, bn_norm, reduc_ratio=2):
        super(Non_local, self).__init__()
        self.in_channels = in_channels
        # NOTE(review): reduc_ratio // reduc_ratio is always 1, so the
        # embedding space has a single channel regardless of reduc_ratio.
        # The conventional form is in_channels // reduc_ratio — left as-is
        # because changing it alters layer shapes and would break any
        # existing checkpoints; confirm intent upstream.
        self.inter_channels = (reduc_ratio // reduc_ratio)
        # g: value projection to the reduced embedding space.
        self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        # W: output projection + norm, zero-initialized so the block starts
        # out as an identity mapping (residual add contributes nothing).
        self.W = nn.Sequential(nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0), get_norm(bn_norm, self.in_channels))
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)
        # theta / phi: query and key projections for the affinity matrix.
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Apply non-local attention to x (N, C, H, W) and return same shape."""
        batch_size = x.size(0)
        # Flatten spatial dims: (N, inter_C, H*W) -> (N, H*W, inter_C).
        g_x = self.g(x).view(batch_size, self.inter_channels, (- 1))
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, (- 1))
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, (- 1))
        # Pairwise affinity between all positions, normalized by count
        # (dot-product attention with 1/N scaling instead of softmax).
        f = torch.matmul(theta_x, phi_x)
        N = f.size((- 1))
        f_div_C = (f / N)
        y = torch.matmul(f_div_C, g_x)
        # Restore (N, inter_C, H, W) layout before the output projection.
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection.
        z = (W_y + x)
        return z
def maybe_download_and_extract(data_dir):
    """Download and unpack the train/valid 32x32 tarballs into *data_dir*,
    skipping any split whose extracted directory already exists.

    NOTE(review): the original download URLs were stripped from this file
    (the source had unterminated string literals here); restore the real
    URLs before using the download path.
    """
    train_url = ''  # TODO: restore the train_32x32.tar download URL
    train_dir = os.path.join(data_dir, 'train_32x32')
    if not os.path.exists(train_dir):
        filepath = os.path.join(data_dir, 'train_32x32.tar')
        fetch(train_url, filepath)
        print('unpacking the tar file', filepath)
        # Context manager so the archive handle is closed (the original
        # leaked the open tarfile).
        with tarfile.open(filepath, 'r') as archive:
            archive.extractall(data_dir)
    test_url = ''  # TODO: restore the valid_32x32.tar download URL
    test_dir = os.path.join(data_dir, 'valid_32x32')
    if not os.path.exists(test_dir):
        filepath = os.path.join(data_dir, 'valid_32x32.tar')
        fetch(test_url, filepath)
        print('unpacking the tar file', filepath)
        with tarfile.open(filepath, 'r') as archive:
            archive.extractall(data_dir)
class LeafGenerator():
    """Terminal node of a match-generator trie: holds the values produced
    when a pattern path bottoms out."""
    # Sort last among generators (MatchGenerator nodes carry smaller indices).
    index = sys.maxsize

    def __init__(self):
        self.matches = []

    def empty(self):
        """True when this leaf produces no matches."""
        return len(self.matches) == 0

    def generate(self, atom, result):
        """Append all stored matches to *result* (``atom`` is ignored here)."""
        result += self.matches

    def _insert(self, args, value):
        """Insert *value* under the argument path *args*.

        With no remaining args the value lands on this leaf; otherwise a
        chain of MatchGenerator wrappers is built (innermost arg first) and
        returned as the new root, sharing this leaf's matches list.
        """
        if not args:
            self.matches.append(value)
            return self
        chain = LeafGenerator()
        chain.matches.append(value)
        for arg_index, arg in args[::-1]:
            wrapper = MatchGenerator(arg_index, LeafGenerator())
            wrapper.match_generator[arg] = chain
            chain = wrapper
        # The outermost node takes over the existing matches.
        chain.matches = self.matches
        return chain

    def dump(self, indent):
        """Debug print of every stored match, prefixed with *indent*."""
        for match in self.matches:
            print(('%s%s' % (indent, match)))
class BrickKilnDataset(SustainBenchDataset):
    """SustainBench brick-kiln classification dataset (satellite imagery
    stored in HDF5 shards, binary kiln / no-kiln labels)."""
    _dataset_name = 'brick_kiln'
    # NOTE(review): the download URL was stripped from this file (the original
    # line was a broken string literal); restore it before enabling download.
    _versions_dict = {'1.0': {'download_url': '', 'compressed_size': 7}}

    def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):
        self._version = version
        self._data_dir = self.initialize_data_dir(root_dir, download)
        self._split_dict = {'train': 0, 'val': 1, 'test': 2}
        self._split_names = {'train': 'Train', 'val': 'Validation', 'test': 'Test'}
        self._split_scheme = split_scheme
        if self._split_scheme not in ['official']:
            raise ValueError(f'Split scheme {self._split_scheme} not recognized')
        self.metadata = pd.read_csv(os.path.join(self.data_dir, 'list_eval_partition.csv'))
        self._split_array = self.metadata['partition'].values
        self._y_array = torch.from_numpy(self.metadata['y'].values)
        self._y_size = 1
        self._metadata_fields = ['y', 'hdf5_file', 'hdf5_idx', 'lon_top_left', 'lat_top_left', 'lon_bottom_right', 'lat_bottom_right', 'indice_x', 'indice_y']
        self._metadata_array = torch.tensor(self.metadata[self.metadata_fields].astype(float).values)
        # NOTE(review): base-class init runs last, after the attributes above
        # are populated — presumably it validates them; confirm against
        # SustainBenchDataset.
        super().__init__(root_dir, download, split_scheme)

    def get_input(self, idx):
        """Load the image for sample *idx* from its HDF5 shard as a float tensor."""
        hdf5_loc = self.metadata['hdf5_file'].iloc[idx]
        with h5py.File(os.path.join(self.data_dir, f'examples_{hdf5_loc}.hdf5'), 'r') as f:
            img = f['images'][self.metadata['hdf5_idx'].iloc[idx]]
            img = torch.from_numpy(img).float()
        return img

    def eval(self, y_pred, y_true, metadata, prediction_fn=None):
        """Compute precision/recall/accuracy (plus AUC when *prediction_fn*
        maps raw scores to labels, since raw scores are needed for AUC).

        Returns (results dict, human-readable summary string).
        """
        if prediction_fn is None:
            precision = precision_score(y_true, y_pred)
            recall = recall_score(y_true, y_pred)
            accuracy = accuracy_score(y_true, y_pred)
            results = {'Precision': precision, 'Recall': recall, 'Accuracy': accuracy}
            results_str = f'Precision: {precision}, Recall: {recall}, Accuracy: {accuracy}'
        else:
            precision = precision_score(y_true, prediction_fn(y_pred))
            recall = recall_score(y_true, prediction_fn(y_pred))
            accuracy = accuracy_score(y_true, prediction_fn(y_pred))
            auc = roc_auc_score(y_true, y_pred)
            results = {'Precision': precision, 'Recall': recall, 'Accuracy': accuracy, 'AUC': auc}
            results_str = f'Precision: {precision}, Recall: {recall}, Accuracy: {accuracy}, AUC: {auc}'
        return (results, results_str)
def build_from_cfg(cfg: Dict, registry: 'Registry', default_args: Optional[Dict]=None) -> Any:
    """Instantiate an object from a config dict via *registry*.

    ``cfg['type']`` names the class/function (a registry key string, or the
    callable itself); the remaining keys, merged with *default_args*
    (cfg wins), become constructor kwargs.

    Raises:
        TypeError: cfg is not a dict, registry is not a Registry, or
            default_args is neither a dict nor None, or 'type' is invalid.
        KeyError: no 'type' key anywhere, or the key is not registered.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        if (default_args is None) or ('type' not in default_args):
            raise KeyError(f'''`cfg` or `default_args` must contain the key "type", but got {cfg}
{default_args}''')
    if not isinstance(registry, Registry):
        raise TypeError(f'registry must be an Registry object, but got {type(registry)}')
    if not (isinstance(default_args, dict) or (default_args is None)):
        raise TypeError(f'default_args must be a dict or None, but got {type(default_args)}')
    args = cfg.copy()
    # default_args fill in only the keys cfg does not already set.
    if default_args is not None:
        for (name, value) in default_args.items():
            args.setdefault(name, value)
    obj_type = args.pop('type')
    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError(f'{obj_type} is not in the {registry.name} registry')
    elif inspect.isclass(obj_type) or inspect.isfunction(obj_type):
        obj_cls = obj_type
    else:
        raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}')
    try:
        return obj_cls(**args)
    except Exception as e:
        # Prepend the target name for context; chain the original exception
        # so its traceback is preserved (the original dropped the cause).
        raise type(e)(f'{obj_cls.__name__}: {e}') from e
class Trainer(object):
    """Training/validation driver around a CUDA + DataParallel-wrapped network.

    The wrapped network is expected to return
    ``(output, loss, loss_stats, _)`` when called on a batch.
    """
    def __init__(self, network):
        network = network.cuda()
        network = DataParallel(network)
        self.network = network
    def reduce_loss_stats(self, loss_stats):
        # Collapse each (possibly per-replica) loss tensor to its mean scalar.
        reduced_losses = {k: torch.mean(v) for (k, v) in loss_stats.items()}
        return reduced_losses
    def to_cuda(self, batch):
        # Move every entry of the batch dict to GPU; 'meta' stays on CPU.
        for k in batch:
            if (k == 'meta'):
                continue
            if isinstance(batch[k], tuple):
                batch[k] = [b.cuda() for b in batch[k]]
            else:
                batch[k] = batch[k].cuda()
        return batch
    def train(self, epoch, data_loader, optimizer, recorder):
        """Run one training epoch: forward, backward, clip, step, and log
        timing/loss stats to the recorder and wandb every 100 iterations."""
        max_iter = len(data_loader)
        self.network.train()
        end = time.time()
        for (iteration, batch) in enumerate(data_loader):
            data_time = (time.time() - end)
            iteration = (iteration + 1)
            recorder.step += 1
            optimizer.zero_grad()
            (output, loss, loss_stats, _) = self.network(batch)
            # Sum per-replica losses and normalize by the configured batch size.
            loss = (loss.sum() / cfg.train.batch_size)
            loss.backward()
            # Clip by value (not norm) at 40 to stabilize training.
            torch.nn.utils.clip_grad_value_(self.network.parameters(), 40)
            optimizer.step()
            loss_stats = self.reduce_loss_stats(loss_stats)
            recorder.update_loss_stats(loss_stats)
            batch_time = (time.time() - end)
            end = time.time()
            recorder.batch_time.update(batch_time)
            recorder.data_time.update(data_time)
            if (((iteration % 100) == 0) or (iteration == (max_iter - 1))):
                # Periodic console + wandb logging with an ETA estimate.
                eta_seconds = (recorder.batch_time.global_avg * (max_iter - iteration))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                lr = optimizer.param_groups[0]['lr']
                # Peak GPU memory in MiB.
                memory = ((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)
                training_state = ' '.join(['eta: {}', '{}', 'lr: {:.6f}', 'max_mem: {:.0f}'])
                training_state = training_state.format(eta_string, str(recorder), lr, memory)
                for (k, v) in recorder.loss_stats.items():
                    wlog = {f'train_{k}': v.avg, 'epoch': epoch}
                    wandb.log(wlog, step=recorder.step)
                print(training_state)
                sys.stdout.flush()
        recorder.record('train')
    def val(self, epoch, data_loader, evaluator=None, recorder=None):
        """Evaluate over *data_loader*, averaging loss stats per batch and
        logging them; optionally runs an evaluator and records the results."""
        self.network.eval()
        torch.cuda.empty_cache()
        val_loss_stats = {}
        data_size = len(data_loader)
        for batch in tqdm.tqdm(data_loader):
            for k in batch:
                if (k != 'meta'):
                    batch[k] = batch[k].cuda()
            with torch.no_grad():
                # NOTE(review): 'step' is computed here but never used below.
                if recorder:
                    step = recorder.step
                else:
                    step = (- 1)
                (output, loss, loss_stats, _) = self.network(batch)
                if (evaluator is not None):
                    evaluator.evaluate(output, batch)
            loss_stats = self.reduce_loss_stats(loss_stats)
            # Accumulate running sums per loss key.
            for (k, v) in loss_stats.items():
                val_loss_stats.setdefault(k, 0)
                val_loss_stats[k] += v
        loss_state = []
        for k in val_loss_stats.keys():
            # Convert running sums into per-batch averages.
            val_loss_stats[k] /= data_size
            loss_state.append('{}: {:.4f}'.format(k, val_loss_stats[k]))
            wandb.log({f'val_{k}': val_loss_stats[k], 'epoch': epoch})
        print(loss_state)
        if (evaluator is not None):
            result = evaluator.summarize()
            val_loss_stats.update(result)
        if recorder:
            recorder.record('val', epoch, val_loss_stats)
def cuda_pointwise_context(loop_levels, block_count, block_size):
    """Generator-based context that temporarily overrides the tensor-expression
    CUDA pointwise codegen settings (loop levels, block count, block size).

    Falsy arguments leave the corresponding setting untouched. The previous
    values are restored on exit even if the body raises — the original code
    skipped restoration on exceptions, leaking the overrides.
    """
    old_loop_levels = old_block_count = old_block_size = None
    if loop_levels:
        old_loop_levels = torch._C._jit_get_te_cuda_pointwise_loop_levels()
        torch._C._jit_set_te_cuda_pointwise_loop_levels(loop_levels)
    if block_count:
        old_block_count = torch._C._jit_get_te_cuda_pointwise_block_count()
        torch._C._jit_set_te_cuda_pointwise_block_count(block_count)
    if block_size:
        old_block_size = torch._C._jit_get_te_cuda_pointwise_block_size()
        torch._C._jit_set_te_cuda_pointwise_block_size(block_size)
    try:
        yield
    finally:
        # Restore prior values unconditionally for any setting we changed.
        if loop_levels:
            torch._C._jit_set_te_cuda_pointwise_loop_levels(old_loop_levels)
        if block_count:
            torch._C._jit_set_te_cuda_pointwise_block_count(old_block_count)
        if block_size:
            torch._C._jit_set_te_cuda_pointwise_block_size(old_block_size)
def is_actor_done(actor):
    """Return True once *actor* has terminated (or was never created)."""
    if actor is None:
        return True
    # Ask the actor to terminate, then wait up to 5s for the call to resolve.
    termination_ref = actor.__ray_terminate__.remote()
    (resolved, pending) = ray.wait([termination_ref], timeout=5)
    return not pending
class MutableConfig():
    """Process-wide mutable configuration flags.

    The class attributes are the settings; instances carry no state of
    their own.
    """
    # Whether to delete temporary CoNLL files after use.
    remove_conll_tmp = False
    # Evaluation granularity.
    eval_mode = EvalMethod.Char
    # Score threshold for keeping predicted coreference mentions.
    coref_mention_threshold = 1.0

    def __init__(self):
        pass
# NOTE(review): the decorator line was garbled in this file (it read
# "_level_function()", a bare call); reconstructed as the dispatch decorator
# used throughout awkward-array — confirm against the upstream source.
@high_level_function()
def nanvar(x, weight=None, ddof=0, axis=None, *, keepdims=False, mask_identity=True, highlevel=True, behavior=None, attrs=None):
    """Variance of *x* ignoring NaN values, optionally weighted.

    NaNs are mapped to missing values before delegating to the regular
    variance implementation.
    """
    # Dispatch hook: yield the array-like arguments for type dispatch.
    (yield (x, weight))
    if weight is not None:
        weight = ak.operations.ak_nan_to_none._impl(weight, True, behavior, attrs)
    return _impl(ak.operations.ak_nan_to_none._impl(x, highlevel, behavior, attrs), weight, ddof, axis, keepdims, mask_identity, highlevel, behavior, attrs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.