def create_dataset(cfg: CfgNode, dataset_cfg: CfgNode, train: bool=True, **kwargs) -> Dataset:
dataset_type = Dataset.registry[dataset_cfg.TYPE]
return dataset_type(cfg, **to_lower(dataset_cfg), train=train, **kwargs) |
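# A minimal sketch of the registry pattern the factory above relies on.
# The definitions below (Dataset.registry, to_lower) are assumptions about
# the surrounding repo, not taken from it.
class Dataset:
    registry: dict = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Subclassing registers the dataset under its class name, so a config
        # with TYPE: "MyDataset" resolves to the MyDataset class.
        Dataset.registry[cls.__name__] = cls

def to_lower(dataset_cfg) -> dict:
    # Lowercase config keys (minus TYPE) so they can be passed as kwargs.
    return {k.lower(): v for k, v in dict(dataset_cfg).items() if k != 'TYPE'}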
def get_result(setup):
result_dict = {}
for dataset in dataset_all:
result_dict[dataset] = {}
sub_result_dict = result_dict[dataset]
basedir = f'{base_output_dir}/{dataset}/{setup}'
for (alg_name, alg_name_long) in algorithm_all.items():
if (alg_name in ['CLIPPretrained', 'CLIPBase']):
sub_result_dict[alg_name] = {}
subsub_result_dict = sub_result_dict[alg_name]
output_dir = os.path.join(basedir, f'{alg_name_long}/base')
print(output_dir)
((acc_tgt, acc_src, acc_tgt_in), (acc_tgt_std, acc_src_std, acc_tgt_in_std)) = get_results(output_dir, select_method)
subsub_result_dict['acc_tgt'] = acc_tgt
subsub_result_dict['acc_src'] = acc_src
subsub_result_dict['acc_tgt_in'] = acc_tgt_in
subsub_result_dict['acc_diff'] = (acc_src - acc_tgt)
subsub_result_dict['acc_tgt_std'] = acc_tgt_std
subsub_result_dict['acc_src_std'] = acc_src_std
subsub_result_dict['acc_tgt_in_std'] = acc_tgt_in_std
else:
lambda_str_array = list(map((lambda s: s.split('_')[(- 1)]), glob.glob(os.path.join(basedir, f'{alg_name_long}/*'))))
lambda_str_array = sorted(lambda_str_array, key=(lambda r: float(r)))
lambda_val_array = np.array(list(map((lambda s: float(s)), lambda_str_array)))
for lambda_str in lambda_str_array:
sub_result_dict[(alg_name + '_lambda_{}'.format(lambda_str))] = {}
subsub_result_dict = sub_result_dict[(alg_name + '_lambda_{}'.format(lambda_str))]
output_dir = os.path.join(basedir, f'{alg_name_long}/lambda_{lambda_str}')
((acc_tgt, acc_src, acc_tgt_in), (acc_tgt_std, acc_src_std, acc_tgt_in_std)) = get_results(output_dir, select_method)
subsub_result_dict['acc_tgt'] = acc_tgt
subsub_result_dict['acc_src'] = acc_src
subsub_result_dict['acc_tgt_in'] = acc_tgt_in
subsub_result_dict['acc_diff'] = (acc_src - acc_tgt)
subsub_result_dict['acc_tgt_std'] = acc_tgt_std
subsub_result_dict['acc_src_std'] = acc_src_std
subsub_result_dict['acc_tgt_in_std'] = acc_tgt_in_std
return result_dict |
def flatten_batch_lists(batch_list, nb_batches):
flat_list = []
for b in range(nb_batches):
flat_list += batch_list[b]
return flat_list |
class MLP_2HL(nn.Module):
def __init__(self, dim_in, dim_hidden1, dim_hidden2, sparse=False, bn=True):
super(MLP_2HL, self).__init__()
self.in_layer = (SpLinear(dim_in, dim_hidden1) if sparse else nn.Linear(dim_in, dim_hidden1))
self.dropout_layer = nn.Dropout(0.0)
self.lrelu = nn.LeakyReLU(0.1)
self.relu = nn.ReLU()
self.hidden_layer = nn.Linear(dim_hidden1, dim_hidden2)
self.out_layer = nn.Linear(int(dim_hidden2), 1)
self.bn = nn.BatchNorm1d(dim_hidden1)
self.bn2 = nn.BatchNorm1d(dim_in)
def forward(self, x, lower_f):
if (lower_f is not None):
x = torch.cat([x, lower_f], dim=1)
x = self.bn2(x)
out = self.lrelu(self.in_layer(x))
out = self.bn(out)
out = self.hidden_layer(out)
return (out, self.out_layer(self.relu(out)).squeeze())
    @classmethod
    def get_model(cls, stage, opt):
if (stage == 0):
dim_in = opt.feat_d
else:
dim_in = (opt.feat_d + opt.hidden_d)
model = MLP_2HL(dim_in, opt.hidden_d, opt.hidden_d, opt.sparse)
return model |
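# Hedged usage sketch for the boosted-MLP stages above: each stage after the
# first consumes the raw features concatenated with the previous stage's
# penultimate activations. The `opt` fields are assumptions matching get_model.
import types
import torch

opt = types.SimpleNamespace(feat_d=16, hidden_d=32, sparse=False)
stage0 = MLP_2HL.get_model(0, opt)   # stage 0 sees raw features only
stage1 = MLP_2HL.get_model(1, opt)   # later stages see features + hidden state

x = torch.randn(8, opt.feat_d)
hidden0, pred0 = stage0(x, None)     # hidden0: (8, 32), pred0: (8,)
hidden1, pred1 = stage1(x, hidden0)  # forward() concatenates x and hidden0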
class TestGFortranVersions(object):
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for (vs, version) in gfortran_version_strings:
v = fc.version_match(vs)
assert_((v == version), (vs, v))
def test_not_gfortran(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for (vs, _) in g77_version_strings:
v = fc.version_match(vs)
assert_((v is None), (vs, v)) |
def test_prefitted_throws_error():
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
st = SelfTrainingClassifier(knn)
with pytest.raises(NotFittedError, match='This SelfTrainingClassifier instance is not fitted yet'):
st.predict(X_train) |
def run_and_return_first_line(run_lambda, command):
(rc, out, _) = run_lambda(command)
if (rc != 0):
return None
return out.split('\n')[0] |
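# Example run_lambda matching the (rc, stdout, stderr) convention used above
# (a hedged sketch; the real caller defines its own run_lambda):
import subprocess

def run(command):
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    return proc.returncode, proc.stdout.strip(), proc.stderr.strip()

first_line = run_and_return_first_line(run, 'gcc --version')  # None if gcc missing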
class Partition15(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:15'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.21', 'l_1': 'decoder.block.22', 'l_2': 'decoder.block.23', 'l_3': 'decoder.final_layer_norm', 'l_4': 'decoder.dropout', 'l_5': 'lm_head'}
self.to(self.device)
def forward(self, *args):
(labels, x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_1 = self.l_3(t_1)
t_2 = t_0[2]
t_0 = t_0[3]
t_1 = self.l_4(t_1)
t_1 = (t_1 * 0.03125)
t_1 = self.l_5(t_1)
t_3 = t_1.size((- 1))
t_3 = t_1.view((- 1), t_3)
t_1 = labels.view((- 1))
t_1 = torch.nn.functional.cross_entropy(t_3, t_1, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean')
return (t_1,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs) |
def get_random_predictions(train_file, test_file, iterations=1000):
df = pd.read_csv(train_file)
total_label = df['label'].to_numpy()
total_ones = np.sum(total_label)
total_zeros = (len(total_label) - total_ones)
df = pd.read_csv(test_file)
random_label = choices([0, 1], [total_zeros, total_ones], k=len(df))
preds = []
for (i, pred) in enumerate(random_label):
preds.append({'index': i, 'prediction': int(pred)})
save_file_path = 'outs/submission_random_st1.json'
with open(save_file_path, 'w') as fp:
fp.write('\n'.join((json.dumps(i) for i in preds))) |
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
super().__init__(**kwargs)
self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
self.attention = [tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'), tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2')]
def call(self, hidden_state):
pooled = self.pooler(hidden_state)
for layer_module in self.attention:
pooled = layer_module(pooled)
hidden_state = (hidden_state * pooled)
return hidden_state |
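# Hedged smoke test for the squeeze-and-excitation layer above. Inputs are
# channels-last (NHWC), which GlobalAveragePooling2D with keepdims=True expects.
import tensorflow as tf

se = TFRegNetSELayer(in_channels=64, reduced_channels=16)
x = tf.random.normal((2, 7, 7, 64))
y = se(x)                  # (2, 1, 1, 64) gate broadcast over spatial dims
assert y.shape == x.shape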
def ground_truth_reconstruct_multi(inp, cfg):
with torch.no_grad():
assert hasattr(cfg, 'inference')
step_size_ratio = float(getattr(cfg.inference, 'step_size_ratio', 1))
num_steps = int(getattr(cfg.inference, 'num_steps', 5))
num_points = int(getattr(cfg.inference, 'num_points', inp.size(1)))
weight = float(getattr(cfg.inference, 'weight', 1))
x = get_prior(inp.size(0), num_points, cfg.models.scorenet.dim).cuda()
if hasattr(cfg.trainer, 'sigmas'):
sigmas = cfg.trainer.sigmas
else:
sigma_begin = float(cfg.trainer.sigma_begin)
sigma_end = float(cfg.trainer.sigma_end)
num_classes = int(cfg.trainer.sigma_num)
sigmas = np.exp(np.linspace(np.log(sigma_begin), np.log(sigma_end), num_classes))
x_list = []
x_list.append(x.clone())
(bs, num_pts) = (x.size(0), x.size(1))
for sigma in sigmas:
sigma = (torch.ones((1,)) * sigma)
sigma = sigma.cuda()
step_size = ((2 * (sigma ** 2)) * step_size_ratio)
for t in range(num_steps):
z_t = (torch.randn_like(x) * weight)
x += (torch.sqrt(step_size) * z_t)
grad = ground_truth_field(x, inp, sigma)
x += ((0.5 * step_size) * grad)
x_list.append(x.clone())
return (x, x_list) |
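# Note on the sampler above: the inner loop is annealed Langevin dynamics.
# For each noise level sigma, with step size eps = 2 * sigma^2 * step_size_ratio,
# it iterates
#     x <- x + sqrt(eps) * z + (eps / 2) * grad,    z ~ N(0, I) (scaled by `weight`),
# where grad is the ground-truth score field at noise level sigma, and the
# sigmas sweep log-uniformly from sigma_begin down to sigma_end.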
class RegNetConfig(PretrainedConfig):
model_type = 'regnet'
layer_types = ['x', 'y']
def __init__(self, num_channels=3, embedding_size=32, hidden_sizes=[128, 192, 512, 1088], depths=[2, 6, 12, 2], groups_width=64, layer_type='y', hidden_act='relu', **kwargs):
super().__init__(**kwargs)
if (layer_type not in self.layer_types):
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.groups_width = groups_width
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = True |
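# Hedged usage sketch for the config class above (field names follow __init__):
config = RegNetConfig(layer_type='x', depths=[2, 4, 10, 2])
assert config.layer_type in RegNetConfig.layer_types

# Invalid layer types are rejected up front:
try:
    RegNetConfig(layer_type='z')
except ValueError as err:
    print(err)  # layer_type=z is not one of x,y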
class TFRobertaForNaturalQuestionAnswering(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, name='roberta')
self.initializer = get_initializer(config.initializer_range)
self.qa_outputs = L.Dense(config.num_labels, kernel_initializer=self.initializer, name='qa_outputs')
self.long_outputs = L.Dense(1, kernel_initializer=self.initializer, name='long_outputs')
def call(self, inputs, **kwargs):
outputs = self.roberta(inputs, **kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
(start_logits, end_logits) = tf.split(logits, 2, axis=(- 1))
start_logits = tf.squeeze(start_logits, (- 1))
end_logits = tf.squeeze(end_logits, (- 1))
long_logits = tf.squeeze(self.long_outputs(sequence_output), (- 1))
return (start_logits, end_logits, long_logits) |
def _delta_poly(prec=10):
if (prec <= 0):
raise ValueError('prec must be positive')
v = ([0] * prec)
stop = int((((- 1) + math.sqrt((1 + (8 * prec)))) / 2.0))
values = [(((n * (n + 1)) // 2), ((((- 2) * n) - 1) if (n & 1) else ((2 * n) + 1))) for n in range((stop + 1))]
for (i1, v1) in values:
for (i2, v2) in values:
try:
v[(i1 + i2)] += (v1 * v2)
except IndexError:
break
f = Fmpz_poly(v)
t = verbose('made series')
f = (f * f)
f._unsafe_mutate_truncate(prec)
t = verbose('squared (2 of 3)', t)
f = (f * f)
f._unsafe_mutate_truncate((prec - 1))
t = verbose('squared (3 of 3)', t)
f = f.left_shift(1)
t = verbose('shifted', t)
return f |
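# What _delta_poly computes, step by step: `values` encodes Jacobi's identity
#     sum_{n>=0} (-1)^n (2n+1) q^(n(n+1)/2)  =  eta(q)^3 / q^(1/8),
# the double loop squares that series (eta^6), the two subsequent squarings
# give eta^12 and eta^24, and the final left shift multiplies by q, yielding
# the q-expansion of Delta(q) = q * prod_{n>=1} (1 - q^n)^24 to precision prec.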
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum((- 1), keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean |
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_int_time(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
y = whas500.y
y_int = np.empty(y.shape[0], dtype=[(y.dtype.names[0], bool), (y.dtype.names[1], int)])
y_int[:] = y
forest_f = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y[50:])
forest_i = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y_int[50:])
assert (len(forest_f.estimators_) == len(forest_i.estimators_))
assert (forest_f.n_features_in_ == forest_i.n_features_in_)
assert (forest_f.oob_score_ == forest_i.oob_score_)
assert_array_almost_equal(forest_f.unique_times_, forest_i.unique_times_)
pred_f = forest_f.predict(whas500.x[:50])
pred_i = forest_i.predict(whas500.x[:50])
assert_array_almost_equal(pred_f, pred_i) |
def test_gdb():
global have_gdb
if (have_gdb is not None):
return have_gdb
have_gdb = False
try:
p = subprocess.Popen(['gdb', '-nx', '--version'], stdout=subprocess.PIPE)
except OSError:
gdb_version = None
else:
(stdout, _) = p.communicate()
regex = 'GNU gdb [^\\d]*(\\d+)\\.(\\d+)'
gdb_version = re.match(regex, stdout.decode('ascii', 'ignore'))
if gdb_version:
gdb_version_number = list(map(int, gdb_version.groups()))
if (gdb_version_number >= [7, 2]):
have_gdb = True
with tempfile.NamedTemporaryFile(mode='w+') as python_version_script:
python_version_script.write('python import sys; print("%s %s" % sys.version_info[:2])')
python_version_script.flush()
p = subprocess.Popen(['gdb', '-batch', '-x', python_version_script.name], stdout=subprocess.PIPE)
(stdout, _) = p.communicate()
try:
internal_python_version = list(map(int, stdout.decode('ascii', 'ignore').split()))
if (internal_python_version < [2, 6]):
have_gdb = False
except ValueError:
have_gdb = False
if (not have_gdb):
warnings.warn('Skipping gdb tests, need gdb >= 7.2 with Python >= 2.6')
return have_gdb |
def main(args):
if (args.num_envs is None):
import multiprocessing as mp
args.num_envs = max((mp.cpu_count() - 1), 1)
merge_args_into_config(args, config)
if (config.gamma < 1.0):
config.clip_target_range = (np.round((- (1 / (1 - config.gamma))), 2), 0.0)
if (config.gamma == 1):
config.clip_target_range = (np.round(((- args.env_max_step) - 5), 2), 0.0)
if args.sparse_reward_shaping:
config.clip_target_range = ((- np.inf), np.inf)
config.agent_name = make_agent_name(config, ['env', 'alg', 'her', 'layers', 'seed', 'tb', 'ag_curiosity', 'eexplore', 'first_visit_succ', 'dg_score_multiplier', 'alpha'], prefix=args.prefix)
config.update(dict(trainer=StandardTrain(), evaluation=EpisodicEval(), policy=ActorPolicy(), logger=Logger(), state_normalizer=Normalizer(MeanStdNormalizer()), replay=OnlineHERBuffer()))
config.prioritized_mode = args.prioritized_mode
if (config.prioritized_mode == 'mep'):
config.prioritized_replay = EntropyPrioritizedOnlineHERBuffer()
if args.sweep_safety_interest:
assert ('sweep' in args.env)
config.ag_interest = SweepSafetyInterest()
if (not args.no_ag_kde):
config.ag_kde = RawKernelDensity('ag', optimize_every=1, samples=10000, kernel=args.kde_kernel, bandwidth=args.bandwidth, log_entropy=True)
if (args.ag_curiosity is not None):
config.dg_kde = RawKernelDensity('dg', optimize_every=500, samples=10000, kernel='tophat', bandwidth=0.2)
config.ag_kde_tophat = RawKernelDensity('ag', optimize_every=100, samples=10000, kernel='tophat', bandwidth=0.2, tag='_tophat')
if args.transition_to_dg:
config.alpha_curiosity = CuriosityAlphaMixtureModule()
if ('rnd' in args.ag_curiosity):
config.ag_rnd = RandomNetworkDensity('ag')
if ('flow' in args.ag_curiosity):
config.ag_flow = FlowDensity('ag')
use_qcutoff = (not args.no_cutoff)
if (args.ag_curiosity == 'minq'):
config.ag_curiosity = QAchievedGoalCuriosity(max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'randq'):
config.ag_curiosity = QAchievedGoalCuriosity(max_steps=args.env_max_step, randomize=True, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'minkde'):
config.ag_curiosity = DensityAchievedGoalCuriosity(max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'minrnd'):
config.ag_curiosity = DensityAchievedGoalCuriosity('ag_rnd', max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'minflow'):
config.ag_curiosity = DensityAchievedGoalCuriosity('ag_flow', max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'randkde'):
config.ag_curiosity = DensityAchievedGoalCuriosity(alpha=args.alpha, max_steps=args.env_max_step, randomize=True, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'randrnd'):
config.ag_curiosity = DensityAchievedGoalCuriosity('ag_rnd', alpha=args.alpha, max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'randflow'):
config.ag_curiosity = DensityAchievedGoalCuriosity('ag_flow', alpha=args.alpha, max_steps=args.env_max_step, num_sampled_ags=args.num_sampled_ags, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'goaldisc'):
config.success_predictor = GoalSuccessPredictor(batch_size=args.succ_bs, history_length=args.succ_hl, optimize_every=args.succ_oe)
config.ag_curiosity = SuccessAchievedGoalCuriosity(max_steps=args.env_max_step, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
elif (args.ag_curiosity == 'entropygainscore'):
config.bg_kde = RawKernelDensity('bg', optimize_every=args.env_max_step, samples=10000, kernel=args.kde_kernel, bandwidth=args.bandwidth, log_entropy=True)
config.bgag_kde = RawJointKernelDensity(['bg', 'ag'], optimize_every=args.env_max_step, samples=10000, kernel=args.kde_kernel, bandwidth=args.bandwidth, log_entropy=True)
config.ag_curiosity = EntropyGainScoringGoalCuriosity(max_steps=args.env_max_step, use_qcutoff=use_qcutoff, keep_dg_percent=args.keep_dg_percent)
else:
raise NotImplementedError
    if args.noise_type.lower() == 'gaussian':
        noise_type = GaussianProcess
    elif args.noise_type.lower() == 'ou':
        noise_type = OrnsteinUhlenbeckProcess
    else:
        raise NotImplementedError
config.action_noise = ContinuousActionNoise(noise_type, std=ConstantSchedule(args.action_noise))
if (args.alg.lower() == 'ddpg'):
config.algorithm = DDPG()
elif (args.alg.lower() == 'td3'):
config.algorithm = TD3()
config.target_network_update_freq *= 2
elif (args.alg.lower() == 'dqn'):
config.algorithm = DQN()
config.policy = QValuePolicy()
config.qvalue_lr = config.critic_lr
config.qvalue_weight_decay = config.actor_weight_decay
config.double_q = True
config.random_action_prob = LinearSchedule(1.0, config.eexplore, 100000.0)
else:
raise NotImplementedError
(env, eval_env) = make_env(args)
if args.first_visit_done:
(env1, eval_env1) = (env, eval_env)
env = (lambda : FirstVisitDoneWrapper(env1()))
eval_env = (lambda : FirstVisitDoneWrapper(eval_env1()))
if args.first_visit_succ:
config.first_visit_succ = True
config.train_env = EnvModule(env, num_envs=args.num_envs, seed=args.seed)
config.eval_env = EnvModule(eval_env, num_envs=args.num_eval_envs, name='eval_env', seed=(args.seed + 1138))
e = config.eval_env
if (args.alg.lower() == 'dqn'):
config.qvalue = PytorchModel('qvalue', (lambda : Critic(FCBody((e.state_dim + e.goal_dim), args.layers, nn.LayerNorm, make_activ(config.activ)), e.action_dim)))
else:
config.actor = PytorchModel('actor', (lambda : Actor(FCBody((e.state_dim + e.goal_dim), args.layers, nn.LayerNorm, make_activ(config.activ)), e.action_dim, e.max_action)))
config.critic = PytorchModel('critic', (lambda : Critic(FCBody(((e.state_dim + e.goal_dim) + e.action_dim), args.layers, nn.LayerNorm, make_activ(config.activ)), 1)))
if (args.alg.lower() == 'td3'):
config.critic2 = PytorchModel('critic2', (lambda : Critic(FCBody(((e.state_dim + e.goal_dim) + e.action_dim), args.layers, nn.LayerNorm, make_activ(config.activ)), 1)))
if (args.ag_curiosity == 'goaldisc'):
config.goal_discriminator = PytorchModel('goal_discriminator', (lambda : Critic(FCBody((e.state_dim + e.goal_dim), args.layers, nn.LayerNorm, make_activ(config.activ)), 1)))
if (args.reward_module == 'env'):
config.goal_reward = GoalEnvReward()
elif (args.reward_module == 'intrinsic'):
config.goal_reward = NeighborReward()
config.neighbor_embedding_network = PytorchModel('neighbor_embedding_network', (lambda : FCBody(e.goal_dim, (256, 256))))
else:
raise ValueError('Unsupported reward module: {}'.format(args.reward_module))
if config.eval_env.goal_env:
if (not (args.first_visit_done or args.first_visit_succ)):
config.never_done = True
agent = mrl.config_to_agent(config)
if args.visualize_trained_agent:
print('Loading agent at epoch {}'.format(0))
agent.load('checkpoint')
if args.intrinsic_visualization:
agent.eval_mode()
agent.train(10000, render=True, dont_optimize=True)
else:
agent.eval_mode()
env = agent.eval_env
for _ in range(10000):
print('NEW EPISODE')
state = env.reset()
env.render()
done = False
while (not done):
time.sleep(0.02)
action = agent.policy(state)
(state, reward, done, info) = env.step(action)
env.render()
print(reward[0])
elif args.collect_noisy_expert:
print('Loading agent at epoch {}'.format(0))
agent.load('checkpoint')
agent.eval_mode()
env = agent.eval_env
raw_env = agent.eval_env.env.envs[0]
collected_exps = []
while (len(collected_exps) < 500000):
print(len(collected_exps))
state = env.reset()
raw_env.smaller_state = True
state1 = raw_env._get_obs()['observation']
raw_env.smaller_state = False
done = False
n_steps = 0
while (n_steps < 50):
n_steps += 1
action = agent.policy(state)
if (np.random.random() < 0.5):
action = agent.action_noise(action)
(next_state, reward, done, info) = env.step(action)
raw_env.smaller_state = True
next_state1 = raw_env._get_obs()['observation']
raw_env.smaller_state = False
collected_exps.append((state1, action, next_state1, reward, done))
state = next_state
state1 = next_state1
print([i['is_success'] for i in info])
import pickle
with open(os.path.join(agent.config.parent_folder, 'noisy_expert_buffer1.pickle'), 'wb') as f:
pickle.dump(collected_exps, f)
print('done & saved!')
else:
ag_buffer = agent.replay_buffer.buffer.BUFF.buffer_ag
bg_buffer = agent.replay_buffer.buffer.BUFF.buffer_bg
res = np.mean(agent.eval(num_episodes=30).rewards)
agent.logger.log_color('Initial test reward (30 eps):', '{:.2f}'.format(res))
for epoch in range(int((args.max_steps // args.epoch_len))):
t = time.time()
agent.train(num_steps=args.epoch_len)
if args.save_embeddings:
sample_idxs = np.random.choice(len(ag_buffer), size=min(len(ag_buffer), args.epoch_len), replace=False)
last_idxs = np.arange(max(0, (len(ag_buffer) - args.epoch_len)), len(ag_buffer))
agent.logger.add_embedding('rand_ags', ag_buffer.get_batch(sample_idxs))
agent.logger.add_embedding('last_ags', ag_buffer.get_batch(last_idxs))
agent.logger.add_embedding('last_bgs', bg_buffer.get_batch(last_idxs))
res = np.mean(agent.eval(num_episodes=30).rewards)
agent.logger.log_color('Test reward (30 eps):', '{:.2f}'.format(res))
agent.logger.log_color('Epoch time:', '{:.2f}'.format((time.time() - t)), color='yellow')
print('Saving agent at epoch {}'.format(epoch))
agent.save('checkpoint') |
@test_utils.test()
def test_reduction_non_full_warp():
    @ti.kernel
    def test() -> ti.i32:
hit_time = 1
ti.loop_config(block_dim=8)
for i in range(8):
ti.atomic_min(hit_time, 1)
return hit_time
assert (test() == 1) |
class TestCategoryFolderIO(object):
    @pytest.mark.slow
def test_imdb(self, spacy_nlp_en):
folder_io = CategoryFolderIO(categories=['pos', 'neg'], mapping={'<br />': '\n'}, tokenize_callback=spacy_nlp_en, encoding='utf-8', case_mode='lower')
train_data = folder_io.read('data/imdb/train')
test_data = folder_io.read('data/imdb/test')
assert (len(train_data) == 25000)
assert (len(test_data) == 25000) |
class InstanceData(GeneralData):
def __setattr__(self, name, value):
if (name in ('_meta_info_fields', '_data_fields')):
if (not hasattr(self, name)):
super().__setattr__(name, value)
else:
raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
else:
assert isinstance(value, (torch.Tensor, np.ndarray, list)), f'Can set {type(value)}, only support {(torch.Tensor, np.ndarray, list)}'
if self._data_fields:
assert (len(value) == len(self)), f'the length of values {len(value)} is not consistent with the length of this :obj:`InstanceData` {len(self)} '
super().__setattr__(name, value)
def __getitem__(self, item):
        assert len(self), 'This is an empty instance'
assert isinstance(item, (str, slice, int, torch.LongTensor, torch.BoolTensor))
if isinstance(item, str):
return getattr(self, item)
if (type(item) == int):
if ((item >= len(self)) or (item < (- len(self)))):
raise IndexError(f'Index {item} out of range!')
else:
item = slice(item, None, len(self))
new_data = self.new()
if isinstance(item, torch.Tensor):
assert (item.dim() == 1), 'Only support to get the values along the first dimension.'
if isinstance(item, torch.BoolTensor):
assert (len(item) == len(self)), f'The shape of the input(BoolTensor)) {len(item)} does not match the shape of the indexed tensor in results_filed {len(self)} at first dimension. '
for (k, v) in self.items():
if isinstance(v, torch.Tensor):
new_data[k] = v[item]
elif isinstance(v, np.ndarray):
new_data[k] = v[item.cpu().numpy()]
elif isinstance(v, list):
r_list = []
if isinstance(item, torch.BoolTensor):
indexes = torch.nonzero(item).view((- 1))
else:
indexes = item
for index in indexes:
r_list.append(v[index])
new_data[k] = r_list
else:
for (k, v) in self.items():
new_data[k] = v[item]
return new_data
    @staticmethod
    def cat(instances_list):
assert all((isinstance(results, InstanceData) for results in instances_list))
assert (len(instances_list) > 0)
if (len(instances_list) == 1):
return instances_list[0]
new_data = instances_list[0].new()
for k in instances_list[0]._data_fields:
values = [results[k] for results in instances_list]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, np.ndarray):
values = np.concatenate(values, axis=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
else:
raise ValueError(f'Can not concat the {k} which is a {type(v0)}')
new_data[k] = values
return new_data
def __len__(self):
if len(self._data_fields):
for v in self.values():
return len(v)
else:
raise AssertionError('This is an empty `InstanceData`.') |
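# Hedged usage sketch for the boolean-mask indexing above (assumes the
# GeneralData base class provides new(), items(), and the _data_fields
# bookkeeping, as in mmdet):
import numpy as np
import torch

data = InstanceData()
data.bboxes = torch.rand(4, 4)
data.labels = np.array([0, 1, 1, 2])
data.names = ['a', 'b', 'c', 'd']

keep = torch.tensor([True, False, True, True])
subset = data[keep]   # tensors, arrays and lists stay aligned
assert len(subset) == 3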
def calculate_fid_no_img(img_i, activations_pred, activations_target, eps=1e-06):
activations_pred = activations_pred.copy()
activations_pred[img_i] = activations_target[img_i]
return calculate_frechet_distance(activations_pred, activations_target, eps=eps) |
def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None, tmp=False):
checkpoint = _load_checkpoint(filename, map_location, logger)
if (not isinstance(checkpoint, dict)):
raise RuntimeError(f'No state_dict found in checkpoint file {filename}')
if ('state_dict' in checkpoint):
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
for (k, v) in state_dict.items():
print(k, v.shape)
print('')
for (k, v) in model.state_dict().items():
print(k, v.shape)
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for (k, v) in state_dict.items()}
if tmp:
if list(state_dict.keys())[0].startswith('online'):
state_dict = {k[11:]: v for (k, v) in state_dict.items()}
elif list(state_dict.keys())[0].startswith('encoder_q'):
state_dict_new = {}
for (k, v) in state_dict.items():
if k.startswith('encoder_q.fc.fc1.'):
state_dict_new[('1.fc0.' + k[17:])] = v
elif k.startswith('encoder_q.fc.fc2.'):
state_dict_new[('1.fc1.' + k[17:])] = v
elif k.startswith('encoder_q.fc.bn1.'):
state_dict_new[('1.bn0.' + k[17:])] = v
elif (not k.startswith('predictor')):
state_dict_new[('0.' + k[10:])] = v
load_state_dict(model, state_dict_new, strict, logger)
return checkpoint |
import contextlib

@contextlib.contextmanager
def stream(stream):
if (stream is None):
(yield)
return
src_prev_stream = current_stream()
if (src_prev_stream.device != stream.device):
with device(stream.device):
dst_prev_stream = current_stream()
torch._C._cuda_setStream(stream._cdata)
try:
(yield)
finally:
if (src_prev_stream.device != stream.device):
torch._C._cuda_setStream(dst_prev_stream._cdata)
torch._C._cuda_setStream(src_prev_stream._cdata) |
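# Hedged usage sketch, assuming the generator above is wrapped in
# contextlib.contextmanager (restored above), mirroring torch.cuda.stream:
import torch

if torch.cuda.is_available():
    side_stream = torch.cuda.Stream()
    with stream(side_stream):
        y = torch.ones(4, device='cuda') * 2   # kernels here are queued on side_stream
    torch.cuda.current_stream().wait_stream(side_stream)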
def batchnorm_refusing_node_matchers():
bn_node = NodeOperationMatcher(BatchNorm2d)
source_node = (NodeOperationMatcher(Conv2d) | NodeOperationMatcher(ConvTranspose2d))
return (bn_node, source_node) |
class ComputationCache():
def __init__(self, chromosome, fitness_functions: (list[FitnessFunction] | None)=None, coverage_functions: (list[CoverageFunction] | None)=None, fitness_cache: (dict[(FitnessFunction, float)] | None)=None, is_covered_cache: (dict[(FitnessFunction, bool)] | None)=None, coverage_cache: (dict[(CoverageFunction, float)] | None)=None):
self._chromosome = chromosome
self._fitness_functions = (fitness_functions if fitness_functions else [])
self._coverage_functions = (coverage_functions if coverage_functions else [])
self._fitness_cache: dict[(FitnessFunction, float)] = (fitness_cache if fitness_cache else {})
self._is_covered_cache: dict[(FitnessFunction, bool)] = (is_covered_cache if is_covered_cache else {})
self._coverage_cache: dict[(CoverageFunction, float)] = (coverage_cache if coverage_cache else {})
def clone(self, new_chromosome) -> ComputationCache:
return ComputationCache(new_chromosome, list(self._fitness_functions), list(self._coverage_functions), dict(self._fitness_cache), dict(self._is_covered_cache), dict(self._coverage_cache))
def get_fitness_functions(self) -> list[FitnessFunction]:
return self._fitness_functions
def add_fitness_function(self, fitness_function: FitnessFunction) -> None:
assert (not fitness_function.is_maximisation_function()), 'Currently only minimization is supported'
self._fitness_functions.append(fitness_function)
def get_coverage_functions(self) -> list[CoverageFunction]:
return self._coverage_functions
def add_coverage_function(self, coverage_function: CoverageFunction) -> None:
self._coverage_functions.append(coverage_function)
T = TypeVar('T', CoverageFunction, FitnessFunction)
def _check_cache(self, comp: Callable[([(T | None)], None)], cache: dict[(T, Any)], funcs: list[T], only: (T | None)=None) -> None:
if self._chromosome.changed:
self.invalidate_cache()
comp(only)
self._chromosome.changed = False
elif (len(cache) != len(funcs)):
comp(only)
def _compute_fitness(self, only: (FitnessFunction | None)=None):
for fitness_func in (self._fitness_functions if (only is None) else (only,)):
if (fitness_func not in self._fitness_cache):
new_value = fitness_func.compute_fitness(self._chromosome)
assert ((not math.isnan(new_value)) and (not math.isinf(new_value)) and (new_value >= 0)), f'Invalid fitness value {new_value}'
self._fitness_cache[fitness_func] = new_value
self._is_covered_cache[fitness_func] = (new_value == 0.0)
def _compute_is_covered(self, only: (FitnessFunction | None)=None):
for fitness_func in (self._fitness_functions if (only is None) else (only,)):
if (fitness_func not in self._is_covered_cache):
new_value = fitness_func.compute_is_covered(self._chromosome)
self._is_covered_cache[fitness_func] = new_value
def _compute_coverage(self, only: (CoverageFunction | None)=None):
for coverage_func in (self._coverage_functions if (only is None) else (only,)):
if (coverage_func not in self._coverage_cache):
new_value = coverage_func.compute_coverage(self._chromosome)
assert ((not math.isnan(new_value)) and (not math.isinf(new_value)) and (0 <= new_value <= 1)), f'Invalid coverage value {new_value}'
self._coverage_cache[coverage_func] = new_value
def invalidate_cache(self) -> None:
self._fitness_cache.clear()
self._is_covered_cache.clear()
self._coverage_cache.clear()
def get_fitness(self) -> float:
self._check_cache(self._compute_fitness, self._fitness_cache, self._fitness_functions)
return sum(self._fitness_cache.values())
def get_fitness_for(self, fitness_function: FitnessFunction) -> float:
self._check_cache(self._compute_fitness, self._fitness_cache, self._fitness_functions, fitness_function)
return self._fitness_cache[fitness_function]
def get_is_covered(self, fitness_function: FitnessFunction) -> bool:
self._check_cache(self._compute_is_covered, self._is_covered_cache, self._fitness_functions, fitness_function)
return self._is_covered_cache[fitness_function]
def get_coverage(self) -> float:
self._check_cache(self._compute_coverage, self._coverage_cache, self._coverage_functions)
return statistics.mean(self._coverage_cache.values())
def get_coverage_for(self, coverage_function: CoverageFunction) -> float:
self._check_cache(self._compute_coverage, self._coverage_cache, self._coverage_functions, coverage_function)
return self._coverage_cache[coverage_function] |
def show_seg_data(idx, dataset, out_dir, filename, show=False):
example = dataset.prepare_train_data(idx)
points = example['points']._data.numpy()
gt_seg = example['pts_semantic_mask']._data.numpy()
show_seg_result(points, gt_seg.copy(), None, out_dir, filename, np.array(dataset.PALETTE), dataset.ignore_index, show=show, snapshot=True) |
def _get_optimizer_state(optimizer):
states = loads(optimizer._updaters[0].get_states(dump_optimizer=False))
result_states = {}
for (state_key, state_tuple) in states.items():
for (state_ind, state) in enumerate(state_tuple):
result_states[f'opt_state__{state_key}__{state_ind}'] = state.asnumpy()
return result_states |
def mlp_module(x0, x1):
h1_0 = PF.affine(x0, 100, name='affine1_0')
h1_1 = PF.affine(x1, 100, name='affine1_0')
h1 = F.tanh((h1_0 + h1_1))
h2 = F.tanh(PF.affine(h1, 50, name='affine2'))
y0 = PF.affine(h2, 10, name='affiney_0')
y1 = PF.affine(h2, 10, name='affiney_1')
return (y0, y1) |
def _gen_torch_functional_registered_ops():
ops = ['stft', 'istft', 'lu', 'lu_unpack', 'cdist', 'norm', 'unique', 'unique_consecutive']
return set((getattr(torch.functional, name) for name in ops)) |
def base_axis_1_reshape_with_neg_1(x):
h = PF.convolution(x, 3, (3, 3), pad=(0, 0), name='c1', base_axis=1)
y = F.reshape(h, shape=(1, 18, (- 1)))
return y |
def xchg(locked: dace.int32[1], output: dace.int32[20]):
for i in dace.map[0:20]:
with dace.tasklet:
(l >> locked((- 1), (lambda old, new: new)))
(out >> output[i])
l = 4
out = l |
def adaptive_bins(hist, threshold):
new = hist.copy()
peak = hist.max()
peak_depth = np.where((hist == peak))[0]
delta_hist = np.diff(hist, n=1, axis=0)
left = peak_depth
right = peak_depth
i = np.array([peak_depth[0]])
while 1:
new[[i]] = 0
if (i >= 254):
right = np.array([254])
break
if (delta_hist[i] < 0):
i = (i + 1)
elif (hist[i] <= (threshold * peak)):
right = i
break
else:
i = (i + 1)
i = np.array([(peak_depth[0] - 1)])
while 1:
new[[(i + 1)]] = 0
if (i <= 0):
left = np.array([0])
break
if (delta_hist[i] > 0):
i = (i - 1)
elif (hist[i] <= (threshold * peak)):
left = (i + 1)
break
else:
i = (i - 1)
return [new, left[0], right[0]] |
def get_laplacian(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:
weights = adjacency.dot(np.ones(adjacency.shape[0]))
return (sparse.diags(weights) - adjacency) |
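# Worked example: for the path graph on two nodes, L = D - A:
#     [[ 1, -1],
#      [-1,  1]]
import numpy as np
from scipy import sparse

adjacency = sparse.csr_matrix(np.array([[0, 1], [1, 0]], dtype=float))
print(get_laplacian(adjacency).toarray())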
def build_global_POI_checkin_graph(df, exclude_user=None):
G = nx.DiGraph()
users = list(set(df['user_id'].to_list()))
if (exclude_user in users):
users.remove(exclude_user)
loop = tqdm(users)
for user_id in loop:
user_df = df[(df['user_id'] == user_id)]
for (i, row) in user_df.iterrows():
node = row['POI_id']
if (node not in G.nodes()):
G.add_node(row['POI_id'], checkin_cnt=1, poi_catid=row['POI_catid'], poi_catid_code=row['POI_catid_code'], poi_catname=row['POI_catname'], latitude=row['latitude'], longitude=row['longitude'])
else:
G.nodes[node]['checkin_cnt'] += 1
previous_poi_id = 0
previous_traj_id = 0
for (i, row) in user_df.iterrows():
poi_id = row['POI_id']
traj_id = row['trajectory_id']
if ((previous_poi_id == 0) or (previous_traj_id != traj_id)):
previous_poi_id = poi_id
previous_traj_id = traj_id
continue
if G.has_edge(previous_poi_id, poi_id):
G.edges[(previous_poi_id, poi_id)]['weight'] += 1
else:
G.add_edge(previous_poi_id, poi_id, weight=1)
previous_traj_id = traj_id
previous_poi_id = poi_id
return G |
class ProgramGraphNode():
def __init__(self, index: int, offset: int=0, basic_block: (BasicBlock | None)=None, is_artificial: bool=False) -> None:
self._index = index
self._offset = offset
self._basic_block = basic_block
self._is_artificial = is_artificial
self._predicate_id: (int | None) = None
    @property
    def index(self) -> int:
        return self._index

    @property
    def offset(self) -> int:
        return self._offset

    @offset.setter
    def offset(self, offset: int) -> None:
        self._offset = offset

    @property
    def basic_block(self) -> BasicBlock | None:
        return self._basic_block

    @property
    def is_artificial(self) -> bool:
        return self._is_artificial

    @property
    def predicate_id(self) -> int | None:
        return self._predicate_id

    @predicate_id.setter
    def predicate_id(self, predicate_id: int) -> None:
        self._predicate_id = predicate_id
def __eq__(self, other: Any) -> bool:
if (not isinstance(other, ProgramGraphNode)):
return False
if (self is other):
return True
return (self._index == other.index)
def __hash__(self) -> int:
return (31 + (17 * self._index))
def __str__(self) -> str:
result = f'ProgramGraphNode({self._index})'
if (self._predicate_id is not None):
            result += f'\npredicate_id {self._predicate_id}'
if (self._basic_block is not None):
instructions = []
for instr in self._basic_block:
arg = instr.arg
if isinstance(arg, BasicBlock):
arg = 'ProgramGraphNode'
elif isinstance(arg, Compare):
arg = arg.name
elif (arg is UNSET):
arg = ''
else:
arg = repr(arg)
formatted = instr.name
if (arg != ''):
formatted += f' {arg}'
instructions.append(formatted)
result += ('\n' + '\n'.join(instructions))
return result
def __repr__(self) -> str:
return f'ProgramGraphNode(index={self._index}, basic_block={self._basic_block})' |
class DocumentEncoder(nn.Module):
def __init__(self, hidden_dim, char_filters, n_layers=2):
super().__init__()
glove_weights = F.normalize(GLOVE.weights())
turian_weights = F.normalize(TURIAN.weights())
self.glove = nn.Embedding(glove_weights.shape[0], glove_weights.shape[1])
self.glove.weight.data.copy_(glove_weights)
self.glove.weight.requires_grad = False
self.turian = nn.Embedding(turian_weights.shape[0], turian_weights.shape[1])
self.turian.weight.data.copy_(turian_weights)
self.turian.weight.requires_grad = False
self.char_embeddings = CharCNN(char_filters)
self.lstm = nn.LSTM(((glove_weights.shape[1] + turian_weights.shape[1]) + char_filters), hidden_dim, num_layers=n_layers, bidirectional=True, batch_first=True)
self.emb_dropout = nn.Dropout(0.5, inplace=True)
self.lstm_dropout = nn.Dropout(0.2, inplace=True)
def forward(self, doc):
embeds = [self.embed(s) for s in doc.sents]
(packed, reorder) = pack(embeds)
self.emb_dropout(packed[0])
(output, _) = self.lstm(packed)
self.lstm_dropout(output[0])
states = unpack_and_unpad(output, reorder)
return (torch.cat(states, dim=0), torch.cat(embeds, dim=0))
def embed(self, sent):
glove_embeds = self.glove(lookup_tensor(sent, GLOVE))
tur_embeds = self.turian(lookup_tensor(sent, TURIAN))
char_embeds = self.char_embeddings(sent)
embeds = torch.cat((glove_embeds, tur_embeds, char_embeds), dim=1)
return embeds |
class TensorforceAgent(Agent):
def __init__(self, observation_space, action_space, directory):
self.observation_space = observation_space
self.action_space = action_space
self.directory = directory
self.agent = None
def train(self, env, nb_steps):
try:
print('[train] Loading weights from {}'.format(self.directory))
self.agent.restore_model(directory=self.directory)
print('[train] Successfully loaded weights from {}'.format(self.directory))
except ValueError:
print('[train] Pretrained model {} not found. Starting from scratch.'.format(self.directory))
print("[train] Training '{}'".format(type(self).__name__))
step_count = 0
episode_count = 1
while (step_count < nb_steps):
episode_step_count = 0
obs = env.reset()
done = False
total_rew = 0
while (not done):
action = self.agent.act(obs)
(obs, rew, done, info) = env.step(action)
total_rew += rew
self.agent.observe(reward=rew, terminal=done)
episode_step_count += 1
step_count += episode_step_count
print('[train] Episode {:3} | Steps Taken: {:3} | Total Steps: Taken {:6}/{:6} | Total reward: {}'.format(episode_count, episode_step_count, step_count, nb_steps, total_rew))
episode_count += 1
print('[train] Finished training')
print("[train] Saved weights to '{}'".format(self.directory))
self.agent.save_model(directory=self.directory)
print("[train] Successfully saved weights to '{}'".format(self.directory))
def test(self, env):
try:
print('[test] Loading weights from {}'.format(self.directory))
self.agent.restore_model(directory=self.directory)
print('[test] Successfully loaded weights from {}'.format(self.directory))
except ValueError:
print('[test] Unable to find pretrained model {}. Aborting.'.format(self.directory))
return
print("[test] Running '{}'".format(type(self).__name__))
obs = env.reset()
done = False
total_rew = 0
while (not done):
action = self.agent.act(obs)
(obs, rew, done, info) = env.step(action)
total_rew += rew
self.agent.observe(reward=rew, terminal=done)
print(('[test] Total reward: ' + str(total_rew)))
print('[test] Finished test.')
print("[test] Saved weights to '{}'".format(self.directory))
self.agent.save_model(directory=self.directory)
print("[test] Successfully saved weights to '{}'".format(self.directory))
def submit(self, env):
try:
print("[submit] Loading weights from '{}'".format(self.directory))
self.agent.restore_model(directory=self.directory)
print("[submit] Successfully loaded weights from '{}'".format(self.directory))
except ValueError:
print("[submit] Unable to find pretrained model from '{}'. Aborting.".format(self.directory))
return
print("[submit] Running '{}'".format(type(self).__name__))
obs = env.reset()
episode_count = 1
step_count = 0
total_rew = 0
try:
while True:
action = self.act(obs)
(obs, rew, done, info) = env.step(action)
total_rew += rew
step_count += 1
if done:
print('[submit] Episode {} | Steps Taken: {:3} | Total reward: {}'.format(episode_count, step_count, total_rew))
obs = env.reset()
episode_count += 1
step_count = 0
total_rew = 0
except TypeError:
pass
print("[submit] Finished running '{}' on Server environment. Submitting results to server...".format(type(self).__name__))
env.submit()
print('[submit] Submitted results successfully!')
def act(self, obs):
return self.agent.act(obs) |
def maybe_download_and_extract_netflix_data(data_dir, force_overwrite=False):
write_path = os.path.join(data_dir, 'netflix-prize.zip')
    zip_url = ''  # the download URL was elided in this snippet; set it before use
if (not os.path.isfile(write_path)):
os.makedirs(data_dir, exist_ok=True)
print('Zip not downloaded. Downloading now...')
save_zip_data(write_path, zip_url)
print('Zip downloaded')
else:
print('Zip already downloaded')
extract_destination = os.path.join(data_dir, 'netflix-prize')
if os.path.isdir(extract_destination):
if (not force_overwrite):
            print('Extracted data dir already exists and overwrite not forced. Exiting.')
return
else:
print('Deleting extracted-lib and recreating...')
shutil.rmtree(extract_destination)
print('unzipping data')
with zipfile.ZipFile(write_path, 'r') as zip_ref:
zip_ref.extractall(extract_destination)
print('all extracted...') |
def save_md5(files, out_file):
md5_dict = {}
for file in files:
md5_dict[file] = get_md5(file)
save_pkl(md5_dict, out_file) |
class stylegenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=5, opt=None):
super(stylegenerator, self).__init__()
        assert type(input_nc) == list and len(input_nc) == 3, 'The AttModule takes input_nc as a list of length 3 only!'
self.gpu_ids = gpu_ids
self.model = Model(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling, opt=opt)
def forward(self, input):
return self.model(input) |
def main():
parser = argparse.ArgumentParser(prog='skyline', description='Skyline: Interactive Neural Network Performance Profiler, Visualizer, and Debugger for PyTorch')
parser.add_argument('-v', '--version', action='store_true', help='Print the version and exit.')
subparsers = parser.add_subparsers(title='Commands')
skyline.commands.interactive.register_command(subparsers)
skyline.commands.memory.register_command(subparsers)
skyline.commands.time.register_command(subparsers)
args = parser.parse_args()
if args.version:
print('Skyline Command Line Interface', ('v' + skyline.__version__))
return
if ('func' not in args):
parser.print_help()
sys.exit(1)
args.func(args) |
@pytest.fixture
def digits_format(testdir):
module = testdir.make_importable_pyfile(hook='\n import string\n import schemathesis\n from hypothesis import strategies as st\n\n schemathesis.openapi.format(\n "digits",\n st.text(\n min_size=1,\n alphabet=st.characters(\n whitelist_characters=string.digits,\n whitelist_categories=()\n )\n )\n )\n ')
(yield module)
unregister_string_format('digits') |
class NarrowIEOpenIECombiner(object):
def __init__(self, oie_data_dir, IDF_path, csv_path, SUBWORDUNIT, sp_size, number_of_clusters=50, stemming=False, stopwords=True, SUBWORD_UNIT_COMBINATION='avg', path_to_embeddings=None):
self.oie_data_dir = oie_data_dir
self.csv_path = csv_path
self.number_of_clusters = number_of_clusters
self.SUBWORD_UNIT_COMBINATION = SUBWORD_UNIT_COMBINATION
self.stemming = stemming
self.stopwords = stopwords
self.IDF_path = IDF_path
self.filter_data_path = (IDF_path.rsplit('/', maxsplit=1)[0] + '/')
self.subwordunit = SUBWORDUNIT
if SUBWORDUNIT:
self.sp_size = str(sp_size)
else:
self.sp_size = ''
self.ELMo_options_path = ''
self.ELMo_weights_path = ''
        if not path_to_embeddings:
            self.path_to_embeddings = 'SORE/data/filter_data/elmo_pubmed/'
        else:
            self.path_to_embeddings = path_to_embeddings
        self.check_for_embeddings()
def check_for_embeddings(self):
types = ['*.hdf5', '*.json']
embedding_files = []
for file_type in types:
embedding_files.extend(glob.glob((self.path_to_embeddings + file_type)))
if (embedding_files == []):
print('No embedding files found, beginning download of ELMo PubMed files.')
            # The ELMo PubMed download URLs were elided in this snippet;
            # set them before running the download.
            w = ''
            o = ''
wget.download(w, (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5'))
wget.download(o, (self.path_to_embeddings + 'ELmo_PubMed_options.json'))
self.ELMo_weights_path = (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5')
self.ELMo_options_path = (self.path_to_embeddings + 'ELmo_PubMed_options.json')
elif ((self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5') in embedding_files):
self.ELMo_weights_path = (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5')
self.ELMo_options_path = (self.path_to_embeddings + 'ELmo_PubMed_options.json')
print('Found ELMo PubMed embeddings')
pass
else:
print('Assuming the ELMo PubMed embeddings are correctly set in {}'.format(self.path_to_embeddings))
def prepare_narrowIE_embeddings(self, prefix, sp_model_path):
settings = '{pr}[{num_clusters}]_{sp}{w}_{stem}_{stop}'.format(pr=prefix, num_clusters=self.number_of_clusters, sp=(self.sp_size + '_'), w=str(self.SUBWORD_UNIT_COMBINATION), stem=str(self.stemming), stop=str(self.stopwords))
embedder = fu.PrepareEmbeddings(prefix, sp_model_path, self.sp_size, self.IDF_path, self.csv_path, self.ELMo_options_path, self.ELMo_weights_path, SUBWORD_UNIT_COMBINATION=self.SUBWORD_UNIT_COMBINATION, subwordunits=self.subwordunit, stemming=self.stemming, stopwords=self.stopwords)
if (not os.path.exists((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)))):
try:
narrowIE_data = embedder.load_narrowIE_data()
narrowIE_embeddings = embedder.embed_all_narrowIE_phrases(narrowIE_data)
except TypeError:
print('Narrow IE arguments not properly embedded.')
return
with open((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)), 'wb') as f:
pickle.dump(narrowIE_data, f)
with open((self.filter_data_path + 'vectors/nIE_emb_{settings}.pkl'.format(settings=settings)), 'wb') as f:
pickle.dump(narrowIE_embeddings, f)
else:
with open((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)), 'rb') as f:
narrowIE_data = pickle.load(f)
with open((self.filter_data_path + 'vectors/nIE_emb_{settings}.pkl'.format(settings=settings)), 'rb') as f:
narrowIE_embeddings = pickle.load(f)
return (narrowIE_data, narrowIE_embeddings, embedder)
def get_docid_from_filename(self, filename, output_name=False):
if output_name:
return (((self.oie_data_dir + 'processed/') + filename.rsplit('/', maxsplit=1)[1][:(- 4)]) + '_processed.txt')
return filename.rsplit('/', maxsplit=1)[1][:(- 4)]
def OIE_files_to_filter(self):
input_files = glob.glob((self.oie_data_dir + '*.txt'))
doc_ids_for_filtering = []
with open(self.csv_path, 'r') as csv_f:
reader = csv.DictReader(csv_f)
for row in reader:
doc_ids_for_filtering.append(row['doc_id'])
doc_ids_for_filtering = list(set(doc_ids_for_filtering))
return [f for f in input_files if (self.get_docid_from_filename(f) in doc_ids_for_filtering)]
def run(self, prefix, filter_settings, output_dir, irrelevant_cluster_ids, num_clusters_to_drop=2, print_stats=False, print_clusters=False, plot=False, cluster_names=None):
sp_model_path = (self.filter_data_path + '{}_{}.model'.format(prefix, self.sp_size))
(narrowIE_phrases, narrowIE_embeddings, embedder) = self.prepare_narrowIE_embeddings(prefix, sp_model_path)
clusterer = fu.ClusterTradeOffs(self.filter_data_path, self.number_of_clusters, self.sp_size, self.stemming, self.stopwords)
km_model = clusterer.get_Kmeans_model(prefix, narrowIE_phrases, narrowIE_embeddings)
(clusters, results) = clusterer.cluster(km_model, narrowIE_phrases, narrowIE_embeddings)
clusters_to_drop = clusterer.cluster_insight(results, num_clusters_to_drop)
print('Dropping {} for size of the clusters, and {} because selected'.format(str(clusters_to_drop), str(irrelevant_cluster_ids)))
clusters_to_drop += irrelevant_cluster_ids
filterer = fu.SoreFilter(self.oie_data_dir, self.csv_path, self.IDF_path, self.subwordunit, sp_model_path, self.ELMo_weights_path, self.ELMo_options_path, filter_settings)
filterer.start_filtering(output_dir, prefix, self.number_of_clusters, narrowIE_phrases, narrowIE_embeddings, embedder, km_model, clusters_to_drop, print_stats)
if plot:
if cluster_names:
category_list = [x for x in cluster_names.values()]
else:
category_list = [x for x in range(self.number_of_clusters)]
digits_proj = TSNE(random_state=self.randomstate).fit_transform(clusters)
clusterer.palplot(digits_proj, km_model, category_list) |
class LieAlgebras(Category_over_base_ring):
    @cached_method
def super_categories(self):
return [Modules(self.base_ring())]
class SubcategoryMethods():
def Nilpotent(self):
return self._with_axiom('Nilpotent')
Graded = LazyImport('sage.categories.graded_lie_algebras', 'GradedLieAlgebras', as_name='Graded')
def _repr_object_names(self):
base = self.base()
if isinstance(base, Category):
if isinstance(base, JoinCategory):
name = (('(' + ' and '.join((C._repr_object_names() for C in base.super_categories()))) + ')')
else:
name = base._repr_object_names()
else:
name = base
return 'Lie algebras over {}'.format(name)
def example(self, gens=None):
if (gens is None):
from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra
from sage.rings.rational_field import QQ
gens = SymmetricGroupAlgebra(QQ, 3).algebra_generators()
from sage.categories.examples.lie_algebras import Example
return Example(gens)
WithBasis = LazyImport('sage.categories.lie_algebras_with_basis', 'LieAlgebrasWithBasis', as_name='WithBasis')
class FiniteDimensional(CategoryWithAxiom_over_base_ring):
WithBasis = LazyImport('sage.categories.finite_dimensional_lie_algebras_with_basis', 'FiniteDimensionalLieAlgebrasWithBasis', as_name='WithBasis')
def extra_super_categories(self):
if (self.base_ring() in Sets().Finite()):
return [Sets().Finite()]
return []
class Nilpotent(CategoryWithAxiom_over_base_ring):
class ParentMethods():
            @abstract_method
            def step(self):
                r"""Return the nilpotency step of ``self``."""
def is_nilpotent(self):
return True
class ParentMethods():
def bracket(self, lhs, rhs):
if (lhs in LieAlgebras):
if (rhs in LieAlgebras):
return lhs.product_space(rhs)
return lhs.ideal(rhs)
elif (rhs in LieAlgebras):
return rhs.ideal(lhs)
return self(lhs)._bracket_(self(rhs))
def universal_enveloping_algebra(self):
return self.lift.codomain()
        @abstract_method(optional=True)
        def _construct_UEA(self):
            r"""Construct the universal enveloping algebra of ``self``."""

        @abstract_method(optional=True)
        def module(self):
            r"""Return ``self`` as a module (forgetting the Lie bracket)."""

        @abstract_method(optional=True)
        def from_vector(self, v, order=None, coerce=False):
            r"""Return the element of ``self`` corresponding to the vector ``v``."""

        @lazy_attribute
        def lift(self):
M = LiftMorphism(self, self._construct_UEA())
M.register_as_coercion()
return M
def subalgebra(self, gens, names=None, index_set=None, category=None):
raise NotImplementedError('subalgebras not yet implemented: see #17416')
def ideal(self, *gens, **kwds):
raise NotImplementedError('ideals not yet implemented: see #16824')
def is_ideal(self, A):
if (A == self):
return True
raise NotImplementedError('ideals not yet implemented: see #16824')
        @abstract_method(optional=True)
        def killing_form(self, x, y):
            r"""Return the Killing form of ``x`` and ``y``."""
def is_abelian(self):
G = self.lie_algebra_generators()
if (G not in FiniteEnumeratedSets()):
raise NotImplementedError('infinite number of generators')
zero = self.zero()
return all(((x._bracket_(y) == zero) for x in G for y in G))
def is_commutative(self):
return self.is_abelian()
        @abstract_method(optional=True)
        def is_solvable(self):
            r"""Return whether ``self`` is solvable."""

        @abstract_method(optional=True)
        def is_nilpotent(self):
            r"""Return whether ``self`` is nilpotent."""
def bch(self, X, Y, prec=None):
if ((self not in LieAlgebras.Nilpotent) and (prec is None)):
raise ValueError('the Lie algebra is not known to be nilpotent, so you must specify the precision')
from sage.algebras.lie_algebras.bch import bch_iterator
if (prec is None):
return self.sum((Z for Z in bch_iterator(X, Y)))
bch = bch_iterator(X, Y)
return self.sum((next(bch) for k in range(prec)))
baker_campbell_hausdorff = bch
        @abstract_method(optional=True)
        def lie_group(self, name='G', **kwds):
            r"""Return a Lie group associated to ``self``."""
def trivial_representation(self):
from sage.algebras.lie_algebras.representation import TrivialRepresentation
return TrivialRepresentation(self)
def representation(self, f=None, index_set=None, on_basis=False, **kwargs):
if ((f is None) and (on_basis is False) and (index_set is None)):
return self.trivial_representation(**kwargs)
from sage.algebras.lie_algebras.representation import RepresentationByMorphism
return RepresentationByMorphism(self, f, index_set, on_basis, **kwargs)
def _test_jacobi_identity(self, **options):
tester = self._tester(**options)
elts = tester.some_elements()
jacobi = (lambda x, y, z: ((self.bracket(x, self.bracket(y, z)) + self.bracket(y, self.bracket(z, x))) + self.bracket(z, self.bracket(x, y))))
zero = self.zero()
for x in elts:
for y in elts:
if (x == y):
continue
for z in elts:
tester.assertEqual(jacobi(x, y, z), zero)
def _test_antisymmetry(self, **options):
tester = self._tester(**options)
elts = tester.some_elements()
zero = self.zero()
for x in elts:
tester.assertEqual(self.bracket(x, x), zero)
def _test_distributivity(self, **options):
tester = self._tester(**options)
S = tester.some_elements()
from sage.misc.misc import some_tuples
for (x, y, z) in some_tuples(S, 3, tester._max_runs):
tester.assertEqual(self.bracket(x, (y + z)), (self.bracket(x, y) + self.bracket(x, z)))
tester.assertEqual(self.bracket((x + y), z), (self.bracket(x, z) + self.bracket(y, z)))
class ElementMethods():
        @coerce_binop
        def bracket(self, rhs):
            return self._bracket_(rhs)

        @abstract_method
        def _bracket_(self, y):
            r"""Return the Lie bracket ``[self, y]``."""

        @abstract_method(optional=True)
        def to_vector(self, order=None):
            r"""Return ``self`` as a vector."""

        @abstract_method(optional=True)
        def lift(self):
            r"""Lift ``self`` to the universal enveloping algebra."""
def killing_form(self, x):
return self.parent().killing_form(self, x)
def exp(self, lie_group=None):
if (lie_group is None):
lie_group = self.parent().lie_group()
return lie_group.exp(self) |
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('axis', [0, 1, 2, -1, -2, -3])
@pytest.mark.parametrize('n', [3, 5])
def test_top_n_error_forward(seed, axis, n, ctx, func_name):
ishape = [5, 6, 7]
rng = np.random.RandomState(seed)
l_shape = list(ishape)
l_shape[axis] = 1
n_class = ishape[axis]
inputs = [((rng.rand(5, 6, 7).astype(np.float32) * 0.9) + 0.05), rng.randint(0, n_class, size=l_shape).astype(int)]
ref = ref_top_n_error(inputs[0], inputs[1], axis, n)
x = nn.Variable(ishape)
l = nn.Variable(l_shape)
y = F.top_n_error(x, l, axis, n)
x.d = inputs[0]
l.d = inputs[1]
y.forward()
res = y.d
atol_f = 1e-06
assert_allclose(ref, res, atol=atol_f) |
@register_toolkit()
class GoogleHome(FunctionToolkit):
name_for_human = 'Google Home'
description_for_human = 'Toolkit for controlling and managing Google Home devices.'
name_for_model = 'GoogleHome'
description_for_model = 'A toolkit for controlling and managing Google Home devices, enabling users to control smart home devices, play media, set reminders, ask questions, and retrieve device status and actions.'
tool_classes = [GoogleHomeSearchDevices, GoogleHomeListDeviceActions, GoogleHomeControlDevice, GoogleHomeScheduleDeviceAction, GoogleHomePlayMedia, GoogleHomeSetReminder, GoogleHomeListReminders, GoogleHomeAskQuestion] |
class RealizationsCategory(RegressiveCovariantConstructionCategory):
_functor_category = 'Realizations' |
def lr_func_steps_with_decay(cur_iter):
ind = get_step_index(cur_iter)
return (cfg.SOLVER.BASE_LR * (cfg.SOLVER.GAMMA ** ind)) |
def get_graph(arr, passable='empty'):
graph = nx.Graph()
(height, width) = arr.shape
size = (width * height)
graph.add_nodes_from(range(size))
for u in range(size):
(ux, uy) = ((u // width), (u % width))
if (arr[(ux, uy)] != passable):
continue
neighbs_xy = [((ux - 1), uy), (ux, (uy - 1)), ((ux + 1), uy), (ux, (uy + 1))]
neighbs = [((x * width) + y) for (x, y) in neighbs_xy]
for (v, (vx, vy)) in zip(neighbs, neighbs_xy):
if ((not (0 <= v < size)) or (not ((0 <= vx < height) and (0 <= vy < width))) or (arr[(vx, vy)] != passable)):
continue
graph.add_edge(u, v)
return graph |
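# Usage sketch for get_graph (hedged: `arr` below is a hypothetical 2 x 3 grid;
# networkx is the `nx` the function body assumes).
import numpy as np
import networkx as nx
arr = np.array([['empty', 'empty', 'wall'], ['wall', 'empty', 'empty']])
g = get_graph(arr)
# Node ids are row * width + col, so nx.shortest_path(g, 0, 5) -> [0, 1, 4, 5]. |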
def MAE(original_path, approximate_path):
with open(original_path, 'r') as fo:
org_line_list = fo.readlines()
with open(approximate_path, 'r') as fa:
app_line_list = fa.readlines()
org = [list(filter((lambda a: (a != ' ')), list(i[:(- 1)]))) for i in org_line_list]
app = [list(filter((lambda a: (a != ' ')), list(i[:(- 1)]))) for i in app_line_list]
if (len(org_line_list) != len(app_line_list)):
print('ERROR! sizes of input files are not equal! Aborting...')
return (- 1)
num_vec = len(org)
num_pos = len(org[0])
maxnum = ((2 ** num_pos) - 1)
err = []
for i in range(num_vec):
orgnum = int(''.join(org[i]), 2)
appnum = int(''.join(app[i]), 2)
err.append(np.abs((orgnum - appnum)))
return (np.mean(err) / maxnum) |
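# Worked example for MAE (hedged: assumes each line of the input files is one
# fixed-width bit string, as the parsing above implies).
import os
import tempfile
fo = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
fa = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
fo.write('0101\n1000\n')
fo.close()
fa.write('0110\n1010\n')
fa.close()
print(MAE(fo.name, fa.name))
# errors are |5-6| = 1 and |8-10| = 2; mean(1.5) / (2**4 - 1) = 0.1
os.unlink(fo.name)
os.unlink(fa.name) |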
def main():
evaluator = CoQAEvaluator(OPTS.data_file)
if OPTS.human:
print(json.dumps(evaluator.human_performance(), indent=2))
if OPTS.pred_file:
pred_data = CoQAEvaluator.preds_to_dict(OPTS.pred_file)
print(json.dumps(evaluator.model_performance(pred_data), indent=2)) |
class LogisticRegression(GNNModel):
def __init__(self, features, graph_adj, targets, nodes_to_consider, weight_decay, normalize_features):
self.normalize_features = normalize_features
with tf.name_scope('extract_relevant_nodes'):
targets = tf.gather(targets, nodes_to_consider)
super().__init__(features, graph_adj, targets)
self.nodes_to_consider = nodes_to_consider
self.weight_decay = weight_decay
self._build_model_graphs()
def _inference(self):
with tf.name_scope('inference'):
weights = tf.get_variable('weights', [int(self.features.get_shape()[1]), self.targets.shape[1]], dtype=tf.float32, initializer=tf.glorot_uniform_initializer(), regularizer=slim.l2_regularizer(self.weight_decay))
output = tf.sparse_tensor_dense_matmul(self.features, weights)
output = tf.contrib.layers.bias_add(output)
with tf.name_scope('extract_relevant_nodes'):
return tf.gather(output, self.nodes_to_consider)
def _preprocess_features(self, features):
if self.normalize_features:
features = row_normalize(features)
return to_sparse_tensor(features)
def _preprocess_adj(self, graph_adj):
return to_sparse_tensor(graph_adj) |
class DataSetIter(BatchIter):
def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, batch_sampler=None, max_tokens=2500):
assert isinstance(dataset, DataSet)
if (sampler is not None):
dataset = DataSetGetter(dataset, as_numpy, max_tokens=max_tokens)
else:
dataset = DataSetGetter(dataset, as_numpy, max_tokens=(- 1))
collate_fn = dataset.collate_fn
if (batch_sampler is not None):
batch_size = 1
sampler = None
drop_last = False
super().__init__(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, collate_fn=collate_fn, batch_sampler=batch_sampler)
def __iter__(self):
self.init_iter()
for (indices, batch_x, batch_y) in self.dataiter:
self.cur_batch_indices = indices
(yield (batch_x, batch_y)) |
class Trainer():
def __init__(self, corpus, optimizers, translator, batch_size=2, backbool=False, penalty_tuning=None, cosinealpha=None):
self.corpus = corpus
self.translator = translator
self.optimizers = optimizers
self.batch_size = batch_size
self.backbool = backbool
self.penalty_tuning = penalty_tuning
self.cosinealpha = cosinealpha
self.reset_stats()
def step(self, nstep, log_interval, device, args=None):
verbose = ((nstep % log_interval) == 0)
backbool = self.backbool
for optimizer in self.optimizers:
optimizer.zero_grad()
t = time.time()
if (nstep > args.startfrom):
(src, trg) = self.corpus.next_batch(self.batch_size, noop=False)
if (nstep <= args.startfrom):
(src, trg) = self.corpus.next_batch(self.batch_size, noop=True)
return
(print('BACKTRANSLATION TRAINING PAIR') if (self.backbool and verbose) else print('', end=''))
(print('DENOISING TRAINING PAIR') if ((not self.backbool) and verbose) else print('', end=''))
(print('SOURCE: {}'.format(src[0])) if verbose else print('', end=''))
sys.stdout.flush()
batchtrg_word_count = sum([(len(data.tokenize(sentence)) + 1) for sentence in trg])
self.src_word_count += sum([(len(data.tokenize(sentence)) + 1) for sentence in src])
self.trg_word_count += batchtrg_word_count
self.io_time += (time.time() - t)
t = time.time()
if backbool:
if (not verbose):
if (not args.max_cosine):
(loss, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine)
else:
(loss, cosineloss, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine)
elif (not args.max_cosine):
(loss, preds, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, find_preds=True)
else:
(loss, cosineloss, preds, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, find_preds=True)
(print('TARGET: {}'.format(trg[0])) if verbose else print('', end=''))
(print('PREDICTIONS FOR BACKTRANSLATION PAIR') if verbose else print('', end=''))
(print(preds[0]) if verbose else print('', end=''))
else:
psencoder_embeddings = (None if (not args.denoi_enc_loss) else self.translator.psencoder_embeddings)
if (not verbose):
if (not args.max_cosine):
(loss, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, word_embeddings=psencoder_embeddings)
else:
(loss, cosineloss, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, word_embeddings=psencoder_embeddings)
elif (not args.max_cosine):
(loss, preds, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, find_preds=True, word_embeddings=psencoder_embeddings)
else:
(loss, cosineloss, preds, hiddensrc, hiddenpssrc) = self.translator.score(src, trg, train=True, backbool=backbool, verbose=verbose, find_cosine=args.max_cosine, find_preds=True, word_embeddings=psencoder_embeddings)
(print('TARGET: {}'.format(trg[0])) if verbose else print('', end=''))
(print('PREDICTIONS FOR DENOISING PAIR') if verbose else print('', end=''))
(print(preds[0]) if verbose else print('', end=''))
sys.stdout.flush()
if args.max_cosine:
(print('loss,cosineloss', loss, cosineloss) if verbose else print('', end=''))
loss = loss.add(cosineloss.mul(args.cosinealpha))  # .add is out-of-place; keep the result
self.forward_time += (time.time() - t)
self.nsteps += 1
t = time.time()
if (backbool or (not args.denoi_enc_loss)):
cebatchloss = loss.data
loss.div(self.batch_size).backward()
else:
cebatchloss = loss.data
hiddensrc = torch.transpose(hiddensrc, 0, 1).contiguous().view(hiddensrc.size()[1], (- 1))
hiddenpssrc = torch.transpose(hiddenpssrc, 0, 1).contiguous().view(hiddenpssrc.size()[1], (- 1))
embeddloss = torch.nn.functional.cosine_similarity(hiddensrc, hiddenpssrc).add(1).mul((- args.cosinealpha))
(print('INSTANCE EMBEDDLOSS : {}'.format(embeddloss.data[0])) if verbose else print('', end=''))
embbatchloss = embeddloss.data.sum()
embeddloss = embeddloss.sum().div(self.batch_size)
(print('BATCH EMBEDDLOSS : {}'.format(embeddloss.data)) if verbose else print('', end=''))
loss.div(self.batch_size).backward()
sys.stdout.flush()
self.celoss += cebatchloss
for optimizer in self.optimizers:
optimizer.step()
self.backward_time += (time.time() - t)
def reset_stats(self):
self.src_word_count = 0
self.trg_word_count = 0
self.io_time = 0
self.forward_time = 0
self.backward_time = 0
self.celoss = 0
self.sentstats = np.zeros(3)
self.nsteps = 0
self.nbacksteps = 0
def perplexity_per_word(self):
return np.exp((self.celoss / max(self.trg_word_count, 1e-08)))
def total_time(self):
return ((self.io_time + self.forward_time) + self.backward_time)
def words_per_second(self):
return ((self.src_word_count / max(self.total_time(), 1e-08)), (self.trg_word_count / max(self.total_time(), 1e-08)))
def sent_stats(self):
return (self.sentstats / self.nbacksteps) |
def register_Ns3Dot11sHwmpRtable_methods(root_module, cls):
cls.add_constructor([param('ns3::dot11s::HwmpRtable const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddPrecursor', 'void', [param('ns3::Mac48Address', 'destination'), param('uint32_t', 'precursorInterface'), param('ns3::Mac48Address', 'precursorAddress'), param('ns3::Time', 'lifetime')])
cls.add_method('AddProactivePath', 'void', [param('uint32_t', 'metric'), param('ns3::Mac48Address', 'root'), param('ns3::Mac48Address', 'retransmitter'), param('uint32_t', 'interface'), param('ns3::Time', 'lifetime'), param('uint32_t', 'seqnum')])
cls.add_method('AddReactivePath', 'void', [param('ns3::Mac48Address', 'destination'), param('ns3::Mac48Address', 'retransmitter'), param('uint32_t', 'interface'), param('uint32_t', 'metric'), param('ns3::Time', 'lifetime'), param('uint32_t', 'seqnum')])
cls.add_method('DeleteProactivePath', 'void', [])
cls.add_method('DeleteProactivePath', 'void', [param('ns3::Mac48Address', 'root')])
cls.add_method('DeleteReactivePath', 'void', [param('ns3::Mac48Address', 'destination')])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetPrecursors', 'ns3::dot11s::HwmpRtable::PrecursorList', [param('ns3::Mac48Address', 'destination')])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('GetUnreachableDestinations', 'std::vector< ns3::dot11s::HwmpProtocol::FailedDestination >', [param('ns3::Mac48Address', 'peerAddress')])
cls.add_method('LookupProactive', 'ns3::dot11s::HwmpRtable::LookupResult', [])
cls.add_method('LookupProactiveExpired', 'ns3::dot11s::HwmpRtable::LookupResult', [])
cls.add_method('LookupReactive', 'ns3::dot11s::HwmpRtable::LookupResult', [param('ns3::Mac48Address', 'destination')])
cls.add_method('LookupReactiveExpired', 'ns3::dot11s::HwmpRtable::LookupResult', [param('ns3::Mac48Address', 'destination')])
cls.add_static_attribute('INTERFACE_ANY', 'uint32_t const', is_const=True)
cls.add_static_attribute('MAX_METRIC', 'uint32_t const', is_const=True)
return |
def register_Ns3Dot11sIeRann_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::dot11s::IeRann const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DecrementTtl', 'void', [])
cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
cls.add_method('GetDestSeqNumber', 'uint32_t', [])
cls.add_method('GetFlags', 'uint8_t', [])
cls.add_method('GetHopcount', 'uint8_t', [])
cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
cls.add_method('GetMetric', 'uint32_t', [])
cls.add_method('GetOriginatorAddress', 'ns3::Mac48Address', [])
cls.add_method('GetTtl', 'uint8_t', [])
cls.add_method('IncrementMetric', 'void', [param('uint32_t', 'metric')])
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'i')], is_const=True, is_virtual=True)
cls.add_method('SetDestSeqNumber', 'void', [param('uint32_t', 'dest_seq_number')])
cls.add_method('SetFlags', 'void', [param('uint8_t', 'flags')])
cls.add_method('SetHopcount', 'void', [param('uint8_t', 'hopcount')])
cls.add_method('SetMetric', 'void', [param('uint32_t', 'metric')])
cls.add_method('SetOriginatorAddress', 'void', [param('ns3::Mac48Address', 'originator_address')])
cls.add_method('SetTTL', 'void', [param('uint8_t', 'ttl')])
return |
def load_multiple_tracker_summaries(tracker_folder, tracker_list, cls):
data = {}
for tracker in tracker_list:
with open(os.path.join(tracker_folder, tracker, (cls + '_summary.txt'))) as f:
keys = next(f).split(' ')
done = False
while (not done):
values = next(f).split(' ')
if (len(values) == len(keys)):
done = True
data[tracker] = dict(zip(keys, map(float, values)))
return data |
class ResNet(nn.Module):
def __init__(self, last_stride, bn_norm, with_ibn, with_se, with_nl, block, layers, non_layers):
self.channel_nums = []
self.inplanes = 64
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = get_norm(bn_norm, 64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm, with_ibn, with_se)
self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm, with_ibn, with_se)
self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm, with_ibn, with_se)
self.layer4 = self._make_layer(block, 512, layers[3], last_stride, bn_norm, with_se=with_se)
self.random_init()
def _make_layer(self, block, planes, blocks, stride=1, bn_norm='BN', with_ibn=False, with_se=False):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), get_norm(bn_norm, (planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se))
self.channel_nums.append(self.inplanes)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.relu(x, inplace=True)
return x
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
nn.init.normal_(m.weight, 0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def get_bn_before_relu(self):
if isinstance(self.layer1[0], Bottleneck):
bn1 = self.layer1[(- 1)].bn3
bn2 = self.layer2[(- 1)].bn3
bn3 = self.layer3[(- 1)].bn3
bn4 = self.layer4[(- 1)].bn3
elif isinstance(self.layer1[0], BasicBlock):
bn1 = self.layer1[(- 1)].bn2
bn2 = self.layer2[(- 1)].bn2
bn3 = self.layer3[(- 1)].bn2
bn4 = self.layer4[(- 1)].bn2
else:
logger.info('ResNet unknown block error!')
raise ValueError('unknown ResNet block type; expected Bottleneck or BasicBlock')
return [bn1, bn2, bn3, bn4]
def extract_feature(self, x, preReLU=False):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
feat1 = self.layer1(x)
feat2 = self.layer2(feat1)
feat3 = self.layer3(feat2)
feat4 = self.layer4(feat3)
if (not preReLU):
feat1 = F.relu(feat1)
feat2 = F.relu(feat2)
feat3 = F.relu(feat3)
feat4 = F.relu(feat4)
return ([feat1, feat2, feat3, feat4], F.relu(feat4))
def get_channel_nums(self):
return self.channel_nums |
def scope_dirname(scope):
slash = scope.rfind('/')
if (slash == (- 1)):
return ''
return scope[:(slash + 1)] |
def test_load():
def fake_condition(memo_info, manager, args):
if (memo_info.state == 'RAW'):
return [memo_info]
else:
return []
def fake_action(memories, args):
return (FakeProtocol('protocol'), [None], [None], [{}])
tl = Timeline()
node = FakeNode('node', tl)
assert (len(node.resource_manager.rule_manager) == 0)
rule = Rule(1, fake_action, fake_condition, None, None)
for memo_info in node.resource_manager.memory_manager:
assert (memo_info.state == 'RAW')
node.resource_manager.load(rule)
memo_array = node.resource_manager.memory_manager.memory_array
assert (len(node.resource_manager.rule_manager) == 1)
for memo_info in node.resource_manager.memory_manager:
assert (memo_info.state == 'OCCUPIED')
assert (len(node.resource_manager.waiting_protocols) == len(memo_array))
assert (len(node.resource_manager.pending_protocols) == 0)
assert (len(rule.protocols) == len(memo_array)) |
@register_model
def resnest50d_1s4x24d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['resnest50d_1s4x24d']
model = ResNet(ResNestBottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, block_args=dict(radix=1, avd=True, avd_first=True), **kwargs)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model |
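# Usage sketch (hedged: assumes the surrounding timm-style module, which
# provides ResNet, ResNestBottleneck, default_cfgs and load_pretrained).
import torch
model = resnest50d_1s4x24d(pretrained=False, num_classes=10)
logits = model(torch.randn(1, 3, 224, 224))  # logits.shape == (1, 10) |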
def collate_dgl(samples):
(graphs, labels) = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
if isinstance(labels[0], torch.Tensor):
return (batched_graph, torch.stack(labels))
else:
return (batched_graph, labels) |
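# Usage sketch for collate_dgl (hedged: `dataset` is a hypothetical sequence of
# (dgl.DGLGraph, torch.Tensor) pairs):
# from torch.utils.data import DataLoader
# loader = DataLoader(dataset, batch_size=32, collate_fn=collate_dgl)
# for batched_graph, labels in loader:
#     ...  # labels is a stacked tensor when the per-sample labels are tensors |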
def run_test(cfg, model, distributed):
if distributed:
model = model.module
torch.cuda.empty_cache()
iou_types = ('bbox',)
if cfg.MODEL.MASK_ON:
iou_types = (iou_types + ('segm',))
if cfg.MODEL.KEYPOINT_ON:
iou_types = (iou_types + ('keypoints',))
output_folders = ([None] * len(cfg.DATASETS.TEST))
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for (idx, dataset_name) in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val):
inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=(False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY), device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder)
synchronize() |
def calc_em_score(answers, prediction):
em = 0
for ans in answers:
ans_ = remove_punctuation(ans['text'])
prediction_ = remove_punctuation(prediction)
if (ans_ == prediction_):
em = 1
break
return em |
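# Example for calc_em_score (hedged: `remove_punctuation` comes from the
# surrounding evaluation script; answers use SQuAD-style dicts):
# answers = [{'text': 'Paris'}, {'text': 'the city of Paris'}]
# calc_em_score(answers, 'Paris')  -> 1 (matches the first answer exactly)
# calc_em_score(answers, 'Rome')   -> 0 |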
@jit(nopython=True, nogil=True)
def gower_distance(r0: np.ndarray, r1: np.ndarray, cat_cols_index: np.ndarray) -> float64:
dist = 0.0
for i in range(len(r0)):
if (isnan(r0[i]) and isnan(r1[i])):
dist += 1
elif (i < cat_cols_index):
dist += fabs((r0[i] - r1[i]))
elif (r0[i] != r1[i]):
dist += 1
return dist |
class TestConjugatePriors(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
np.random.seed(12345)
def test_beta_bernoulli(self):
print()
logger.info((('test_beta_bernoulli\n' + ('-' * 80)) + '\n'))
for theta in [0.21, 0.5, 0.93]:
data = (np.random.rand(1000) < theta)
theta_hat = ((1 + sum(data)) / (len(data) + 2))
dist_np = BetaBernoulli()
dist_np.update(data)
self.assertEqual(dist_np.alpha, (1 + sum(data)))
self.assertEqual(dist_np.beta, (1 + sum((1 - data))))
pred = dist_np.posterior([0, 1], log=False)
expected = np.asarray([(1 - theta_hat), theta_hat])
self.assertAlmostEqual(np.max(np.abs((pred - expected))), 0, places=6)
ts = TimeSeries.from_pd(data, freq='MS')
dist_ts = BetaBernoulli(ts[:30])
dist_ts.update(ts[30:])
self.assertEqual(dist_ts.alpha, (1 + sum(data)))
self.assertEqual(dist_ts.beta, (1 + sum((1 - data))))
pred = dist_ts.posterior(TimeSeries.from_pd([0, 1]), log=False)
self.assertAlmostEqual(np.max(np.abs((pred - expected))), 0, places=6)
def test_normal(self):
print()
logger.info((('test_normal\n' + ('-' * 80)) + '\n'))
(mu, sigma) = (5, 2)
for n in [10, 100, 1000, 100000]:
data = ((np.random.randn(n) * sigma) + mu)
dist_uni = NormInvGamma()
(pred_uni, dist_uni) = dist_uni.posterior(data[:(n // 2)], return_updated=True)
dist_multi = MVNormInvWishart()
(pred_multi, dist_multi) = dist_multi.posterior(data[:(n // 2)], return_updated=True)
self.assertAlmostEqual(np.max(np.abs((pred_uni - pred_multi))), 0, places=6)
pred_uni = dist_uni.posterior(data[(n // 2):], log=False)
pred_multi = dist_multi.posterior(data[(n // 2):], log=False)
self.assertAlmostEqual(np.max(np.abs((pred_uni - pred_multi))), 0, places=6)
if (n > 5000):
t = [0, 1, 2, 3, 4, 5]
(xhat_u, sigma_u) = dist_uni.forecast(t)
self.assertAlmostEqual(np.max([np.abs((np.array(x) - mu)) for (t, x) in xhat_u]), 0, delta=0.05)
self.assertAlmostEqual(np.max([np.abs((np.array(s) - sigma)) for (t, s) in sigma_u]), 0, delta=0.05)
(xhat_m, sigma_m) = dist_multi.forecast(t)
self.assertAlmostEqual(np.max([np.abs((np.array(x) - mu)) for (t, x) in xhat_m]), 0, delta=0.05)
self.assertAlmostEqual(np.max([np.abs((np.array(s) - sigma)) for (t, s) in sigma_m]), 0, delta=0.05)
def test_mv_normal(self):
print()
logger.info((('test_mv_normal\n' + ('-' * 80)) + '\n'))
(n, d) = (300000, 20)
mu = np.random.randn(d)
u = np.random.randn(d, d)
cov = (u.T @ u)
data = TimeSeries.from_pd(((np.random.randn(n, d) @ u) + mu), freq='1h')
dist = MVNormInvWishart(data[:5])
dist.update(data[5:(- 5)])
dist.posterior(data[(- 5):])
if (version.parse(scipy.__version__) >= version.parse('1.6.0')):
self.assertAlmostEqual(np.abs((mu - dist.mu_posterior(None).loc)).mean(), 0, delta=0.05)
self.assertAlmostEqual(np.abs((cov - dist.Sigma_posterior(None).mean())).mean(), 0, delta=0.05)
(xhat, stderr) = dist.forecast(data.time_stamps[(- 50000):])
zscores = ((xhat.to_pd() - data[(- 50000):].to_pd()) / stderr.to_pd().values)
self.assertAlmostEqual(zscores.pow(2).mean().max(), 1, delta=0.02)
def test_bayesian_linreg(self):
print()
logger.info((('test_bayesian_linreg\n' + ('-' * 80)) + '\n'))
(n, sigma) = (100000, 1.5)
(m, b) = np.random.randn(2)
t = np.linspace(0, 2, ((2 * n) + 1))
x = UnivariateTimeSeries.from_pd((((m * t) + b) + (np.random.randn(len(t)) * sigma)), name='test').to_ts()
x_train = x[:(n + 1)]
x_test = x[(n + 1):]
uni = BayesianLinReg()
(uni_posterior, uni) = uni.posterior(x_train, return_updated=True)
multi = BayesianMVLinReg()
(multi_posterior, multi) = multi.posterior(x_train, return_updated=True)
self.assertAlmostEqual(np.abs((uni_posterior - multi_posterior)).max(), 0, places=6)
(xhat_u, sigma_u) = uni.forecast(x_test.time_stamps)
zscore_u = ((xhat_u.to_pd() - x_test.to_pd()) / sigma_u.to_pd().values)
self.assertAlmostEqual(zscore_u.pow(2).mean().item(), 1, delta=0.01)
(xhat_m, sigma_m) = multi.forecast(x_test.time_stamps)
zscore_m = ((xhat_m.to_pd() - x_test.to_pd()) / sigma_m.to_pd().values)
self.assertAlmostEqual(zscore_m.pow(2).mean().item(), 1, delta=0.01)
uni_posterior = uni.posterior(x_test)
multi_posterior = multi.posterior(x_test)
self.assertAlmostEqual(np.abs((uni_posterior - multi_posterior)).max(), 0, places=6)
naive_uni = np.concatenate([uni.posterior(x_test[i:(i + 1)]) for i in range(100)])
explicit_uni = np.concatenate([uni.posterior_explicit(x_test[i:(i + 1)]) for i in range(100)])
self.assertAlmostEqual(np.abs((naive_uni - explicit_uni)).max(), 0, places=6)
naive_multi = np.concatenate([multi.posterior(x_test[i:(i + 1)]) for i in range(100)])
explicit_multi = np.concatenate([multi.posterior_explicit(x_test[i:(i + 1)]) for i in range(100)])
self.assertAlmostEqual(np.abs((naive_multi - explicit_multi)).max(), 0, places=6)
(mhat, bhat) = uni.w_0
self.assertAlmostEqual(mhat, m, delta=0.02)
self.assertAlmostEqual(bhat, b, delta=0.01)
def test_mv_bayesian_linreg(self):
print()
logger.info((('test_mv_bayesian_linreg\n' + ('-' * 80)) + '\n'))
(n, sigma) = (200000, 2)
for d in [2, 3, 4, 5, 10, 20]:
(m, b) = np.random.randn(2, d)
t = np.linspace(0, 2, ((2 * n) + 1))
x = (((m.reshape(1, d) * t.reshape((- 1), 1)) + b.reshape(1, d)) + (np.random.randn(len(t), d) * sigma))
x_train = x[:(n + 1)]
x_test = x[(n + 1):]
dist = BayesianMVLinReg()
dist.update(x_train)
post = dist.posterior(x_test)
self.assertEqual(post.shape, (n,))
naive = np.concatenate([dist.posterior(x_test[i:(i + 1)]) for i in range(100)])
explicit = np.concatenate([dist.posterior_explicit(x_test[i:(i + 1)]) for i in range(100)])
self.assertAlmostEqual(np.abs((naive - explicit)).max(), 0, delta=0.01)
(mhat, bhat) = dist.w_0
self.assertAlmostEqual(np.abs((mhat - m)).max(), 0, delta=0.05)
self.assertAlmostEqual(np.abs((bhat - b)).max(), 0, delta=0.05)
(xhat, stderr) = dist.forecast(np.arange((n + 1), ((2 * n) + 1)))
zscores = ((xhat.to_pd() - x_test) / stderr.to_pd().values)
self.assertAlmostEqual(zscores.pow(2).mean().max(), 1, delta=0.02) |
def process_test_params_for_module(test_params_dict, device, test_instance_class):
module_name = compute_module_name(test_params_dict)
test_params_dict['constructor'] = test_params_dict.get('constructor', getattr(torch.nn, module_name))
test_instance = test_instance_class(**test_params_dict)
assert test_instance.get_name().startswith('test_')
module_variant_name = (test_instance.get_name()[5:] + (('_' + device) if (device != 'cpu') else ''))
if ('constructor_args' in test_params_dict):
assert ('cpp_constructor_args' in test_params_dict), 'If `constructor_args` is present in test params dict, to enable C++ API parity test, `cpp_constructor_args` must be present in:\n{}If you are interested in adding the C++ API parity test, please see:\nNOTE [How to check NN module / functional API parity between Python and C++ frontends]. \nIf not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this.'.format(pprint.pformat(test_params_dict))
return TorchNNModuleTestParams(module_name=module_name, module_variant_name=module_variant_name, test_instance=test_instance, cpp_constructor_args=test_params_dict.get('cpp_constructor_args', ''), arg_dict=compute_arg_dict(test_params_dict, test_instance), has_parity=test_params_dict.get('has_parity', True), device=device, cpp_tmp_folder=tempfile.mkdtemp()) |
def convBlock(numIn, numOut, inputResH, inputResW, net_type, baseWidth, cardinality, stride):
numIn = int(numIn)
numOut = int(numOut)
addTable = ConcatTable()
s_list = []
if (net_type != 'no_preact'):
s_list.append(nn.BatchNorm2d(numIn))
s_list.append(nn.ReLU(True))
conv1 = nn.Conv2d(numIn, (numOut // 2), kernel_size=1)
if opt.init:
nn.init.xavier_normal_(conv1.weight, gain=math.sqrt((1 / 2)))
s_list.append(conv1)
s_list.append(nn.BatchNorm2d((numOut // 2)))
s_list.append(nn.ReLU(True))
conv2 = nn.Conv2d((numOut // 2), (numOut // 2), kernel_size=3, stride=stride, padding=1)
if opt.init:
nn.init.xavier_normal_(conv2.weight)
s_list.append(conv2)
s = nn.Sequential(*s_list)
addTable.add(s)
D = math.floor((numOut // baseWidth))
C = cardinality
s_list = []
if (net_type != 'no_preact'):
s_list.append(nn.BatchNorm2d(numIn))
s_list.append(nn.ReLU(True))
conv1 = nn.Conv2d(numIn, D, kernel_size=1, stride=stride)
if opt.init:
nn.init.xavier_normal_(conv1.weight, gain=math.sqrt((1 / C)))
s_list.append(conv1)
s_list.append(nn.BatchNorm2d(D))
s_list.append(nn.ReLU(True))
s_list.append(pyramid(D, C, inputResH, inputResW))
s_list.append(nn.BatchNorm2d(D))
s_list.append(nn.ReLU(True))
a = nn.Conv2d(D, (numOut // 2), kernel_size=1)
a.nBranchIn = C
if opt.init:
nn.init.xavier_normal_(a.weight, gain=math.sqrt((1 / C)))
s_list.append(a)
s = nn.Sequential(*s_list)
addTable.add(s)
elewiswAdd = nn.Sequential(addTable, CaddTable(False))
conv2 = nn.Conv2d((numOut // 2), numOut, kernel_size=1)
if opt.init:
nn.init.xavier_normal_(conv2.weight, gain=math.sqrt((1 / 2)))
model = nn.Sequential(elewiswAdd, nn.BatchNorm2d((numOut // 2)), nn.ReLU(True), conv2)
return model |
def F(y, u, p, geometry):
return ((dot(grad(y), grad(p)) * geometry.dx) - ((u * p) * geometry.dx)) |
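# Note: F is the weak-form residual of the Poisson problem -div(grad(y)) = u
# with test function p, i.e. F = \int grad(y).grad(p) dx - \int u*p dx over the
# geometry's domain (assuming UFL-style dot/grad/dx, as the body suggests). |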
def _get_region(name, regions, bc_name):
try:
region = regions[name]
except IndexError:
msg = ("no region '%s' used in condition %s!" % (name, bc_name))
raise IndexError(msg)
return region |
class GemmOperation():
def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, epilogue_functor=EpilogueFunctor.LinearCombination, swizzling_functor=SwizzlingFunctor.Identity8):
self.operation_kind = OperationKind.Gemm
self.arch = arch
self.tile_description = tile_description
self.gemm_kind = gemm_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
def is_complex(self):
complex_operators = [MathOperation.multiply_add_complex, MathOperation.multiply_add_complex_gaussian, MathOperation.multiply_add_complex_fast_f32]
return (self.tile_description.math_instruction.math_operation in complex_operators)
def is_planar_complex(self):
return (self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray))
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def short_math_name(self):
if (self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian):
return ('g%s' % ShortDataTypeNames[self.accumulator_type()])
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {MathOperation.xor_popc: 'xor'}
if ((self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp) or (self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp)):
math_op = self.tile_description.math_instruction.math_operation
math_op_string = (math_operations_map[math_op] if (math_op in math_operations_map.keys()) else '')
inst_shape = ('%d%d%d' % tuple(self.tile_description.math_instruction.instruction_shape))
inst_shape += math_op_string
if ((self.tile_description.math_instruction.element_a != self.A.element) and (self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator)):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return ('%s%s%s%s' % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind]))
def extended_name(self):
if self.is_complex():
extended_name = '${core_name}'
elif ((self.C.element != self.tile_description.math_instruction.element_accumulator) and (self.A.element != self.tile_description.math_instruction.element_accumulator)):
extended_name = '${element_c}_${core_name}_${element_a}'
elif ((self.C.element == self.tile_description.math_instruction.element_accumulator) and (self.A.element != self.tile_description.math_instruction.element_accumulator)):
extended_name = '${core_name}_${element_a}'
else:
extended_name = '${core_name}'
extended_name = SubstituteTemplate(extended_name, {'element_a': DataTypeNames[self.A.element], 'element_c': DataTypeNames[self.C.element], 'core_name': self.core_name()})
return extended_name
def layout_name(self):
if (self.is_complex() or self.is_planar_complex()):
return ('%s%s' % (ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]))
return ('%s%s' % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]))
def procedural_name(self):
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.B.alignment, self.C.alignment])
return SubstituteTemplate('cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}', {'opcode_class': opcode_class_name, 'extended_name': self.extended_name(), 'threadblock': threadblock, 'layout': self.layout_name(), 'alignment': ('%d' % self.A.alignment)})
def configuration_name(self):
return self.procedural_name() |
def run_experiment(method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='garage.experiment.experiment_wrapper', python_command='python', dry=False, env=None, variant=None, force_cpu=False, pre_commands=None, **kwargs):
warnings.warn(DeprecationWarning('run_experiment is deprecated, and will be removed in the next release. Please use wrap_experiment instead.'))
if ((method_call is None) and (batch_tasks is None)):
raise Exception('Must provide at least either method_call or batch_tasks')
for task in (batch_tasks or [method_call]):
if (not hasattr(task, '__call__')):
raise ValueError('batch_tasks should be callable')
if (variant is None):
variant = dict()
if (batch_tasks is None):
batch_tasks = [dict(kwargs, pre_commands=pre_commands, method_call=method_call, exp_name=exp_name, log_dir=log_dir, env=env, variant=variant)]
global exp_count
if force_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
for task in batch_tasks:
call = task.pop('method_call')
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if (task.get('exp_name', None) is None):
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp, exp_count)
if (task.get('log_dir', None) is None):
task['log_dir'] = '{log_dir}/local/{exp_prefix}/{exp_name}'.format(log_dir=osp.join(os.getcwd(), 'data'), exp_prefix=exp_prefix.replace('_', '-'), exp_name=task['exp_name'])
if (task.get('variant', None) is not None):
variant = task.pop('variant')
if ('exp_name' not in variant):
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(pickle.dumps(variant)).decode('utf-8')
elif ('variant' in task):
del task['variant']
task['env'] = (task.get('env', dict()) or dict())
task['env']['GARAGE_FORCE_CPU'] = str(force_cpu)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(task, python_command=python_command, script=script)
print(command)
if dry:
return
try:
if (env is None):
env = dict()
subprocess.run(command, shell=True, env=dict(os.environ, **env), check=True)
except Exception as e:
print(e)
raise |
def setup_plot_report_loss_entries(training_type):
if ((training_type == 'classification') or (training_type == 'regression')):
entries = ['main/loss', 'val/main/loss']
elif (training_type == 'multi_regression'):
entries = ['main/loss', 'validation/main/loss', 'main/loss_click', 'validation/main/loss_click', 'main/loss_cv', 'validation/main/loss_cv']
else:
raise ValueError('Invalid training type: {}'.format(training_type))
return entries |
def quantize_model_(model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-06, max_tentatives=100, remove_weights=False, verbose=True, state_dict=None):
quantized_layers = get_layers(model, layers_to_quantize[step], remove_weights=remove_weights)
for layer in quantized_layers:
is_master_process = ((not dist.is_initialized()) or (dist.is_initialized() and (dist.get_rank() == 0)))
verbose = (verbose and is_master_process)
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(f'Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids')
weight = module.weight.data.clone()
is_bias = ('bias' in [x[0] for x in module.named_parameters()])
bias = (module.bias.data.clone() if is_bias else None)
quantizer = PQ(weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose)
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
if ((n_iter == 0) and state_dict):
centroids = torch.rand(centroids.size())
centroids = centroids.cuda()  # .cuda() is out-of-place; rebind the result
counts_key = ((layer + '.') + 'counts')
assignment_key = ((layer + '.') + 'assignments')
counts = list(state_dict[counts_key].shape)[0]
print(layer)
print(state_dict[counts_key])
print(counts)
num_assignments = list(state_dict[assignment_key].shape)[0]
num_extra = (num_assignments - counts)
print(num_assignments)
print(num_extra)
assignments_bins = torch.arange(counts)
assignments_rand = torch.randint(0, (counts - 1), (num_extra,))
assignments = torch.cat((assignments_bins, assignments_rand), 0)
assignments = assignments.cuda()
print('assignments')
print(assignments)
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
if isinstance(module, nn.Linear):
(out_features, in_features) = map((lambda k: module.__dict__[k]), ['out_features', 'in_features'])
quantized_module = PQLinear(centroids, assignments, bias, in_features, out_features)
elif isinstance(module, nn.Embedding):
(num_embeddings, embedding_dim) = map((lambda k: module.__dict__[k]), ['num_embeddings', 'embedding_dim'])
quantized_module = PQEmbedding(centroids, assignments, num_embeddings, embedding_dim)
elif isinstance(module, nn.Conv2d):
(out_channels, in_channels, kernel_size) = map((lambda k: module.__dict__[k]), ['out_channels', 'in_channels', 'kernel_size'])
(stride, padding, dilation, groups, padding_mode) = map((lambda k: module.__dict__[k]), ['stride', 'padding', 'dilation', 'groups', 'padding_mode'])
quantized_module = PQConv2d(centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode)
else:
raise ValueError(f'Module {module} not yet supported for quantization')
attrsetter(layer)(model, quantized_module)
size_tracker.update(weight, block_size, n_centroids)
return quantized_layers |
def process_coverage():
global ARGS, MAP, FIRST_COVERAGE
while True:
fuzzer_files = []
for fuzzer in get_all_names(False):
fuzzer_files += get_coverage_fuzzer_files(fuzzer)
if fuzzer_files:
random.shuffle(fuzzer_files)
process_coverage_fuzzer_files(fuzzer_files)
save_all_bitmap()
FIRST_COVERAGE = False
else:
log('coverage: no new files')
if FIRST_COVERAGE:
save_all_bitmap()
FIRST_COVERAGE = False
if (not ARGS.live):
return
time.sleep(ARGS.sleep) |
class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
_parameter_constraints: dict = {'quantile': [Interval(Real, 0, 1, closed='neither')], 'alpha': [Interval(Real, 0, None, closed='left')], 'fit_intercept': ['boolean'], 'solver': [StrOptions({'highs-ds', 'highs-ipm', 'highs', 'interior-point', 'revised simplex'})], 'solver_options': [dict, None]}
def __init__(self, *, quantile=0.5, alpha=1.0, fit_intercept=True, solver='highs', solver_options=None):
self.quantile = quantile
self.alpha = alpha
self.fit_intercept = fit_intercept
self.solver = solver
self.solver_options = solver_options
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
(X, y) = self._validate_data(X, y, accept_sparse=['csc', 'csr', 'coo'], y_numeric=True, multi_output=False)
sample_weight = _check_sample_weight(sample_weight, X)
n_features = X.shape[1]
n_params = n_features
if self.fit_intercept:
n_params += 1
alpha = (np.sum(sample_weight) * self.alpha)
if ((self.solver in ('highs-ds', 'highs-ipm', 'highs')) and (sp_version < parse_version('1.6.0'))):
raise ValueError(f'Solver {self.solver} is only available with scipy>=1.6.0, got {sp_version}')
else:
solver = self.solver
if ((solver == 'interior-point') and (sp_version >= parse_version('1.11.0'))):
raise ValueError(f'Solver {solver} is not anymore available in SciPy >= 1.11.0.')
if (sparse.issparse(X) and (solver not in ['highs', 'highs-ds', 'highs-ipm'])):
raise ValueError(f"Solver {self.solver} does not support sparse X. Use solver 'highs' for example.")
if ((self.solver_options is None) and (solver == 'interior-point')):
solver_options = {'lstsq': True}
else:
solver_options = self.solver_options
indices = np.nonzero(sample_weight)[0]
n_indices = len(indices)
if (n_indices < len(sample_weight)):
sample_weight = sample_weight[indices]
X = _safe_indexing(X, indices)
y = _safe_indexing(y, indices)
c = np.concatenate([np.full((2 * n_params), fill_value=alpha), (sample_weight * self.quantile), (sample_weight * (1 - self.quantile))])
if self.fit_intercept:
c[0] = 0
c[n_params] = 0
if (solver in ['highs', 'highs-ds', 'highs-ipm']):
eye = sparse.eye(n_indices, dtype=X.dtype, format='csc')
if self.fit_intercept:
ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
A_eq = sparse.hstack([ones, X, (- ones), (- X), eye, (- eye)], format='csc')
else:
A_eq = sparse.hstack([X, (- X), eye, (- eye)], format='csc')
else:
eye = np.eye(n_indices)
if self.fit_intercept:
ones = np.ones((n_indices, 1))
A_eq = np.concatenate([ones, X, (- ones), (- X), eye, (- eye)], axis=1)
else:
A_eq = np.concatenate([X, (- X), eye, (- eye)], axis=1)
b_eq = y
result = linprog(c=c, A_eq=A_eq, b_eq=b_eq, method=solver, options=solver_options)
solution = result.x
if (not result.success):
failure = {1: 'Iteration limit reached.', 2: 'Problem appears to be infeasible.', 3: 'Problem appears to be unbounded.', 4: 'Numerical difficulties encountered.'}
warnings.warn(((((f'''Linear programming for QuantileRegressor did not succeed.
Status is {result.status}: ''' + failure.setdefault(result.status, 'unknown reason')) + '\n') + 'Result message of linprog:\n') + result.message), ConvergenceWarning)
params = (solution[:n_params] - solution[n_params:(2 * n_params)])
self.n_iter_ = result.nit
if self.fit_intercept:
self.coef_ = params[1:]
self.intercept_ = params[0]
else:
self.coef_ = params
self.intercept_ = 0.0
return self |
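# Usage sketch on synthetic data (this estimator ships in scikit-learn as
# sklearn.linear_model.QuantileRegressor):
import numpy as np
from sklearn.linear_model import QuantileRegressor
rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = (X @ np.array([1.0, (- 2.0), 0.5])) + rng.standard_normal(200)
reg = QuantileRegressor(quantile=0.9, alpha=0.0, solver='highs').fit(X, y)
print(np.mean((y <= reg.predict(X))))  # roughly 0.9 by construction |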
class EarlyStopScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08):
super().__init__(optimizer, mode=mode, factor=factor, patience=patience, threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, eps=eps, verbose=verbose)
self.no_decrease = 0
def step(self, error, epoch=None):
current = float(error)
if (epoch is None):
epoch = self.last_epoch = (self.last_epoch + 1)
self.last_epoch = epoch
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0
if (self.num_bad_epochs > self.patience):
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
return self._reduce_lr(epoch)
def _reduce_lr(self, epoch):
for (i, param_group) in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max((old_lr * self.factor), self.min_lrs[i])
if ((old_lr - new_lr) > self.eps):
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate of group {} to {:.4e}.'.format(epoch, i, new_lr))
return False
else:
return True
def __repr__(self):
return 'EarlyStopScheduler(min_lr={}, patience={})'.format(self.min_lrs, self.patience) |
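# Training-loop sketch for EarlyStopScheduler (hedged: `model`,
# `train_one_epoch` and `validate` are placeholders). step() returns True once
# the learning rate can no longer be reduced, which doubles as the stop signal.
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# scheduler = EarlyStopScheduler(optimizer, factor=0.5, patience=3, min_lr=1e-5)
# for epoch in range(100):
#     train_one_epoch(model, optimizer)
#     if scheduler.step(validate(model)):
#         break  # early stop |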
@dataclass
class TrainLmConfig():
data: Union[(LMDatasetConfig, LMMixtureDatasetConfig)] = field(default_factory=LMDatasetConfig)
trainer: TrainerConfig = field(default_factory=TrainerConfig)
model: LmConfig = field(default_factory=Gpt2Config)
optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)
initialize_from_hf: Union[(bool, str)] = False
use_hf_model_config: bool = False
fcm_prob: float = 0.0
hf_save_path: Optional[str] = None
hf_upload: Optional[str] = None
hf_save_steps: int = 10000 |
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
is_deepspeed = False
if path.is_dir():
is_deepspeed = True
latest_path = (path / 'latest')
if latest_path.is_file():
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
path /= f'{tag}/mp_rank_00_model_states.pt'
state_dict = torch.load(path, map_location=device)
if is_deepspeed:
state_dict = state_dict['module']
def key_mapping(key):
return re.sub(r'^module\.model\.', '', key)
state_dict = {key_mapping(k): v for (k, v) in state_dict.items()}
return state_dict |
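# Usage sketch for load_checkpoint (hedged: `MyModel` is a placeholder; for a
# DeepSpeed run, `path` is the checkpoint directory holding the 'latest' file):
# model = MyModel()
# state_dict = load_checkpoint('checkpoints/run1', device='cpu')
# missing, unexpected = model.load_state_dict(state_dict, strict=False) |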
@pytest.mark.pure
def test_two_backward_passes():
@dace.program
def train_step(x1: dace.float32[(10, 5)], x2: dace.float32[5], dy: dace.float32[10]):
x1.requires_grad_()
x2.requires_grad_()
z1 = (x1 + 1)
y1 = np.log(z1)
l1 = np.add.reduce(y1, axis=1)
z2 = (x2 * 2)
y2 = np.log(z2)
l2 = y2.sum()
l2.backward()
l1.backward(dy)
return (x1.grad, x2.grad)
def torch_fn(x1, x2, dy):
x1.requires_grad_()
x2.requires_grad_()
z1 = (x1 + 1)
y1 = torch.log(z1).sum(axis=1)
z2 = (x2 * 2)
y2 = torch.log(z2).sum()
y2.backward()
y1.backward(dy)
return (x1.grad, x2.grad)
sdfg = train_step.to_sdfg()
sdfg.expand_library_nodes()
sdfg.validate()
x1 = torch.randn(10, 5)
x2 = torch.randn(5)
dy = torch.randn(10)
(r1, r2) = train_step(x1.clone(), x2.clone(), dy.clone())
(ex_1, ex_2) = torch_fn(x1.clone(), x2.clone(), dy.clone())
tensors_close('x2.grad', ex_2, r2)
tensors_close('x1.grad', ex_1, r1) |
def adjust_learning_rate(optimizers, cur_iter, args):
scale_running_lr = ((1.0 - (float(cur_iter) / args.max_iters)) ** args.lr_pow)
args.running_lr_encoder = (args.lr_encoder * scale_running_lr)
args.running_lr_decoder = (args.lr_decoder * scale_running_lr)
(optimizer_encoder, optimizer_decoder) = optimizers
for param_group in optimizer_encoder.param_groups:
param_group['lr'] = args.running_lr_encoder
for param_group in optimizer_decoder.param_groups:
param_group['lr'] = args.running_lr_decoder |
def _unlink_solc(solc_path: Path) -> None:
solc_path.unlink()
if (_get_target_os() == 'windows'):
shutil.rmtree(solc_path.parent) |
class DSConvNetwork(network_base.BaseNetwork):
def __init__(self, inputs, trainable=True, conv_width=1.0):
self.conv_width = conv_width
network_base.BaseNetwork.__init__(self, inputs, trainable)
def setup(self):
self.feed('image').conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False).separable_conv(3, 3, round((self.conv_width * 64)), 2, name='conv1_2').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv2_1').separable_conv(3, 3, round((self.conv_width * 128)), 2, name='conv2_2').separable_conv(3, 3, round((self.conv_width * 256)), 1, name='conv3_1').separable_conv(3, 3, round((self.conv_width * 256)), 1, name='conv3_2').separable_conv(3, 3, round((self.conv_width * 256)), 1, name='conv3_3').separable_conv(3, 3, round((self.conv_width * 256)), 2, name='conv3_4').separable_conv(3, 3, round((self.conv_width * 512)), 1, name='conv4_1').separable_conv(3, 3, round((self.conv_width * 512)), 1, name='conv4_2').separable_conv(3, 3, round((self.conv_width * 256)), 1, name='conv4_3_CPM').separable_conv(3, 3, 128, 1, name='conv4_4_CPM').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_1_CPM_L1').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_2_CPM_L1').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_3_CPM_L1').conv(1, 1, 512, 1, 1, name='conv5_4_CPM_L1').conv(1, 1, 38, 1, 1, relu=False, name='conv5_5_CPM_L1')
self.feed('conv4_4_CPM').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_1_CPM_L2').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_2_CPM_L2').separable_conv(3, 3, round((self.conv_width * 128)), 1, name='conv5_3_CPM_L2').conv(1, 1, 512, 1, 1, name='conv5_4_CPM_L2').conv(1, 1, 19, 1, 1, relu=False, name='conv5_5_CPM_L2')
self.feed('conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'conv4_4_CPM').concat(3, name='concat_stage2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage2_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage2_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage2_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage2_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage2_L1').conv(1, 1, 128, 1, 1, name='Mconv6_stage2_L1').conv(1, 1, 38, 1, 1, relu=False, name='Mconv7_stage2_L1')
self.feed('concat_stage2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage2_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage2_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage2_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage2_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage2_L2').conv(1, 1, 128, 1, 1, name='Mconv6_stage2_L2').conv(1, 1, 19, 1, 1, relu=False, name='Mconv7_stage2_L2')
self.feed('Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'conv4_4_CPM').concat(3, name='concat_stage3').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage3_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage3_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage3_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage3_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage3_L1').conv(1, 1, 128, 1, 1, name='Mconv6_stage3_L1').conv(1, 1, 38, 1, 1, relu=False, name='Mconv7_stage3_L1')
self.feed('concat_stage3').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage3_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage3_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage3_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage3_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage3_L2').conv(1, 1, 128, 1, 1, name='Mconv6_stage3_L2').conv(1, 1, 19, 1, 1, relu=False, name='Mconv7_stage3_L2')
self.feed('Mconv7_stage3_L1', 'Mconv7_stage3_L2', 'conv4_4_CPM').concat(3, name='concat_stage4').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage4_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage4_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage4_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage4_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage4_L1').conv(1, 1, 128, 1, 1, name='Mconv6_stage4_L1').conv(1, 1, 38, 1, 1, relu=False, name='Mconv7_stage4_L1')
self.feed('concat_stage4').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage4_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage4_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage4_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage4_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage4_L2').conv(1, 1, 128, 1, 1, name='Mconv6_stage4_L2').conv(1, 1, 19, 1, 1, relu=False, name='Mconv7_stage4_L2')
self.feed('Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'conv4_4_CPM').concat(3, name='concat_stage5').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage5_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage5_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage5_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage5_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage5_L1').conv(1, 1, 128, 1, 1, name='Mconv6_stage5_L1').conv(1, 1, 38, 1, 1, relu=False, name='Mconv7_stage5_L1')
self.feed('concat_stage5').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage5_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage5_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage5_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage5_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage5_L2').conv(1, 1, 128, 1, 1, name='Mconv6_stage5_L2').conv(1, 1, 19, 1, 1, relu=False, name='Mconv7_stage5_L2')
self.feed('Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'conv4_4_CPM').concat(3, name='concat_stage6').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage6_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage6_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage6_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage6_L1').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage6_L1').conv(1, 1, 128, 1, 1, name='Mconv6_stage6_L1').conv(1, 1, 38, 1, 1, relu=False, name='Mconv7_stage6_L1')
self.feed('concat_stage6').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv1_stage6_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv2_stage6_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv3_stage6_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv4_stage6_L2').separable_conv(7, 7, round((self.conv_width * 128)), 1, name='Mconv5_stage6_L2').conv(1, 1, 128, 1, 1, name='Mconv6_stage6_L2').conv(1, 1, 19, 1, 1, relu=False, name='Mconv7_stage6_L2')
self.feed('Mconv7_stage6_L2', 'Mconv7_stage6_L1').concat(3, name='concat_stage7') |
class TFGroupViTTextModel(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
def checkpoint_sequential(functions, segments, input, **kwargs):
preserve = kwargs.pop('preserve_rng_state', True)
if kwargs:
raise ValueError(('Unexpected keyword arguments: ' + ','.join((arg for arg in kwargs))))
def run_function(start, end, functions):
def forward(input):
for j in range(start, (end + 1)):
input = functions[j](input)
return input
return forward
if isinstance(functions, torch.nn.Sequential):
functions = list(functions.children())
segment_size = (len(functions) // segments)
end = (- 1)
for start in range(0, (segment_size * (segments - 1)), segment_size):
end = ((start + segment_size) - 1)
input = checkpoint(run_function(start, end, functions), input, preserve_rng_state=preserve)
return run_function((end + 1), (len(functions) - 1), functions)(input) |
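# Usage sketch: checkpointing trades compute for memory on a deep
# nn.Sequential; only segment boundaries keep activations, interiors are
# recomputed on backward. The import also supplies the `checkpoint` the
# function body above relies on.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
net = nn.Sequential(*[nn.Linear(64, 64) for _ in range(8)])
x = torch.randn(4, 64, requires_grad=True)
out = checkpoint_sequential(net, segments=2, input=x)
out.sum().backward() |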
def create_tensor(array: numpy.ndarray) -> Union[(torch.Tensor, numpy.ndarray)]:
if (array.dtype.kind in 'UO'):
return array
if (array.dtype == numpy.uint32):
array = numpy.asarray(array, dtype=numpy.int64)
return torch.tensor(array) |
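# Example behavior of create_tensor:
# create_tensor(numpy.array(['a', 'b']))              -> returned unchanged (dtype kind 'U')
# create_tensor(numpy.arange(3, dtype=numpy.uint32))  -> torch.tensor([0, 1, 2]) as int64 |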
def unpooling_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, kernel, channel_last=False):
dy = grad_inputs[0]
x0_shape = input_shapes[0]
ctx = nn.get_current_context()
df = UnpoolingDataGrad(ctx, kernel, channel_last)
df.xshape = x0_shape
dx0 = df(dy)
return dx0 |
class CvtSelfAttentionLinearProjection(nn.Module):
def forward(self, hidden_state):
(batch_size, num_channels, height, width) = hidden_state.shape
hidden_size = (height * width)
hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
return hidden_state |
def planetType(temperature, mass, radius):
if (not np.isnan(mass)):
sizeType = planetMassType(mass)
elif (not np.isnan(radius)):
sizeType = planetRadiusType(radius)
else:
return None
return '{0} {1}'.format(planetTempType(temperature), sizeType) |
@test_utils.test(arch=[ti.opengl, ti.vulkan])
def test_mpm99_aot():
quality = 1
(n_particles, n_grid) = ((9000 * (quality ** 2)), (128 * quality))
(dx, inv_dx) = ((1 / n_grid), float(n_grid))
dt = (0.0001 / quality)
(p_vol, p_rho) = (((dx * 0.5) ** 2), 1)
p_mass = (p_vol * p_rho)
(E, nu) = (1000.0, 0.2)
(mu_0, lambda_0) = ((E / (2 * (1 + nu))), ((E * nu) / ((1 + nu) * (1 - (2 * nu)))))
x = ti.Vector.field(2, dtype=float, shape=n_particles)
v = ti.Vector.field(2, dtype=float, shape=n_particles)
C = ti.Matrix.field(2, 2, dtype=float, shape=n_particles)
F = ti.Matrix.field(2, 2, dtype=float, shape=n_particles)
material = ti.field(dtype=int, shape=n_particles)
Jp = ti.field(dtype=float, shape=n_particles)
grid_v = ti.Vector.field(2, dtype=float, shape=(n_grid, n_grid))
grid_m = ti.field(dtype=float, shape=(n_grid, n_grid))
grid_v_int = ti.Vector.field(2, dtype=int, shape=(n_grid, n_grid))
grid_m_int = ti.field(dtype=int, shape=(n_grid, n_grid))
v_exp = 24
m_exp = 40
@ti.kernel
def substep():
for (i, j) in grid_m:
grid_v[(i, j)] = [0, 0]
grid_m[(i, j)] = 0
grid_v_int[(i, j)] = [0, 0]
grid_m_int[(i, j)] = 0
for p in x:
base = ((x[p] * inv_dx) - 0.5).cast(int)
fx = ((x[p] * inv_dx) - base.cast(float))
w = [(0.5 * ((1.5 - fx) ** 2)), (0.75 - ((fx - 1) ** 2)), (0.5 * ((fx - 0.5) ** 2))]
F[p] = ((ti.Matrix.identity(float, 2) + (dt * C[p])) @ F[p])
h = ti.exp((10 * (1.0 - Jp[p])))
if (material[p] == 1):
h = 0.3
(mu, la) = ((mu_0 * h), (lambda_0 * h))
if (material[p] == 0):
mu = 0.0
(U, sig, V) = ti.svd(F[p])
J = 1.0
for d in ti.static(range(2)):
new_sig = sig[(d, d)]
if (material[p] == 2):
new_sig = ti.min(ti.max(sig[(d, d)], (1 - 0.025)), (1 + 0.0045))
Jp[p] *= (sig[(d, d)] / new_sig)
sig[(d, d)] = new_sig
J *= new_sig
if (material[p] == 0):
F[p] = (ti.Matrix.identity(float, 2) * ti.sqrt(J))
elif (material[p] == 2):
F[p] = ((U @ sig) @ V.transpose())
stress = ((((2 * mu) * (F[p] - (U @ V.transpose()))) @ F[p].transpose()) + (((ti.Matrix.identity(float, 2) * la) * J) * (J - 1)))
stress = ((((((- dt) * p_vol) * 4) * inv_dx) * inv_dx) * stress)
affine = (stress + (p_mass * C[p]))
for (i, j) in ti.static(ti.ndrange(3, 3)):
offset = ti.Vector([i, j])
dpos = ((offset.cast(float) - fx) * dx)
weight = (w[i][0] * w[j][1])
grid_v_int[(base + offset)] += int(ti.floor((0.5 + ((weight * ((p_mass * v[p]) + (affine @ dpos))) * (2.0 ** v_exp)))))
grid_m_int[(base + offset)] += int(ti.floor((0.5 + ((weight * p_mass) * (2.0 ** m_exp)))))
for (i, j) in grid_m:
if (grid_m_int[(i, j)] > 0):
grid_v[(i, j)] = (((2 ** (m_exp - v_exp)) / grid_m_int[(i, j)]) * grid_v_int[(i, j)])
grid_v[(i, j)][1] -= (dt * 50)
if ((i < 3) and (grid_v[(i, j)][0] < 0)):
grid_v[(i, j)][0] = 0
if ((i > (n_grid - 3)) and (grid_v[(i, j)][0] > 0)):
grid_v[(i, j)][0] = 0
if ((j < 3) and (grid_v[(i, j)][1] < 0)):
grid_v[(i, j)][1] = 0
if ((j > (n_grid - 3)) and (grid_v[(i, j)][1] > 0)):
grid_v[(i, j)][1] = 0
for p in x:
base = ((x[p] * inv_dx) - 0.5).cast(int)
fx = ((x[p] * inv_dx) - base.cast(float))
w = [(0.5 * ((1.5 - fx) ** 2)), (0.75 - ((fx - 1.0) ** 2)), (0.5 * ((fx - 0.5) ** 2))]
new_v = ti.Vector.zero(float, 2)
new_C = ti.Matrix.zero(float, 2, 2)
for (i, j) in ti.static(ti.ndrange(3, 3)):
dpos = (ti.Vector([i, j]).cast(float) - fx)
g_v = grid_v[(base + ti.Vector([i, j]))]
weight = (w[i][0] * w[j][1])
new_v += (weight * g_v)
new_C += (((4 * inv_dx) * weight) * g_v.outer_product(dpos))
(v[p], C[p]) = (new_v, new_C)
x[p] += (dt * v[p])
group_size = (n_particles // 3)
@ti.kernel
def initialize():
for i in range(n_particles):
x[i] = [(((ti.random() * 0.2) + 0.3) + (0.1 * (i // group_size))), (((ti.random() * 0.2) + 0.05) + (0.32 * (i // group_size)))]
material[i] = (i // group_size)
v[i] = ti.Matrix([0, 0])
F[i] = ti.Matrix([[1, 0], [0, 1]])
Jp[i] = 1
with tempfile.TemporaryDirectory() as tmpdir:
m = ti.aot.Module()
m.add_field('x', x)
m.add_field('v', v)
m.add_field('C', C)
m.add_field('J', Jp)
m.add_field('grid_v', grid_v)
m.add_field('grid_m', grid_m)
m.add_field('grid_v_int', grid_v_int)
m.add_field('grid_m_int', grid_m_int)
m.add_field('material', material)
m.add_kernel(initialize)
m.add_kernel(substep)
m.save(tmpdir)
with open(os.path.join(tmpdir, 'metadata.json')) as json_file:
json.load(json_file) |
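The scatter in substep accumulates momentum and mass into integer grids scaled by 2 ** v_exp and 2 ** m_exp, so parallel atomic adds are exactly associative. A standalone NumPy sketch of that fixed-point round trip (my own illustration, not part of the test):

import numpy as np

m_exp = 40
contributions = np.array([0.125, 0.333, 0.0417])
# Encode: scale and round, mirroring int(ti.floor(0.5 + value * 2.0 ** m_exp)).
encoded = np.floor(0.5 + contributions * 2.0 ** m_exp).astype(np.int64)
total = int(encoded.sum())  # integer addition commutes exactly, unlike float addition
# Decode back to floating point; matches the float sum to within rounding.
print(total / 2.0 ** m_exp, contributions.sum())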
class Tokenizer(Registrable):
default_implementation = 'word'
def batch_tokenize(self, texts: List[str]) -> List[List[Token]]:
raise NotImplementedError
def tokenize(self, text: str) -> List[Token]:
raise NotImplementedError |
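A minimal concrete subclass sketch (the WhitespaceTokenizer name and the Token(text) construction are assumptions for illustration; `List` and `Token` come from the original module's imports, and a real implementation would also register itself with Tokenizer):

class WhitespaceTokenizer(Tokenizer):
    def tokenize(self, text: str) -> List[Token]:
        # Toy rule: split on whitespace only.
        return [Token(t) for t in text.split()]
    def batch_tokenize(self, texts: List[str]) -> List[List[Token]]:
        # Simple default: tokenize each text independently.
        return [self.tokenize(text) for text in texts]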
@register_task('translation_multi_simple_epoch')
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', action=FileContentsAction)
parser.add_argument('--keep-inference-langtok', action='store_true', help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
self.eval_lang_pairs = self.lang_pairs
self.model_lang_pairs = self.lang_pairs
self.source_langs = [d.split('-')[0] for d in self.lang_pairs]
self.target_langs = [d.split('-')[1] for d in self.lang_pairs]
self.check_dicts(self.dicts, self.source_langs, self.target_langs)
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(args, self.lang_pairs, langs, dicts, self.sampling_method)
@classmethod
def check_dicts(cls, dicts, source_langs, target_langs):
src_dict = dicts[source_langs[0]]
tgt_dict = dicts[target_langs[0]]
for src_lang in source_langs:
assert (src_dict == dicts[src_lang]), 'Different dictionaries are specified for different source languages; '
for tgt_lang in target_langs:
assert (tgt_dict == dicts[tgt_lang]), 'Different dictionaries are specified for different target languages; '
@classmethod
def setup_task(cls, args, **kwargs):
(langs, dicts, training) = MultilingualDatasetManager.prepare(cls.load_dictionary, args, **kwargs)
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
if (split in self.datasets):
dataset = self.datasets[split]
if self.has_sharded_data(split):
if (self.args.virtual_epoch_size is not None):
if dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
return
else:
shard_epoch = epoch
else:
shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch)
logger.info(f'loading data for {split} epoch={epoch}/{shard_epoch}')
logger.info(f'mem usage: {data_utils.get_mem_usage()}')
if (split in self.datasets):
del self.datasets[split]
logger.info('old dataset deleted manually')
logger.info(f'mem usage: {data_utils.get_mem_usage()}')
self.datasets[split] = self.data_manager.load_dataset(split, self.training, epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if (constraints is not None):
raise NotImplementedError('Constrained decoding with the multilingual_translation task is not supported')
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
(src_langtok_spec, tgt_langtok_spec) = self.args.langtoks['main']
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(dataset, src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(self.args.source_lang, self.args.target_lang, dataset=dataset.src, spec=src_langtok_spec)
return dataset
def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
if (not getattr(args, 'keep_inference_langtok', False)):
(_, tgt_langtok_spec) = self.args.langtoks['main']
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
extra_gen_cls_kwargs = (extra_gen_cls_kwargs or {})
extra_gen_cls_kwargs['symbols_to_strip_from_output'] = {tgt_lang_tok}
return super().build_generator(models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs)
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
(loss, sample_size, logging_output) = super().valid_step(sample, model, criterion)
return (loss, sample_size, logging_output)
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
(_, tgt_langtok_spec) = self.args.langtoks['main']
if (not self.args.lang_tok_replacing_bos_eos):
if ((prefix_tokens is None) and tgt_langtok_spec):
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.size(0)
prefix_tokens = torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
else:
return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=(self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec) if tgt_langtok_spec else self.target_dictionary.eos()))
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
return self.dicts[self.source_langs[0]]
@property
def target_dictionary(self):
return self.dicts[self.target_langs[0]]
def create_batch_sampler_func(self, max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=1, seed=1):
def construct_batch_sampler(dataset, epoch):
splits = [s for (s, _) in self.datasets.items() if (self.datasets[s] == dataset)]
split = (splits[0] if (len(splits) > 0) else None)
if (epoch is not None):
dataset.set_epoch(epoch)
start_time = time.time()
logger.info(f'start batch sampler: mem usage: {data_utils.get_mem_usage()}')
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
logger.info(f'[{split}] _sampler order indices time: {get_time_gap(start_time, time.time())}')
logger.info(f'mem usage: {data_utils.get_mem_usage()}')
if (max_positions is not None):
my_time = time.time()
indices = self.filter_indices_by_size(indices, dataset, max_positions, ignore_invalid_inputs)
logger.info(f'[{split}] _sampler filter_by_size time: {get_time_gap(my_time, time.time())}')
logger.info(f'mem usage: {data_utils.get_mem_usage()}')
my_time = time.time()
batch_sampler = dataset.batch_by_size(indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple)
logger.info(f'[{split}] _sampler batch_by_size time: {get_time_gap(my_time, time.time())}')
logger.info(f'[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}')
logger.info(f'mem usage: {data_utils.get_mem_usage()}')
return batch_sampler
return construct_batch_sampler
def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False):
assert isinstance(dataset, FairseqDataset)
if (dataset in self.dataset_to_epoch_iter):
return self.dataset_to_epoch_iter[dataset]
if (self.args.sampling_method == 'RoundRobin'):
batch_iter = super().get_batch_iterator(dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions, ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, data_buffer_size=data_buffer_size, disable_iterator_cache=disable_iterator_cache)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=required_batch_size_multiple, seed=seed)
epoch_iter = iterators.EpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_sampler=construct_batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch)
return epoch_iter |
def get_imagenet_models(config):
super_type = getattr(config, 'super_type', 'basic')
if (super_type == 'basic'):
from .ImageNet_ResNet import ResNet
from .ImageNet_MobileNetV2 import MobileNetV2
if (config.arch == 'resnet'):
return ResNet(config.block_name, config.layers, config.deep_stem, config.class_num, config.zero_init_residual, config.groups, config.width_per_group)
elif (config.arch == 'mobilenet_v2'):
return MobileNetV2(config.class_num, config.width_multi, config.input_channel, config.last_channel, 'InvertedResidual', config.dropout)
else:
raise ValueError('invalid arch : {:}'.format(config.arch))
elif super_type.startswith('infer'):
assert (len(super_type.split('-')) == 2), 'invalid super_type : {:}'.format(super_type)
infer_mode = super_type.split('-')[1]
if (infer_mode == 'shape'):
from .shape_infers import InferImagenetResNet
from .shape_infers import InferMobileNetV2
if (config.arch == 'resnet'):
return InferImagenetResNet(config.block_name, config.layers, config.xblocks, config.xchannels, config.deep_stem, config.class_num, config.zero_init_residual)
elif (config.arch == 'MobileNetV2'):
return InferMobileNetV2(config.class_num, config.xchannels, config.xblocks, config.dropout)
else:
raise ValueError('invalid arch-mode : {:}'.format(config.arch))
else:
raise ValueError('invalid infer-mode : {:}'.format(infer_mode))
else:
raise ValueError('invalid super-type : {:}'.format(super_type)) |
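A hedged usage sketch for the 'basic' branch above; the config fields mirror exactly those read by the function, but the values are illustrative and depend on the actual ResNet implementation:

from types import SimpleNamespace

config = SimpleNamespace(super_type='basic', arch='resnet', block_name='BasicBlock',
                         layers=[2, 2, 2, 2], deep_stem=False, class_num=1000,
                         zero_init_residual=False, groups=1, width_per_group=64)
model = get_imagenet_models(config)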
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str)
parser.add_argument('--gt_dir', type=str, default='')
parser.add_argument('--sample_dir', type=str, default='')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=100)
return parser |
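A quick usage sketch (argument values illustrative):

# Parse evaluation options; defaults apply for anything omitted.
args = get_parser().parse_args(['--dir', 'runs/exp1', '--batch_size', '50'])
print(args.dir, args.gpu, args.batch_size)  # runs/exp1 0 50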
class CVTArchive(ArchiveBase):
def __init__(self, bins, ranges, seed=None, dtype=np.float64, samples=100000, custom_centroids=None, k_means_kwargs=None, use_kd_tree=False, ckdtree_kwargs=None):
ArchiveBase.__init__(self, storage_dims=(bins,), behavior_dim=len(ranges), seed=seed, dtype=dtype)
ranges = list(zip(*ranges))
self._lower_bounds = np.array(ranges[0], dtype=self.dtype)
self._upper_bounds = np.array(ranges[1], dtype=self.dtype)
self._bins = bins
self._k_means_kwargs = ({} if (k_means_kwargs is None) else k_means_kwargs.copy())
if ('n_init' not in self._k_means_kwargs):
self._k_means_kwargs['n_init'] = 1
if ('init' not in self._k_means_kwargs):
self._k_means_kwargs['init'] = 'random'
if ('algorithm' not in self._k_means_kwargs):
self._k_means_kwargs['algorithm'] = 'full'
if ('random_state' not in self._k_means_kwargs):
self._k_means_kwargs['random_state'] = seed
self._use_kd_tree = use_kd_tree
self._centroid_kd_tree = None
self._ckdtree_kwargs = ({} if (ckdtree_kwargs is None) else ckdtree_kwargs.copy())
if (custom_centroids is None):
if (not isinstance(samples, int)):
samples = np.asarray(samples, dtype=self.dtype)
if (samples.shape[1] != self._behavior_dim):
raise ValueError(f'Samples has shape {samples.shape} but must be of shape (n_samples, len(ranges)={self._behavior_dim})')
self._samples = samples
self._centroids = None
else:
custom_centroids = np.asarray(custom_centroids, dtype=self.dtype)
if (custom_centroids.shape != (bins, self._behavior_dim)):
raise ValueError(f'custom_centroids has shape {custom_centroids.shape} but must be of shape (bins={bins}, len(ranges)={self._behavior_dim})')
self._centroids = custom_centroids
self._samples = None
@property
def lower_bounds(self):
return self._lower_bounds
@property
def upper_bounds(self):
return self._upper_bounds
@property
@require_init
def samples(self):
return self._samples
@property
@require_init
def centroids(self):
return self._centroids
def initialize(self, solution_dim):
ArchiveBase.initialize(self, solution_dim)
if (self._centroids is None):
self._samples = (self._rng.uniform(self._lower_bounds, self._upper_bounds, size=(self._samples, self._behavior_dim)).astype(self.dtype) if isinstance(self._samples, int) else self._samples)
self._centroids = k_means(self._samples, self._bins, **self._k_means_kwargs)[0]
if (self._centroids.shape[0] < self._bins):
raise RuntimeError(f'While generating the CVT, k-means clustering found {self._centroids.shape[0]} centroids, but this archive needs {self._bins} bins. This most likely happened because there are too few samples and/or too many bins.')
if self._use_kd_tree:
self._centroid_kd_tree = cKDTree(self._centroids, **self._ckdtree_kwargs)
@staticmethod
@jit(nopython=True)
def _brute_force_nn_numba(behavior_values, centroids):
distances = (np.expand_dims(behavior_values, axis=0) - centroids)
distances = np.sum(np.square(distances), axis=1)
return np.argmin(distances)
def get_index(self, behavior_values):
if self._use_kd_tree:
return int(self._centroid_kd_tree.query(behavior_values)[1])
return int(self._brute_force_nn_numba(behavior_values, self._centroids)) |
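A usage sketch based on the constructor and methods above (values illustrative; assumes the original module's imports such as k_means and cKDTree are available, and that initialize runs the k-means step that computes the centroids):

import numpy as np

# 100 CVT cells over a 2-D behavior space; centroids come from k-means over
# uniform samples drawn inside `ranges`.
archive = CVTArchive(bins=100, ranges=[(-1.0, 1.0), (-1.0, 1.0)], seed=42, use_kd_tree=True)
archive.initialize(solution_dim=10)
print(archive.get_index(np.array([0.25, -0.4])))  # index of the nearest centroid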
def layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
return dict(model_type='t5_stateless', model_name_or_path='t5-3b', do_lower_case=False, output_past=False, stateless_tied=True, explicitly_set_dict={'return_dict': False, 'use_cache': False, 'output_only': True, 'output_attentions': False, 'precompute_masks': False, 'output_hidden_states': False}, do_resize_token_embedding=True) |
class StreetMap():
def __init__(self):
self.scenario = None
self.graph = None
self.route_partition = None
self.lane_graph = None
def reset(self, scenario: BasicScenario):
self.scenario = scenario
self.graph = RoadLaneJunctionGraph(scenario)
self.route_partition = RoadLaneJunctionGraphPartition(self.graph)
def waypoint_on_lane(self, location, lane_id):
lane_node = self.graph.lanes[lane_id]
(lane_position, dist) = lane_node.sumolib_obj.getClosestLanePosAndDist(location, perpendicular=False)
position = sumolib.geomhelper.positionAtShapeOffset(lane_node.sumolib_obj.getShape(), lane_position)
return Waypoint(position, lane_node, lane_position) |
def build_rpn_head(cfg, input_shape, shadow_object_part=False):
name = cfg.MODEL.RPN.HEAD_NAME
return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape, shadow_object_part) |
def ssd(config, cfg, *args, **kwargs):
weights = config.weights
if (config.im_size == 512):
model = SSD512(config, cfg)
elif (config.im_size == 300):
model = SSD300(config, cfg)
else:
print_error_message('{} image size not supported'.format(config.im_size))
if weights:
import os
if (not os.path.isfile(weights)):
print_error_message('Weight file does not exist at {}. Please check. Exiting!!'.format(weights))
exit((- 1))
num_gpus = torch.cuda.device_count()
device = ('cuda' if (num_gpus >= 1) else 'cpu')
pretrained_dict = torch.load(weights, map_location=torch.device(device))
print_info_message('Loading pretrained base model weights')
basenet_dict = model.base_net.basenet.state_dict()
model_dict = model.state_dict()
overlap_dict = {k: v for (k, v) in pretrained_dict.items() if (k in basenet_dict)}
if (len(overlap_dict) == 0):
print_error_message('No overlapping weights between model file and pretrained weight file. Please check')
exit()
print_info_message('{:.2f} % of basenet weights copied to detectnet'.format((((len(overlap_dict) * 1.0) / len(model_dict)) * 100)))
basenet_dict.update(overlap_dict)
model.base_net.basenet.load_state_dict(basenet_dict)
print_info_message('Pretrained base model loaded!!')
else:
print_warning_message('Training from scratch!! If you are testing, ignore this message. For testing, we do not load weights here.')
return model |
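The weight-loading branch in ssd is an instance of the common partial state_dict transfer pattern; a generic standalone sketch (the function name is my own):

import torch
import torch.nn as nn

def load_overlapping_weights(model: nn.Module, checkpoint_path: str) -> int:
    # Copy only the checkpoint tensors whose keys and shapes match the model.
    pretrained = torch.load(checkpoint_path, map_location='cpu')
    own_state = model.state_dict()
    overlap = {k: v for k, v in pretrained.items()
               if k in own_state and v.shape == own_state[k].shape}
    own_state.update(overlap)
    model.load_state_dict(own_state)
    return len(overlap)  # number of tensors actually transferred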