code stringlengths 101 5.91M |
|---|
('spacy')
class SpacySentenceSplitter(SentenceSplitter):
    """Sentence splitter backed by a spaCy pipeline.

    With ``rule_based=True`` the (slower) dependency parser is disabled and a
    rule-based sentence-boundary-detection pipe named 'sbd' is attached instead.
    """

    def __init__(self, language: str='en_core_web_sm', rule_based: bool=False) -> None:
        # The parser is only required for statistical sentence segmentation.
        self.spacy = get_spacy_model(language, parse=(not rule_based), ner=False, pos_tags=False)
        if rule_based and not self.spacy.has_pipe('sbd'):
            self.spacy.add_pipe(self.spacy.create_pipe('sbd'))

    def split_sentences(self, text: str) -> List[str]:
        """Split one document into a list of stripped sentence strings."""
        doc = self.spacy(text)
        return [span.string.strip() for span in doc.sents]

    def batch_split_sentences(self, texts: List[str]) -> List[List[str]]:
        """Split many documents at once using spaCy's batched ``pipe``."""
        return [[span.string.strip() for span in doc.sents] for doc in self.spacy.pipe(texts)]
class WeightNormalizedConvTranspose2d(_ConvTransposeMixin, _WeightNormalizedConvNd):
    """2-D transposed convolution whose kernel is weight-normalized."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, scale=False, bias=False, dilation=1, init_factor=1, init_scale=1):
        # Normalize every spatial argument to an (h, w) pair before delegating.
        kernel_size, stride, padding, dilation, output_padding = (
            _pair(kernel_size),
            _pair(stride),
            _pair(padding),
            _pair(dilation),
            _pair(output_padding),
        )
        super(WeightNormalizedConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, True, output_padding, scale, bias, init_factor, init_scale)

    def forward(self, input, output_size=None):
        """Apply the transposed convolution using the normalized weight."""
        output_padding = self._output_padding(input, output_size)
        return F.conv_transpose2d(input, self.normalized_weight(), self.bias, self.stride, self.padding, output_padding, 1, self.dilation)
def videos_resize(videoinfos):
    """Resize one video to 256-px height (even width) with ffmpeg.

    Skips files that already exist in the global ``output_path``.
    ``videoinfos`` is a ``(videoid, videoname)`` pair; only the name is used
    for the input/output paths (rooted at the globals ``folder_path`` /
    ``output_path``).
    """
    videoid, videoname = videoinfos
    if os.path.exists(os.path.join(output_path, videoname)):
        print(f'{videoname} is resized.')
        return
    inname = os.path.join(folder_path, videoname)
    outname = os.path.join(output_path, videoname)
    # Pass an argument list with shell=False: avoids shell-quoting bugs and
    # command injection through file names (the original interpolated names
    # into a shell string).
    cmd = [
        'ffmpeg', '-y',
        '-i', inname,
        '-filter:v', 'scale=trunc(oh*a/2)*2:256',
        '-c:a', 'copy',
        outname,
    ]
    subprocess.call(cmd)
    return
def test_identities1():
    """Check gamma(z) == exp(loggamma(z)) on a complex grid away from the poles."""
    vals = np.array([(- 99.5), (- 9.5), (- 0.5), 0.5, 9.5, 99.5])
    (re, im) = np.meshgrid(vals, vals.copy())
    z = (re + (1j * im)).flatten()
    # Columns: input z, reference value gamma(z).
    dataset = np.vstack((z, gamma(z))).T

    def exp_loggamma(z):
        return np.exp(loggamma(z))

    FuncData(exp_loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def plot_rects(true_overlap, rand_overlap, savefile=None):
    """Grouped bar chart of BPE-token vs random-token agreement with
    secondary-structure labels for the three structure classes.

    Items in the overlap iterables are (class_index, disagreed) pairs;
    the plotted value is the fraction with disagreed == False.
    savefile is accepted for interface compatibility but unused here.
    """
    bpe_counts = Counter(true_overlap)
    rnd_counts = Counter(rand_overlap)
    bpe_pct = []
    rnd_pct = []
    for cls in range(3):
        bpe_pct.append(bpe_counts[(cls, False)] / (bpe_counts[(cls, False)] + bpe_counts[(cls, True)]))
        rnd_pct.append(rnd_counts[(cls, False)] / (rnd_counts[(cls, False)] + rnd_counts[(cls, True)]))
    positions = np.arange(3)
    bar_width = 0.35
    (fig, ax) = plt.subplots()
    rects_true = ax.bar(positions - bar_width / 2, bpe_pct, width=bar_width, label='BPE')
    rects_rand = ax.bar(positions + bar_width / 2, rnd_pct, width=bar_width, label='Random')
    ax.set_xticks(positions)
    ax.set_xticklabels(['Alpha Helix', 'Strand', 'Beta Sheet'])
    ax.set_ylabel('Percent Agreement')
    ax.set_title('Agreement between tokens and secondary structure labels')
    ax.legend(loc='lower right')
    autolabel(ax, rects_true)
    autolabel(ax, rects_rand)
    plt.tight_layout()
    plt.show()
class ConvBNReLU(nn.Sequential):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU6, with 'same'-style padding
    for odd kernel sizes."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        # For odd kernels this keeps the spatial size when stride == 1.
        same_pad = (kernel_size - 1) // 2
        layers = [
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, same_pad, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        ]
        super(ConvBNReLU, self).__init__(*layers)
def register_Ns3GridBuildingAllocator_methods(root_module, cls):
    """Register the constructors and methods of ns3::GridBuildingAllocator on
    its Python wrapper class.

    Appears to be auto-generated binding code (PyBindGen-style) — keep the
    declarations in sync with the ns-3 C++ API rather than hand-editing.
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::GridBuildingAllocator const &', 'arg0')])
    # Default constructor.
    cls.add_constructor([])
    # ns3::BuildingContainer Create(uint32_t n) const
    cls.add_method('Create', 'ns3::BuildingContainer', [param('uint32_t', 'n')], is_const=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void SetBuildingAttribute(std::string n, ns3::AttributeValue const & v)
    cls.add_method('SetBuildingAttribute', 'void', [param('std::string', 'n'), param('ns3::AttributeValue const &', 'v')])
    return
def main(unused_argv):
    """Shard the ImageNet validation and train sets into TFRecord files."""
    assert (not (FLAGS.train_shards % FLAGS.num_threads)), 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards'
    assert (not (FLAGS.validation_shards % FLAGS.num_threads)), 'Please make the FLAGS.num_threads commensurate with FLAGS.validation_shards'
    print(('Saving results to %s' % FLAGS.output_directory))
    # Lookup tables shared by both splits.
    synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
    image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
    splits = (
        ('validation', FLAGS.validation_directory, FLAGS.validation_shards),
        ('train', FLAGS.train_directory, FLAGS.train_shards),
    )
    for split_name, directory, shard_count in splits:
        _process_dataset(split_name, directory, shard_count, synset_to_human, image_to_bboxes)
def upsample(x, size, mode):
    """Resize a batch of 1-D signals to ``size`` and return a numpy array.

    ``x`` is treated as (batch, length); a channel dim is added for
    F.interpolate and squeezed away afterwards.

    ``align_corners`` may only be passed for the interpolating modes —
    F.interpolate raises if it is given with 'nearest'/'area' — so pass
    None (the default) for those modes instead of hard-coding False.
    """
    interpolating_modes = ('linear', 'bilinear', 'bicubic', 'trilinear')
    align = False if mode in interpolating_modes else None
    out = F.interpolate(x.unsqueeze(1), size=size, mode=mode, align_corners=align)
    return out.squeeze().numpy()
def get_times_for_device_framework_and_method(device, framework, method):
    """Collect per-epoch timings for one (framework, device, method) combo
    across the globally defined problem sizes ``ns``.

    Stops at the first missing measurement (recorded as '-') and returns
    what was gathered so far as a numpy array.
    """
    collected = []
    for size in ns:
        measurement = res[(framework, device, size, method, 'time_per_epoch')]
        if measurement == '-':
            break
        collected.append(measurement)
    return np.array(collected)
class ModelExpBiLSTMMulAttn(ModelTemplate):
    """Sentence-pair model: a shared Bi-LSTM encoder plus multi-dimensional
    attention pooling, classified over the standard matching features
    [s1; s2; s1 - s2; s1 * s2]."""

    def __init__(self, token_emb_mat, glove_emb_mat, tds, cds, tl, scope):
        super(ModelExpBiLSTMMulAttn, self).__init__(token_emb_mat, glove_emb_mat, tds, cds, tl, scope)
        self.update_tensor_add_ema_and_opt()

    def build_network(self):
        """Build the TF1 graph and return the [batch, n_classes] logits tensor."""
        _logger.add()
        _logger.add(('building %s neural network structure...' % cfg.network_type))
        (tds, cds) = (self.tds, self.cds)
        tl = self.tl
        (tel, cel, cos, ocd, fh) = (self.tel, self.cel, self.cos, self.ocd, self.fh)
        hn = self.hn
        (bs, sl1, sl2) = (self.bs, self.sl1, self.sl2)
        with tf.variable_scope('emb'):
            token_emb_mat = generate_embedding_mat(tds, tel, init_mat=self.token_emb_mat, extra_mat=self.glove_emb_mat, extra_trainable=self.finetune_emb, scope='gene_token_emb_mat')
            s1_emb = tf.nn.embedding_lookup(token_emb_mat, self.sent1_token)
            s2_emb = tf.nn.embedding_lookup(token_emb_mat, self.sent2_token)
            self.tensor_dict['s1_emb'] = s1_emb
            self.tensor_dict['s2_emb'] = s2_emb
        with tf.variable_scope('context_fusion'):
            s1_seq_rep = contextual_bi_rnn(s1_emb, self.sent1_token_mask, hn, 'lstm', False, cfg.wd, cfg.dropout, self.is_train, 'bi_lstm')
            # Share the Bi-LSTM weights between the two sentences.
            tf.get_variable_scope().reuse_variables()
            s2_seq_rep = contextual_bi_rnn(s2_emb, self.sent2_token_mask, hn, 'lstm', False, cfg.wd, cfg.dropout, self.is_train, 'bi_lstm')
            self.tensor_dict['s1_seq_rep'] = s1_seq_rep
            self.tensor_dict['s2_seq_rep'] = s2_seq_rep
        with tf.variable_scope('sent_enc_attn'):
            s1_rep = multi_dimensional_attention(s1_seq_rep, self.sent1_token_mask, 'multi_dimensional_attention', cfg.dropout, self.is_train, cfg.wd, 'relu', tensor_dict=self.tensor_dict, name='s1_attn')
            # Share the attention weights as well.
            tf.get_variable_scope().reuse_variables()
            s2_rep = multi_dimensional_attention(s2_seq_rep, self.sent2_token_mask, 'multi_dimensional_attention', cfg.dropout, self.is_train, cfg.wd, 'relu', tensor_dict=self.tensor_dict, name='s2_attn')
            self.tensor_dict['s1_rep'] = s1_rep
            self.tensor_dict['s2_rep'] = s2_rep
        with tf.variable_scope('output'):
            out_rep = tf.concat([s1_rep, s2_rep, (s1_rep - s2_rep), (s1_rep * s2_rep)], (- 1))
            pre_output = tf.nn.relu(linear([out_rep], hn, True, 0.0, scope='pre_output', squeeze=False, wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train))
            logits = linear([pre_output], self.output_class, True, 0.0, scope='logits', squeeze=False, wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train)
            # BUG FIX: was `self.tensor_dict[logits] = logits`, which used the
            # tensor object itself as the dict key; use the string key like
            # every other tensor_dict entry above.
            self.tensor_dict['logits'] = logits
        return logits
def str_to_number(value):
    """Parse a C-style numeric literal string into an int.

    Supports an optional leading '-', the 0x/0X, 0o/0O and 0b/0B prefixes,
    and treats any other multi-digit string starting with '0' as octal.
    Everything else is delegated to int(value, 0).
    """
    negative = value.startswith('-')
    if negative:
        value = value[1:]
    if len(value) < 2 or value[0] != '0':
        # Single characters and non-zero-prefixed strings: let Python decide.
        magnitude = int(value, 0)
    else:
        prefix_bases = {'x': 16, 'X': 16, 'o': 8, 'O': 8, 'b': 2, 'B': 2}
        base = prefix_bases.get(value[1])
        if base is not None:
            magnitude = int(value[2:], base)
        else:
            # Bare leading zero => legacy octal (e.g. '017' == 15).
            magnitude = int(value, 8)
    return -magnitude if negative else magnitude
def create_predict_net(predictor_export_meta):
    """Assemble the prediction NetDef described by ``predictor_export_meta``.

    Copies ops, partition info and args from the meta's predict_net, wires
    the declared external inputs (inputs + parameters) and outputs, and
    forwards the optional net_type / num_workers settings.
    """
    net = core.Net((predictor_export_meta.predict_net.name or 'predict'))
    # core.Net.Proto() returns the underlying NetDef, so we can bind it once.
    proto = net.Proto()
    src = predictor_export_meta.predict_net
    proto.op.extend(src.op)
    proto.partition_info.extend(src.partition_info)
    proto.external_input.extend((predictor_export_meta.inputs + predictor_export_meta.parameters))
    proto.external_output.extend(predictor_export_meta.outputs)
    proto.arg.extend(src.arg)
    if (predictor_export_meta.net_type is not None):
        proto.type = predictor_export_meta.net_type
    if (predictor_export_meta.num_workers is not None):
        proto.num_workers = predictor_export_meta.num_workers
    return proto
class PolyhedralFan(SageObject):
    """Parsed representation of a polyhedral fan as printed by gfan.

    The constructor splits gfan's sectioned text output into
    ``self.fan_dict`` (section header -> list of data lines) and caches the
    commonly used scalar fields plus the parsed ray list.
    """

    def __init__(self, gfan_polyhedral_fan, parameter_indices=None):
        # gfan_polyhedral_fan: raw multi-line string exactly as emitted by gfan.
        # parameter_indices: coordinate positions where a 0 entry is spliced
        # into every ray (re-inserts coordinates projected out before gfan ran).
        if (parameter_indices is None):
            parameter_indices = []
        # Section headers recognised in gfan's output format.
        fan_keys = ['AMBIENT_DIM', 'DIM', 'LINEALITY_DIM', 'RAYS', 'N_RAYS', 'LINEALITY_SPACE', 'ORTH_LINEALITY_SPACE', 'F_VECTOR', 'CONES', 'MAXIMAL_CONES', 'PURE', 'SIMPLICIAL', 'MULTIPLICITIES']
        poly_lines = gfan_polyhedral_fan.split('\n')
        self.fan_dict = {}
        cur_key = None
        # Collect each non-empty line under the most recently seen header.
        for ting in poly_lines:
            if fan_keys.count(ting):
                cur_key = ting
                self.fan_dict[cur_key] = []
            elif (cur_key and (ting != '')):
                self.fan_dict[cur_key].append(ting)
        self._ambient_dim = int(self.fan_dict['AMBIENT_DIM'][0])
        self._dim = int(self.fan_dict['DIM'][0])
        self._lineality_dim = int(self.fan_dict['LINEALITY_DIM'][0])
        self._rays = []
        for ray in self.fan_dict['RAYS']:
            # Keep only the part before any tab (drops gfan's trailing
            # annotation), then parse the space-separated integer coordinates.
            temp_ray = ray.split('\t')[0]
            temp_ray = temp_ray.split(' ')
            temp_ray = [int(x) for x in temp_ray]
            if (parameter_indices != []):
                # Re-insert a zero coordinate at each eliminated position.
                for q in parameter_indices:
                    temp_ray = ((temp_ray[0:q] + [0]) + temp_ray[q:])
            self._rays.append(temp_ray)
        self._cone_dict = _cone_parse(self.fan_dict['CONES'])
        self._maximal_cone_dict = _cone_parse(self.fan_dict['MAXIMAL_CONES'])
        self._str = gfan_polyhedral_fan

    def _repr_(self):
        """Short description (Sage repr hook)."""
        return 'Polyhedral fan in {} dimensions of dimension {}'.format(self.ambient_dim(), self.dim())

    def _str_(self):
        """Return the raw gfan output this fan was built from."""
        return self._str

    def ambient_dim(self):
        """Dimension of the ambient space."""
        return self._ambient_dim

    def dim(self):
        """Dimension of the fan."""
        return self._dim

    def lineality_dim(self):
        """Dimension of the lineality space."""
        return self._lineality_dim

    def rays(self):
        """Sorted list of ray coordinate vectors."""
        return sorted(self._rays)

    def cones(self):
        """Cones as returned by _cone_parse (presumably keyed by dimension —
        see the dim > 1 filter in to_RationalPolyhedralFan)."""
        return self._cone_dict

    def maximal_cones(self):
        """Maximal cones as returned by _cone_parse."""
        return self._maximal_cone_dict

    def f_vector(self):
        """The fan's f-vector as a list of Sage Integers."""
        str_data = self.fan_dict['F_VECTOR'][0]
        fv = [Integer(x) for x in str_data.split(' ')]
        return fv

    def is_simplicial(self):
        """True if gfan flagged the fan as simplicial (SIMPLICIAL section)."""
        return bool(int(self.fan_dict['SIMPLICIAL'][0]))

    def to_RationalPolyhedralFan(self):
        """Convert to Sage's RationalPolyhedralFan, caching the result on
        ``self._fan`` (EAFP: first call builds it, later calls return it)."""
        try:
            return self._fan
        except AttributeError:
            cdnt = []
            cones = self.cones()
            # Only keys > 1 are passed to Fan; lower-dimensional faces are
            # implied (discard_faces=True handles redundancy).
            for x in cones:
                if (x > 1):
                    cdnt += cones[x]
            fan = Fan(cones=cdnt, rays=self.rays(), discard_faces=True)
            self._fan = fan
            return self._fan
class MultipleNegativesRankingLoss(nn.Module):
    """Multiple-negatives ranking loss for (anchor, positive) sentence pairs.

    Within a batch, every other positive acts as an in-batch negative:
    the loss is mean(logsumexp(scores, dim=1)) - mean(diag(scores)).
    """

    def __init__(self, model: SentenceTransformer):
        super(MultipleNegativesRankingLoss, self).__init__()
        self.model = model

    def forward(self, sentence_features: Iterable[Dict[(str, Tensor)]], labels: Tensor):
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        anchors, positives = embeddings
        return self.multiple_negatives_ranking_loss(anchors, positives)

    def multiple_negatives_ranking_loss(self, embeddings_a: Tensor, embeddings_b: Tensor):
        """Compute the loss from two aligned [batch, dim] embedding matrices."""
        scores = embeddings_a @ embeddings_b.t()
        positive_term = scores.diagonal().mean()
        partition_term = torch.logsumexp(scores, dim=1).mean()
        return partition_term - positive_term
class TestGrouping(TestCaseBase):
    """Regression tests for sqlparse's token-grouping pass.

    Legacy Python 2 test suite: relies on ``unicode``, ``assertEquals`` and
    ``assert_``. Each test parses a statement and checks the resulting token
    tree (types, nesting and counts).
    """

    def test_parenthesis(self):
        """Nested parentheses become nested sql.Parenthesis groups."""
        s = 'select (select (x3) x2) and (y2) bar'
        parsed = sqlparse.parse(s)[0]
        # Round-trip: stringifying the parse tree reproduces the input.
        self.ndiffAssertEqual(s, str(parsed))
        self.assertEqual(len(parsed.tokens), 7)
        self.assert_(isinstance(parsed.tokens[2], sql.Parenthesis))
        self.assert_(isinstance(parsed.tokens[(- 1)], sql.Identifier))
        self.assertEqual(len(parsed.tokens[2].tokens), 5)
        self.assert_(isinstance(parsed.tokens[2].tokens[3], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[2].tokens[3].tokens[0], sql.Parenthesis))
        self.assertEqual(len(parsed.tokens[2].tokens[3].tokens), 3)

    def test_comments(self):
        """A multiline comment groups into a single token."""
        s = '/*\n * foo\n */ \n bar'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assertEqual(len(parsed.tokens), 2)

    def test_assignment(self):
        """`:=` assignments group to sql.Assignment, with or without ';'."""
        s = 'foo := 1;'
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 1)
        self.assert_(isinstance(parsed.tokens[0], sql.Assignment))
        s = 'foo := 1'
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 1)
        self.assert_(isinstance(parsed.tokens[0], sql.Assignment))

    def test_identifiers(self):
        """Identifier grouping: dotted names, quoted schemes, aliases, lists."""
        s = 'select foo.bar from "myscheme"."table" where fail. order'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[2], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[6], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[8], sql.Where))
        s = 'select * from foo where foo.id = 1'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[(- 1)].tokens[(- 1)].tokens[0], sql.Identifier))
        s = 'select * from (select "foo"."id" from foo)'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[(- 1)].tokens[3], sql.Identifier))
        s = "INSERT INTO `test` VALUES('foo', 'bar');"
        parsed = sqlparse.parse(s)[0]
        types = [l.ttype for l in parsed.tokens if (not l.is_whitespace())]
        self.assertEquals(types, [T.DML, T.Keyword, None, T.Keyword, None, T.Punctuation])
        s = 'select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable'
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 7)
        self.assert_(isinstance(parsed.tokens[2], sql.IdentifierList))
        self.assertEqual(len(parsed.tokens[2].tokens), 4)
        identifiers = list(parsed.tokens[2].get_identifiers())
        self.assertEqual(len(identifiers), 2)
        self.assertEquals(identifiers[0].get_alias(), u'col')

    def test_identifier_wildcard(self):
        """`a.*` groups as an Identifier inside an IdentifierList."""
        p = sqlparse.parse('a.*, b.id')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))
        self.assert_(isinstance(p.tokens[0].tokens[0], sql.Identifier))
        self.assert_(isinstance(p.tokens[0].tokens[(- 1)], sql.Identifier))

    def test_identifier_name_wildcard(self):
        """Wildcard identifiers report '*' as their name."""
        p = sqlparse.parse('a.*')[0]
        t = p.tokens[0]
        self.assertEqual(t.get_name(), '*')
        self.assertEqual(t.is_wildcard(), True)

    def test_identifier_invalid(self):
        """A dangling dot ('a.') still groups, with only a parent name."""
        p = sqlparse.parse('a.')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        self.assertEqual(p.tokens[0].has_alias(), False)
        self.assertEqual(p.tokens[0].get_name(), None)
        self.assertEqual(p.tokens[0].get_real_name(), None)
        self.assertEqual(p.tokens[0].get_parent_name(), 'a')

    def test_identifier_as_invalid(self):
        """`foo as select` must not swallow the keyword into the identifier."""
        p = sqlparse.parse('foo as select *')[0]
        self.assert_(len(p.tokens), 5)
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        self.assertEqual(len(p.tokens[0].tokens), 1)
        self.assertEqual(p.tokens[2].ttype, T.Keyword)

    def test_identifier_function(self):
        """Function calls (optionally in expressions) group inside Identifiers."""
        p = sqlparse.parse('foo() as bar')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        self.assert_(isinstance(p.tokens[0].tokens[0], sql.Function))
        p = sqlparse.parse('foo()||col2 bar')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        self.assert_(isinstance(p.tokens[0].tokens[0], sql.Function))

    def test_identifier_extended(self):
        """Arithmetic on an identifier keeps it grouped as an Identifier."""
        p = sqlparse.parse('foo+100')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        p = sqlparse.parse('foo + 100')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))
        p = sqlparse.parse('foo*100')[0]
        self.assert_(isinstance(p.tokens[0], sql.Identifier))

    def test_identifier_list(self):
        """Comma-separated names group into IdentifierList, also in parens."""
        p = sqlparse.parse('a, b, c')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))
        p = sqlparse.parse('(a, b, c)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.IdentifierList))

    def test_identifier_list_case(self):
        """CASE expressions inside an identifier list stay in the list."""
        p = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))
        p = sqlparse.parse('(a, case when 1 then 2 else 3 end as b, c)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.IdentifierList))

    def test_identifier_list_other(self):
        """Mixed literals and names still form one IdentifierList."""
        p = sqlparse.parse("select *, null, 1, 'foo', bar from mytable, x")[0]
        self.assert_(isinstance(p.tokens[2], sql.IdentifierList))
        l = p.tokens[2]
        self.assertEqual(len(l.tokens), 13)

    def test_identifier_list_with_inline_comments(self):
        """Inline comments between list items don't break the grouping."""
        p = sqlparse.parse('foo /* a comment */, bar')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))
        self.assert_(isinstance(p.tokens[0].tokens[0], sql.Identifier))
        self.assert_(isinstance(p.tokens[0].tokens[3], sql.Identifier))

    def test_where(self):
        """WHERE clauses group to sql.Where, including inside subqueries."""
        s = 'select * from foo where bar = 1 order by id desc'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertTrue(len(p.tokens), 16)
        s = 'select x from (select y from foo where bar = 1) z'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertTrue(isinstance(p.tokens[(- 1)].tokens[0].tokens[(- 2)], sql.Where))

    def test_typecast(self):
        """`::type` suffixes are exposed via get_typecast()."""
        s = 'select foo::integer from bar'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[2].get_typecast(), 'integer')
        self.assertEqual(p.tokens[2].get_name(), 'foo')
        s = 'select (current_database())::information_schema.sql_identifier'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[2].get_typecast(), 'information_schema.sql_identifier')

    def test_alias(self):
        """Aliases via AS, bare aliases and typecast+alias combinations."""
        s = 'select foo as bar from mytable'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[2].get_real_name(), 'foo')
        self.assertEqual(p.tokens[2].get_alias(), 'bar')
        s = 'select foo from mytable t1'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[6].get_real_name(), 'mytable')
        self.assertEqual(p.tokens[6].get_alias(), 't1')
        s = 'select foo::integer as bar from mytable'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[2].get_alias(), 'bar')
        s = 'SELECT DISTINCT (current_database())::information_schema.sql_identifier AS view'
        p = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(p))
        self.assertEqual(p.tokens[4].get_alias(), 'view')

    def test_alias_case(self):
        """A CASE expression can carry a bare alias."""
        p = sqlparse.parse('CASE WHEN 1 THEN 2 ELSE 3 END foo')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assertEqual(p.tokens[0].get_alias(), 'foo')

    def test_alias_returns_none(self):
        """A plain dotted name has no alias."""
        p = sqlparse.parse('foo.bar')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assertEqual(p.tokens[0].get_alias(), None)

    def test_idlist_function(self):
        """A function call with an alias participates in an IdentifierList."""
        p = sqlparse.parse('foo(1) x, bar')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))

    def test_comparison_exclude(self):
        """A lone '=' is not a Comparison; `a=1` / `a>=1` are."""
        p = sqlparse.parse('(=)')[0]
        self.assert_(isinstance(p.tokens[0], sql.Parenthesis))
        self.assert_((not isinstance(p.tokens[0].tokens[1], sql.Comparison)))
        p = sqlparse.parse('(a=1)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.Comparison))
        p = sqlparse.parse('(a>=1)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.Comparison))

    def test_function(self):
        """Call syntax groups to sql.Function; parameters are enumerable."""
        p = sqlparse.parse('foo()')[0]
        self.assert_(isinstance(p.tokens[0], sql.Function))
        p = sqlparse.parse('foo(null, bar)')[0]
        self.assert_(isinstance(p.tokens[0], sql.Function))
        self.assertEqual(len(list(p.tokens[0].get_parameters())), 2)

    def test_function_not_in(self):
        """The IN keyword followed by parens is not mistaken for a function."""
        p = sqlparse.parse('in(1, 2)')[0]
        self.assertEqual(len(p.tokens), 2)
        self.assertEqual(p.tokens[0].ttype, T.Keyword)
        self.assert_(isinstance(p.tokens[1], sql.Parenthesis))

    def test_varchar(self):
        """Type names with a length spec group as Function (Varchar(50))."""
        p = sqlparse.parse('"text" Varchar(50) NOT NULL')[0]
        self.assert_(isinstance(p.tokens[2], sql.Function))
def _ensure_project_exists(client: scaleapi.ScaleClient, project_name: str):
    """Create the Scale project if this process has not seen it yet.

    Idempotent and lock-protected: the module-level ``_scale_projects`` set
    remembers project names already ensured, and a duplicate-resource error
    from the API is treated as "already exists" (after validating its task
    type).
    """
    with _scale_projects_lock:
        if project_name in _scale_projects:
            return
        try:
            client.create_project(project_name=project_name, task_type=TaskType.TextCollection, rapid=True, params={})
            hlog(f'Created new Scale project: {project_name}')
            hlog('IMPORTANT: Run scripts/scale/create_and_setup_project.py to set up a calibration batch in your project.')
        except ScaleDuplicateResource:
            # Project already exists server-side; make sure it is usable.
            existing_project = client.get_project(project_name=project_name)
            if (existing_project.type != TaskType.TextCollection.value):
                raise ScaleCritiqueClientError(f"The existing project with name '{project_name}' has a task type of '{existing_project.type}' instead of '{TaskType.TextCollection.value}'. Rename the existing batch to a different name to allow HELM to create a new project with the correct task type.")
            hlog(f'Reusing existing Scale project: {project_name}')
        _scale_projects.add(project_name)
class MultiManagerEnvironment(EnasTrainEnv):
    """ENAS-style training environment driving one controller across a list
    of manager instances (one per task), each described to the controller by
    a per-manager descriptive-feature entry.
    """

    def __init__(self, data_descriptive_features, is_enas='auto', *args, **kwargs):
        # data_descriptive_features: indexable, one entry per manager; it is
        # indexed as data_descriptive_features[[j]] below (double brackets,
        # presumably to keep a leading batch dimension — confirm with the
        # controller's expected input shape).
        super(MultiManagerEnvironment, self).__init__(*args, **kwargs)
        assert (type(self.manager) is list), ('MultiManagerEnasEnvironment must have a List of manager instances, got %s' % type(self.manager))
        self.manager_cnt = len(self.manager)
        self.is_enas = is_enas
        for i in range(self.manager_cnt):
            assert issubclass(type(self.manager[i]), BaseNetworkManager), ('MultiManagerEnasEnvironment expects a List of Manager instances, got %s for %i-th element' % (type(self.manager[i]), i))
        self.data_descriptive_features = data_descriptive_features
        assert (len(self.data_descriptive_features) == self.manager_cnt), ('data descriptive features must match the number of managers; got %i description, %i managers' % (len(self.data_descriptive_features), self.manager_cnt))
        # 'auto' resolves to True only when every manager is an EnasManager.
        if (self.is_enas == 'auto'):
            if all([isinstance(self.manager[i], EnasManager) for i in range(self.manager_cnt)]):
                self.is_enas = True
            else:
                self.is_enas = False

    def _warmup(self):
        """Pre-train the shared child model for child_warm_up_epochs before
        any controller updates (ENAS only). Negative trial ids mark warm-up
        calls; no architecture is sampled (model_arc=None)."""
        assert self.is_enas, 'You can only set warm_up_epochs>0 if is_enas=True'
        self.logger.info(('warm-up for child model: %i epochs' % self.child_warm_up_epochs))
        warmup_nsteps = None
        for i in range(1, (self.child_warm_up_epochs + 1)):
            self.logger.info(('warm-up : %i epoch' % i))
            for j in range(self.manager_cnt):
                self.manager[j].get_rewards(trial=(- i), model_arc=None, nsteps=warmup_nsteps)

    def _train_loop(self):
        """Main training loop over episodes and controller steps.

        Writes one CSV row per (manager, controller step) to
        train_history.csv; returns (action_probs_record,
        loss_and_metrics_list). Stops on KeyboardInterrupt or when the time
        budget is exhausted."""
        # Resumed runs append to the existing history; fresh runs truncate.
        if self.resume_prev_run:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='a+')
        else:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='w')
        writer = csv.writer(f)
        starttime = datetime.datetime.now()
        action_probs_record = []
        loss_and_metrics_list = []
        controller_step = (self.start_ep * self.max_step_per_ep)
        for child_step in range(self.start_ep, self.max_episode):
            try:
                # ENAS: first train the shared child weights with each manager.
                if self.is_enas:
                    for j in range(self.manager_cnt):
                        self.logger.info(('sampling with manager %i' % j))
                        self.manager[j].get_rewards(child_step, None, nsteps=self.child_train_steps)
                ep_reward = 0
                loss_and_metrics_ep = defaultdict(float)
                # Sample/evaluate architectures, round-robin over managers.
                for step in range(self.max_step_per_ep):
                    for j in range(self.manager_cnt):
                        ep_probs = []
                        (arc_seq, probs) = self.controller.get_action(description_feature=self.data_descriptive_features[[j]])
                        self.entropy_record.append(compute_entropy(probs))
                        ep_probs.append(probs)
                        action_list = parse_action_str_squeezed(arc_seq, self.controller.state_space)
                        self.logger.debug('Manager {}, Predicted actions : {}'.format(j, [str(x) for x in action_list]))
                        (reward, loss_and_metrics) = self.manager[j].get_rewards(trial=controller_step, model_arc=arc_seq, nsteps=self.child_train_steps)
                        self.logger.debug(((('Rewards : ' + str(reward)) + ' Metrics : ') + str(loss_and_metrics)))
                        ep_reward += reward
                        for x in loss_and_metrics.keys():
                            loss_and_metrics_ep[x] += loss_and_metrics[x]
                        # Store the transition for the controller's policy update.
                        self.controller.store(prob=probs, action=arc_seq, reward=reward, description=self.data_descriptive_features[[j]], manager_index=j)
                        # CSV row: "<manager>-<step>", sorted metrics, reward, actions.
                        data = [('%i-%i' % (j, controller_step)), [loss_and_metrics[x] for x in sorted(loss_and_metrics.keys())], reward]
                        if self.squeezed_action:
                            data.extend(arc_seq)
                        else:
                            data.extend(action_list)
                        writer.writerow(data)
                        f.flush()
                        controller_step += 1
                loss_and_metrics_list.append({x: (v / self.max_step_per_ep) for (x, v) in loss_and_metrics_ep.items()})
                # Mean action probabilities over the episode's samples.
                ep_p = [(sum(p) / len(p)) for p in zip(*ep_probs)]
                action_probs_record.append(ep_p)
                # Only update the controller once the replay buffer has filled.
                if (child_step >= (self.initial_buffering_queue - 1)):
                    loss = self.controller.train(child_step, self.working_dir)
                    self.total_reward += np.sum(np.array(self.controller.buffer.lt_adv[(- 1)]).flatten())
                    self.logger.info(('Total reward : ' + str(self.total_reward)))
                    self.logger.info(('END episode %d: Controller loss : %0.6f' % (child_step, loss)))
                    self.logger.info(('-' * 10))
                else:
                    self.logger.info(('END episode %d: Buffering' % child_step))
                    self.logger.info(('-' * 10))
                if ((self.save_controller_every is not None) and ((child_step % self.save_controller_every) == 0)):
                    self.logger.info(('Saving controller weights for epoch %d' % child_step))
                    self.controller.save_weights(os.path.join(self.working_dir, ('controller_weights-epoch-%i.h5' % child_step)))
            except KeyboardInterrupt:
                self.logger.info('User disrupted training')
                break
            # Enforce the wall-clock time budget between episodes.
            consumed_time = (datetime.datetime.now() - starttime).total_seconds()
            self.logger.info(('used time: %.2f %%' % ((consumed_time / self.time_budget) * 100)))
            if (consumed_time >= self.time_budget):
                self.logger.info('training ceased because run out of time budget')
                break
        self.logger.debug(('Total Reward : %s' % self.total_reward))
        f.close()
        return (action_probs_record, loss_and_metrics_list)

    def train(self):
        """Run optional warm-up, the training loop, then plot/save results.

        Returns the argmax action indices of the last episode's mean
        action-probability record."""
        if (self.child_warm_up_epochs > 0):
            self._warmup()
        (action_probs_record, loss_and_metrics_list) = self._train_loop()
        metrics_dict = {k: v for (k, v) in zip(sorted(loss_and_metrics_list[0].keys()), range(len(loss_and_metrics_list[0])))}
        plot_controller_performance(os.path.join(self.working_dir, 'train_history.csv'), metrics_dict=metrics_dict, save_fn=os.path.join(self.working_dir, 'train_history.png'), N_sma=5)
        plot_environment_entropy(self.entropy_record, os.path.join(self.working_dir, 'entropy.png'))
        save_kwargs = {}
        if self.with_input_blocks:
            # NOTE(review): __init__ asserts self.manager is a list, but this
            # accesses `model_fn` on the list itself and would raise
            # AttributeError — probably intended self.manager[0].model_fn.
            # Confirm before using with_input_blocks=True.
            save_kwargs['input_nodes'] = self.manager.model_fn.inputs_op
        self.action_probs_record = action_probs_record
        save_action_weights(action_probs_record, self.controller.state_space, self.working_dir, with_input_blocks=self.with_input_blocks, with_skip_connection=self.with_skip_connection, **save_kwargs)
        save_stats(loss_and_metrics_list, self.working_dir)
        if self.should_plot:
            plot_action_weights(self.working_dir)
            plot_wiring_weights(self.working_dir, self.with_input_blocks, self.with_skip_connection)
            plot_stats2(self.working_dir)
        act_idx = []
        for p in action_probs_record[(- 1)]:
            act_idx.append(np.argmax(p))
        return act_idx
_LAYERS.register_module()
class DropBlock(nn.Module):
    """DropBlock regularization: zeroes contiguous block_size x block_size
    regions during training, with the drop probability linearly warmed up
    over ``warmup_iters`` forward passes."""

    def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
        super(DropBlock, self).__init__()
        # Odd block size keeps the pooled mask aligned with the input grid.
        assert ((block_size % 2) == 1)
        assert (0 < drop_prob <= 1)
        assert (warmup_iters >= 0)
        self.drop_prob = drop_prob
        self.block_size = block_size
        self.warmup_iters = warmup_iters
        self.iter_cnt = 0

    def forward(self, x):
        # Identity in eval mode.
        if (not self.training):
            return x
        self.iter_cnt += 1
        batch, channels, height, width = list(x.shape)
        gamma = self._compute_gamma((height, width))
        # Sample block centers only where a full block fits, then expand each
        # center to a block via max-pooling.
        valid_h = (height - self.block_size) + 1
        valid_w = (width - self.block_size) + 1
        seed_mask = torch.bernoulli(torch.full((batch, channels, valid_h, valid_w), gamma, device=x.device))
        half = self.block_size // 2
        seed_mask = F.pad(seed_mask, [half] * 4, value=0)
        block_mask = F.max_pool2d(input=seed_mask, stride=(1, 1), kernel_size=(self.block_size, self.block_size), padding=half)
        keep_mask = 1 - block_mask
        # Rescale so the expected activation magnitude is preserved.
        return x * keep_mask * keep_mask.numel() / (eps + keep_mask.sum())

    def _compute_gamma(self, feat_size):
        """Per-location seed probability for the given (H, W) feature size."""
        h, w = feat_size
        gamma = self.drop_prob * h * w
        gamma /= ((h - self.block_size) + 1) * ((w - self.block_size) + 1)
        gamma /= self.block_size ** 2
        warmup_factor = (1.0 if (self.iter_cnt > self.warmup_iters) else (self.iter_cnt / self.warmup_iters))
        return gamma * warmup_factor

    def extra_repr(self):
        return f'drop_prob={self.drop_prob}, block_size={self.block_size}, warmup_iters={self.warmup_iters}'
_agent('simul_trans_text')
class SimulTransTextAgent(SimulTransAgent):
    """Simultaneous text-translation agent: splits incoming source segments
    into tokens and decides when to read more input."""

    def build_word_splitter(self, args):
        """Instantiate the source/target word splitters from CLI args."""
        self.word_splitter = {}
        for side in ('src', 'tgt'):
            splitter_cls = SPLITTER_DICT[getattr(args, f'{side}_splitter_type')]
            self.word_splitter[side] = splitter_cls(getattr(args, f'{side}_splitter_path'))

    def load_dictionary(self, task):
        """Grab both dictionaries from the fairseq task."""
        self.dict = {}
        self.dict['tgt'] = task.target_dictionary
        self.dict['src'] = task.source_dictionary

    def update_states(self, states, new_state):
        """Fold one newly read source segment into the running states dict."""
        if states['finish_read']:
            return states
        segment = new_state['segment']
        if segment == DEFAULT_EOS:
            # End of stream: record EOS and mark reading finished.
            tokens = [segment]
            indices = [self.dict['src'].eos()]
            states['finish_read'] = True
        else:
            tokens = self.word_splitter['src'].split(segment)
            indices = self.dict['src'].encode_line(tokens, line_tokenizer=(lambda x: x), add_if_not_exist=False, append_eos=False).tolist()
        states['segments']['src'] += [segment]
        states['tokens']['src'] += tokens
        self._append_indices(states, indices, 'src')
        return states

    def read_action(self, states):
        """Request more source input when the step pointer has caught up."""
        states['steps']['src'] += 1
        n_src_tokens = len(states['tokens']['src'])
        if n_src_tokens == 0 or n_src_tokens <= states['steps']['src']:
            return {'key': GET, 'value': None}
        return None

    def finish_read(self, states):
        """True once EOS was read and every source token has been consumed."""
        return states['finish_read'] and (len(states['tokens']['src']) == states['steps']['src'])
def remove_cuda(config_list):
    """Return the configs that do NOT contain the {'device': 'cuda'} entry."""
    cuda_marker = {'device': 'cuda'}
    kept = []
    for config in config_list:
        if cuda_marker not in config:
            kept.append(config)
    return kept
class HtmlBuilder():
    """Incrementally builds an HTML document as a list of lines.

    ``element`` is a generator intended to be driven like a context manager
    (presumably wrapped with contextlib.contextmanager by the caller — TODO
    confirm): it emits the opening tag, yields so the caller can add child
    content, then emits the closing tag in a ``finally`` block.
    """

    def __init__(self, indent=None):
        # indent: spaces per nesting level; None joins output with no separator.
        self.html = []
        self.indent_amount = indent
        self.indent_level = 0
        self.add_count = 0

    def add(self, data, one_line=False):
        """Append a line; with one_line=True, append to the previous line instead."""
        self.add_count += 1
        if one_line:
            self.html[-1] += data
        else:
            if self.indent_amount:
                indent = ' ' * (self.indent_amount * self.indent_level)
                data = indent + data
            self.html.append(data)

    def element(self, name, attrs=None, only_with_attrs=False):
        """Generator wrapping caller-added content in <name ...> ... </name>.

        attrs defaults to None instead of a shared mutable {} (the original
        used a mutable default argument). With only_with_attrs=True and no
        attributes, the element is omitted entirely.
        """
        attrs = {} if attrs is None else attrs
        if only_with_attrs and not attrs:
            yield
            return
        # Loop variables renamed so they no longer shadow the `name` parameter.
        attrs_str = ' '.join(f"{key}='{value}'" for (key, value) in attrs.items())
        if attrs_str:
            attrs_str = ' ' + attrs_str
        self.add(f'<{name}{attrs_str}>')
        self.indent_level += 1
        initial_len = len(self.html)
        try:
            yield
        finally:
            self.indent_level -= 1
            # Collapse <tag></tag> onto one line when the body added nothing.
            # (A dead local `closing` that duplicated this f-string was removed.)
            self.add(f'</{name}>', one_line=(len(self.html) == initial_len))

    def string(self):
        """Join accumulated lines; newline-separated only when indenting."""
        sep = '' if self.indent_amount is None else '\n'
        return sep.join(self.html)
class DataSequencer(object):
    """Selects which frames/states/outputs of a trajectory are used for the
    embedding, according to a sequence strategy ('first', 'last',
    'first_last' or 'all')."""

    def __init__(self, sequence_strategy, time_horizon):
        self.sequence_strategy = sequence_strategy
        self.time_horizon = time_horizon
        if (sequence_strategy not in VALID_SEQUENCE_STRATEGIES):
            raise ValueError(('%s is not a valid sequence embedding strategy.' % sequence_strategy))
        # Number of frames the strategy consumes.
        if sequence_strategy == 'first_last':
            self.frames = 2
        elif sequence_strategy == 'all':
            self.frames = self.time_horizon
        else:
            self.frames = 1

    def load(self, images, states, outputs):
        """Pick the frames dictated by the strategy, decoding image files
        (string dtype) through utils.tf_load_image when necessary."""
        strategy = self.sequence_strategy
        from_files = (images.dtype == tf.string)
        last_t = self.time_horizon - 1
        if strategy == 'first':
            frames = [utils.tf_load_image(images, 0)] if from_files else [images[0]]
            picked_states = [states[0]]
            picked_outputs = [outputs[0]]
        elif strategy == 'last':
            frames = [utils.tf_load_image(images, last_t)] if from_files else [images[last_t]]
            picked_states = [states[-1]]
            picked_outputs = [outputs[-1]]
        elif strategy == 'first_last':
            if from_files:
                frames = [utils.tf_load_image(images, 0), utils.tf_load_image(images, last_t)]
            else:
                frames = [images[0], images[last_t]]
            picked_states = [states[0], states[-1]]
            picked_outputs = [outputs[0], outputs[-1]]
        elif strategy == 'all':
            if from_files:
                frames = [utils.tf_load_image(images, t) for t in range(self.time_horizon)]
            else:
                frames = images
            picked_states = [states[t] for t in range(self.time_horizon)]
            picked_outputs = [outputs[t] for t in range(self.time_horizon)]
        else:
            raise ValueError(('%s is not a valid sequence embedding strategy.' % strategy))
        return (frames, picked_states, picked_outputs)
def _create_computational_graph(
        fun_list: List['goos.Function']
) -> Tuple[FunctionMap, Graph, Set[NodeId], List[NodeId], Set[NodeId]]:
    """Build the dependency graph for a list of output functions.

    Performs a breadth-first traversal from `fun_list` through each
    function's `_goos_inputs`, recording every reachable function.

    BUG FIX: the return annotation previously declared a 4-tuple while the
    function returns 5 values; it now matches the actual return.

    Args:
        fun_list: Functions whose values are ultimately requested.

    Returns:
        Tuple `(fun_map, graph, in_nodes, out_nodes, heavy_nodes)` where
        `fun_map` maps node id -> function, `graph` maps node id -> list of
        input node ids, `in_nodes` holds variable (leaf) node ids,
        `out_nodes` are the ids of `fun_list`, and `heavy_nodes` are ids
        flagged by `_is_heavy_fun`.
    """
    out_nodes = [id(fun) for fun in fun_list]
    fun_map = {node: fun for node, fun in zip(out_nodes, fun_list)}
    in_nodes = set()
    heavy_nodes = set()
    graph = {}
    qu = collections.deque(out_nodes)
    while qu:
        node = qu.popleft()
        graph[node] = []
        if _is_heavy_fun(fun_map[node]):
            heavy_nodes.add(node)
        if isinstance(fun_map[node], goos.Variable):
            in_nodes.add(node)
        for next_fun in fun_map[node]._goos_inputs:
            next_fun_id = id(next_fun)
            graph[node].append(next_fun_id)
            # `fun_map` doubles as the visited set: enqueue each node once.
            if next_fun_id in fun_map:
                continue
            fun_map[next_fun_id] = next_fun
            qu.append(next_fun_id)
    return (fun_map, graph, in_nodes, out_nodes, heavy_nodes)
def preprocess_mask(mask):
    """Convert a PIL mask image into an inverted 4-channel float tensor.

    The mask is grayscaled, its size truncated to a multiple of 32, then
    downscaled by a factor of 8 (the latent resolution), replicated across
    4 channels, inverted (1 = keep, 0 = masked), and returned as a
    (1, 4, H/8, W/8) torch tensor.
    """
    gray = mask.convert('L')
    width, height = gray.size
    width -= width % 32
    height -= height % 32
    gray = gray.resize((width // 8, height // 8), resample=PIL.Image.NEAREST)
    arr = np.array(gray).astype(np.float32) / 255.0
    arr = np.tile(arr, (4, 1, 1))
    arr = arr[None].transpose(0, 1, 2, 3)
    arr = 1 - arr
    return torch.from_numpy(arr)
_model
def metaformer_pppa_s12_224(pretrained=False, **kwargs):
    """MetaFormer S12: Pooling mixers in stages 1-3, Attention in stage 4.

    A learned 7x7 position embedding is added before the attention stage.
    When `pretrained` is True, weights are downloaded from `model_urls`.
    """
    depths = [2, 2, 6, 2]
    dims = [64, 128, 320, 512]
    pos_embs = [None, None, None, partial(AddPositionEmb, spatial_shape=[7, 7])]
    mixers = [Pooling, Pooling, Pooling, Attention]
    model = MetaFormer(
        depths,
        embed_dims=dims,
        token_mixers=mixers,
        mlp_ratios=[4, 4, 4, 4],
        downsamples=[True, True, True, True],
        add_pos_embs=pos_embs,
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url=model_urls['metaformer_pppa_s12_224'],
            map_location='cpu',
            check_hash=True)
        model.load_state_dict(state)
    return model
def mlp(x, dims, is_training=True, act_fn=None, dtype=tf.float32, add_bias=True, wd=None, init_std=None, init_method=None, scope='mlp', dropout=None, trainable=True):
    """Build a multi-layer perceptron (TF1 graph mode).

    `dims` lists layer widths including the input width, so the network has
    `len(dims) - 1` layers. Per-layer activation (`act_fn`), initialization
    (`init_method`/`init_std`) and dropout flags are lists indexed by layer.
    Returns the output tensor.
    """
    h = x
    with tf.variable_scope(scope):
        for layer_idx in range(len(dims) - 1):
            with tf.variable_scope('layer_{}'.format(layer_idx)):
                # Fall back to truncated-normal init when none is specified
                # for this layer.
                method = 'truncated_normal'
                if init_method is not None and init_method[layer_idx]:
                    method = init_method[layer_idx]
                w = weight_variable_cpu(
                    [dims[layer_idx], dims[layer_idx + 1]],
                    init_method=method,
                    dtype=dtype,
                    init_param={'mean': 0.0, 'stddev': init_std[layer_idx]},
                    wd=wd,
                    name='w',
                    trainable=trainable)
                if add_bias:
                    b = weight_variable_cpu(
                        [dims[layer_idx + 1]],
                        init_method='constant',
                        dtype=dtype,
                        init_param={'val': 0.0},
                        name='b',
                        trainable=trainable)
                h = tf.matmul(h, w, name='linear')
                if add_bias:
                    h = tf.add(h, b, name='linear_bias')
                if act_fn and act_fn[layer_idx] is not None:
                    h = act_fn[layer_idx](h)
                if dropout is not None and dropout[layer_idx]:
                    log.info('Apply dropout 0.5')
                    # Dropout is disabled (keep_prob=1.0) at eval time.
                    h = tf.nn.dropout(h, keep_prob=(0.5 if is_training else 1.0))
    return h
def ULIP_PN_SSG(args):
    """Build a ULIP model with a PointNet++ (SSG) point encoder.

    Unless `args.evaluate_3d` is set, the text/vision branches are
    initialized from a pretrained SLIP checkpoint and those parameters are
    frozen.
    """
    vision_model = timm.create_model('vit_base_patch16_224', num_classes=0)
    point_encoder = Pointnet2_Ssg()
    model = ULIP_WITH_IMAGE(
        embed_dim=512,
        vision_width=768,
        point_encoder=point_encoder,
        vision_model=vision_model,
        context_length=77,
        vocab_size=49408,
        transformer_width=512,
        transformer_heads=8,
        transformer_layers=12,
        pc_feat_dims=256)
    if not args.evaluate_3d:
        checkpoint = torch.load('./data/initialize_models/slip_base_100ep.pt', map_location=torch.device('cpu'))
        # Strip the DataParallel "module." prefix from checkpoint keys.
        slip_params = {key.replace('module.', ''): value for key, value in checkpoint['state_dict'].items()}
        for name, param in model.named_parameters():
            if name in slip_params:
                source = slip_params[name]
                if isinstance(source, Parameter):
                    source = source.data
                param.requires_grad = False
                print('load {} and freeze'.format(name))
                param.data.copy_(source)
    return model
def valid_file_prefix(prefix):
    """argparse type-checker: reject prefixes containing a directory part.

    Returns `prefix` unchanged when it is a bare filename; raises
    `argparse.ArgumentTypeError` otherwise.
    """
    head, _ = os.path.split(prefix)
    if head:
        raise argparse.ArgumentTypeError('%s is not a valid output prefix (includes a directory).' % prefix)
    return prefix
# NOTE(review): the bare tuple below looks like the argument list of a
# stripped decorator (e.g. a DaCe `@dace.program(...)` signature) declaring
# the array shapes of (data, corr, mean, stddev) -- confirm against the
# original source.
(datatype[(N, M)], datatype[(M, M)], datatype[M], datatype[M])
def correlation(data, corr, mean, stddev):
    # PolyBench-style correlation kernel written in a dataflow (DaCe-like)
    # DSL. Inner functions are tasklets over index ranges: `<<` reads from
    # and `>>` writes to array references, and `arr(1, lambda x, y: x + y, 0)`
    # denotes a write-conflict-resolved sum reduction with identity 0.
    # Sum each column of `data` into mean[j].
    def comp_mean(j: _[0:M], i: _[0:N]):
        (inp << data[(i, j)])
        (out >> mean(1, (lambda x, y: (x + y)), 0)[j])
        out = inp
    # Divide the accumulated column sums by N to obtain the means.
    def comp_mean2(j: _[0:M]):
        (inp << mean[j])
        (out >> mean[j])
        out = (inp / N)
    # Accumulate squared deviations from the mean into stddev[j].
    def comp_stddev(j: _[0:M], i: _[0:N]):
        (inp << data[(i, j)])
        (inmean << mean[j])
        (out >> stddev(1, (lambda x, y: (x + y)), 0)[j])
        out = ((inp - inmean) * (inp - inmean))
    # Finish the standard deviation; near-zero deviations are clamped to 1.0
    # so the later normalization does not divide by ~0 (PolyBench convention).
    def comp_stddev2(j: _[0:M]):
        (inp << stddev[j])
        (out >> stddev[j])
        out = math.sqrt((inp / N))
        if (out <= 0.1):
            out = 1.0
    # Center and scale the data in place: (x - mean) / (sqrt(N) * stddev).
    def center_data(i: _[0:N], j: _[0:M]):
        (ind << data[(i, j)])
        (m << mean[j])
        (sd << stddev[j])
        (oud >> data[(i, j)])
        oud = ((ind - m) / (math.sqrt(datatype(N)) * sd))
    # The diagonal of a correlation matrix is always 1.
    def comp_corr_diag(i: _[0:M]):
        (corrout >> corr[(i, i)])
        corrout = 1.0
    # Upper triangle: corr[i, j] = sum_k data[k, i] * data[k, j] on the
    # centered/scaled data.
    def comp_corr_row(i: _[0:(M - 1)]):
        def comp_corr_col(j: _[(i + 1):M]):
            def comp_cov_k(k: _[0:N]):
                (indi << data[(k, i)])
                (indj << data[(k, j)])
                (cov_ij >> corr(1, (lambda x, y: (x + y)), 0)[(i, j)])
                cov_ij = (indi * indj)
    # Mirror the upper triangle into the lower triangle.
    def symmetrize(i: _[0:(M - 1)]):
        def symmetrize_col(j: _[(i + 1):M]):
            (corrin << corr[(i, j)])
            (corrout >> corr[(j, i)])
            corrout = corrin
class Bottleneck(nn.Module):
    """DenseNet-style bottleneck: BN -> sigmoid -> 1x1 conv -> BN -> sigmoid -> 3x3 conv.

    The `growth_rate` new feature channels are concatenated with the input,
    so the output has `growth_rate + in_planes` channels.

    NOTE(review): sigmoid (rather than the conventional ReLU) is used as the
    pre-activation; this matches the original code and is preserved.
    """

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 1x1 "bottleneck" conv expands to 4 * growth_rate channels.
        self.conv1 = nn.Conv2d(in_planes, 4 * growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid (identical
        # computation, no DeprecationWarning).
        out = self.conv1(torch.sigmoid(self.bn1(x)))
        out = self.conv2(torch.sigmoid(self.bn2(out)))
        # Dense connectivity: concatenate new features with the input.
        out = torch.cat([out, x], 1)
        return out
def test_pipeline_methods_pca_svm():
    """Smoke-test Pipeline(PCA -> SVC): fit plus every prediction/scoring API."""
    iris = load_iris()
    features, labels = iris.data, iris.target
    pipe = Pipeline([
        ('pca', PCA(svd_solver='full', n_components='mle', whiten=True)),
        ('svc', SVC(gamma='scale', probability=True, random_state=0)),
    ])
    pipe.fit(features, labels)
    pipe.predict(features)
    pipe.predict_proba(features)
    pipe.predict_log_proba(features)
    pipe.score(features, labels)
class OverallNCALoss(nn.Module):
    """Sums per-module NCA losses (one criterion per module plus a 'joint' head)."""

    def __init__(self, modules, device):
        super(OverallNCALoss, self).__init__()
        self.device = device
        self.criterion_dict = {name: NCALoss(alpha=1, beta=1, ep=0.0) for name in modules}
        self.criterion_dict['joint'] = NCALoss(alpha=1, beta=1, ep=0.0)

    def forward(self, output_dict, data_dict):
        """Return per-module NCA losses plus their sum under key 'loss'.

        `data_dict['e1i']` / `data_dict['e2i']` index the anchor/positive
        rows of each (L2-normalized) embedding.
        """
        losses = {}
        anchor_idx = data_dict['e1i']
        positive_idx = data_dict['e2i']
        for name, embedding in output_dict.items():
            normalized = F.normalize(embedding)
            losses[name] = self.criterion_dict[name](normalized[anchor_idx], normalized[positive_idx])
        losses['loss'] = sum(losses.values())
        return losses
def read_translations(path, n_repeats):
    """Read a translations file, grouping every `n_repeats` consecutive lines.

    Args:
        path: Text file with one translation per line; internal whitespace
            is normalized to single spaces.
        n_repeats: Number of consecutive lines per source segment.

    Returns:
        Dict mapping segment index -> list of `n_repeats` translation
        strings. A trailing group with fewer than `n_repeats` lines is
        dropped (matching the original behavior).
    """
    segment_counter = 0
    segment_translations = []
    translations = defaultdict(list)
    # FIX: use a context manager so the file handle is closed (the original
    # iterated a bare open() and leaked it).
    with open(path) as f:
        for line in f:
            segment_translations.append(' '.join(line.split()))
            if len(segment_translations) == n_repeats:
                translations[segment_counter] = segment_translations
                segment_translations = []
                segment_counter += 1
    return translations
def learn(*, policy, env, nsteps, total_episodes, ent_coef, lr, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, keep_all_ckpt=False, paths=100, epsilon=1.0):
    """Run the EPOpt-PPO training loop until `total_episodes` are collected.

    Args:
        policy: Policy network constructor passed to EPOptModel.
        env: Vectorized (and observation-normalized) environment.
        nsteps: Steps per environment per rollout.
        total_episodes: Stop once this many episodes have been collected.
        ent_coef, vf_coef, max_grad_norm: PPO loss coefficients / grad clip.
        lr, cliprange: Must be floats (schedules are not implemented here).
        gamma, lam: Discount factor and GAE lambda.
        log_interval, nminibatches, noptepochs: Logging/optimization config.
        save_interval, keep_all_ckpt: Checkpointing behavior.
        paths: Rollout paths per update (EPOpt).
        epsilon: CVaR fraction for EPOpt; float or callable of update index.
    """
    # Normalize schedule arguments into callables of the annealing fraction.
    if isinstance(epsilon, float):
        epsilon = constfn(epsilon)
    else:
        assert callable(epsilon)
    if isinstance(lr, float):
        lr = constfn(lr)
    else:
        raise NotImplementedError
    assert callable(lr)
    if isinstance(cliprange, float):
        cliprange = constfn(cliprange)
    else:
        raise NotImplementedError
    assert callable(cliprange)
    total_episodes = int(total_episodes)
    nenvs = env.num_envs
    ob_space = env.observation_space
    ac_space = env.action_space
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    make_model = (lambda: EPOptModel(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm))
    if save_interval and logger.get_dir():
        import cloudpickle
        # Persist the model factory so checkpoints can be reloaded later.
        with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
            fh.write(cloudpickle.dumps(make_model))
    model = make_model()
    runner = EPOptRunner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
    epinfobuf = deque(maxlen=100)
    tfirststart = time.time()
    update = 0
    episodes_so_far = 0
    old_savepath = None
    while True:
        update += 1
        if episodes_so_far > total_episodes:
            break
        assert (nbatch % nminibatches) == 0
        nbatch_train = nbatch // nminibatches
        tstart = time.time()
        # NOTE(review): `frac` anneals by update index but divides by
        # total_episodes -- preserved from the original; verify intent.
        frac = 1.0 - ((update - 1.0) / total_episodes)
        lrnow = lr(frac)
        cliprangenow = cliprange(frac)
        epsilonnow = epsilon(update)
        (obs, returns, masks, actions, values, neglogpacs, states, epinfos, num_episodes) = runner.run(paths=paths, epsilon=epsilonnow)
        assert (num_episodes == np.sum(masks)), (num_episodes, np.sum(masks))
        episodes_so_far += num_episodes
        epinfobuf.extend(epinfos)
        mblossvals = []
        if states is None:
            # Feed-forward policy: train on the whole batch noptepochs times.
            for _ in range(noptepochs):
                mblossvals.append(model.train(lrnow, cliprangenow, *(obs, returns, masks, actions, values, neglogpacs)))
        else:
            # BUG FIX: was `raise NotImpelementedError(...)` -- an undefined
            # name that raised NameError instead of NotImplementedError.
            raise NotImplementedError('Use examples.epopt_lstm')
        lossvals = np.mean(mblossvals, axis=0)
        tnow = time.time()
        fps = int(nbatch / (tnow - tstart))
        if ((update % log_interval) == 0) or (update == 1):
            ev = explained_variance(values, returns)
            logger.logkv('serial_timesteps', (update * nsteps))
            logger.logkv('nupdates', update)
            logger.logkv('epsilon', epsilonnow)
            logger.logkv('total_episodes', episodes_so_far)
            logger.logkv('total_timesteps', (update * nbatch))
            logger.logkv('fps', fps)
            logger.logkv('explained_variance', float(ev))
            logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
            logger.logkv('time_elapsed', (tnow - tfirststart))
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv(lossname, lossval)
            logger.dumpkvs()
        if save_interval and (((update % save_interval) == 0) or (update == 1)) and logger.get_dir():
            checkdir = osp.join(logger.get_dir(), 'checkpoints')
            os.makedirs(checkdir, exist_ok=True)
            savepath = osp.join(checkdir, ('%.5i' % update))
            print('Saving to', savepath)
            # Save observation-normalization stats so the policy can be
            # replayed with the same normalization.
            obs_norms = {}
            obs_norms['clipob'] = env.clipob
            obs_norms['mean'] = env.ob_rms.mean
            obs_norms['var'] = (env.ob_rms.var + env.epsilon)
            with open(osp.join(checkdir, 'normalize'), 'wb') as f:
                pickle.dump(obs_norms, f, pickle.HIGHEST_PROTOCOL)
            model.save(savepath)
            # Keep only the latest checkpoint unless keep_all_ckpt is set.
            if (not keep_all_ckpt) and old_savepath:
                print('Removing previous checkpoint', old_savepath)
                os.remove(old_savepath)
            old_savepath = savepath
    env.close()
def mos_wav2vec2(refresh=False, *args, **kwargs):
    # Entry point that pins the default checkpoint for the wav2vec2 MOS
    # predictor and delegates to mos_wav2vec2_url.
    # NOTE(review): the string literal below is truncated/unterminated in
    # this copy of the file (the checkpoint URL is missing) -- restore it
    # from the original source before use; as written this is a SyntaxError.
    kwargs['ckpt'] = '
    return mos_wav2vec2_url(*args, refresh=refresh, **kwargs)
def prior(D=10, lower_bound=(- 1.0), upper_bound=1.0, rng=None):
    """Draw a D-dimensional sample uniformly from [lower_bound, upper_bound).

    A fresh default Generator is created when `rng` is not supplied.
    """
    rng = np.random.default_rng() if rng is None else rng
    return rng.uniform(low=lower_bound, high=upper_bound, size=D)
class PreActivationResNet(nn.Module):
    """3D pre-activation ResNet backbone for video clips.

    Input shape: (batch, 3, sample_duration, sample_size, sample_size).
    When `last_fc` is False, the pooled 512*expansion feature vector is
    returned instead of class logits.
    """

    def __init__(self, block, layers, sample_size, sample_duration, shortcut_type='B', num_classes=400, last_fc=True):
        self.last_fc = last_fc
        self.inplanes = 64
        super(PreActivationResNet, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
        # Pool over whatever extent remains after the overall stride-16
        # temporal and stride-32 spatial downsampling.
        last_duration = math.ceil(sample_duration / 16)
        last_size = math.ceil(sample_size / 32)
        self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts as the identity.
        for module in self.modules():
            if isinstance(module, nn.Conv3d):
                fan = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan))
            elif isinstance(module, nn.BatchNorm3d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        """Stack `blocks` residual units, downsampling the skip path if needed."""
        out_planes = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_planes:
            if shortcut_type == 'A':
                # Parameter-free (zero-padded identity) shortcut.
                downsample = partial(downsample_basic_block, planes=out_planes, stride=stride)
            else:
                downsample = nn.Sequential(
                    nn.Conv3d(self.inplanes, out_planes, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm3d(out_planes))
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_planes
        for _ in range(1, blocks):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x) if self.last_fc else x
def main(args):
    # Entry point: trains a LiftedGAN with the settings from the config file.
    config_file = args.config_file
    config = utils.import_file(config_file, 'config_')
    # Seed Python and torch RNGs when a base seed is configured.
    if (config.base_random_seed is not None):
        random.seed(config.base_random_seed)
        torch.manual_seed(config.base_random_seed)
    network = LiftedGAN()
    network.initialize(config)
    # The log dir also receives a copy of the config file.
    log_dir = utils.create_log_dir(config.log_base_dir, config.name, config_file)
    summary_writer = SummaryWriter(log_dir)
    # Optionally resume from a checkpoint.
    if config.restore_model:
        start_epoch = network.restore_model(config.restore_model)
    else:
        start_epoch = 0
    # Fixed latent codes reused for every sample grid so outputs across
    # training steps are directly comparable.
    sample_codes = torch.rand(config.n_samples, 512)
    print(f'''
Start Training
# epochs: {config.num_epochs}
batch_size: {config.batch_size}
''')
    for epoch in range(start_epoch, config.num_epochs):
        start_time = time.time()
        for step in range(config.epoch_size):
            (watchlist, summary, global_step) = network.train_step()
            # Periodic console logging.
            if ((step % config.print_interval) == 0):
                watchlist['time'] = (time.time() - start_time)
                utils.display_info(epoch, step, watchlist)
                start_time = time.time()
            # Periodic tensorboard summaries (and once at the very start).
            if ((((global_step - 1) % config.summary_interval) == 0) or ((step == 0) and (epoch == start_epoch))):
                summary = network.add_videos(summary)
                utils.write_summary(summary_writer, summary, global_step)
                # Render the fixed sample codes and save an image grid.
                results = test_batch(network, sample_codes, batch_size=config.batch_size)
                images = utils.stack_images(results['recon_im'], results['canon_im'])
                torchvision.utils.save_image(images, f'{log_dir}/samples/{global_step}.jpg', nrow=8, normalize=True)
        network.save_model(log_dir, (epoch + 1))
class TestRaster2D(unittest.TestCase):
    """Tests for float_raster.raster_2D: anti-aliased polygon rasterization.

    Each test rasterizes a polygon onto a pixel grid and checks the
    per-pixel coverage fractions.
    """

    def test_square(self):
        """Axis-aligned square partially covering edge pixels."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 3.5, 3.5, 1], [1, 1, 3.5, 3.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, 1, 1, 0.5], [0, 1, 1, 0.5], [0, 0.5, 0.5, 0.25]]), 7)

    def test_square_nonuniform(self):
        """Square rendered on a non-uniform grid."""
        grid_x = np.array([0, 1, 3, 3.5, 4.2])
        grid_y = np.array([0, 2.5, 3, 3.7])
        poly_xy = np.array([[1, 3.5, 3.5, 1], [1, 1, 3.5, 3.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0], [0.6, 1, (5.0 / 7.0)], [0.6, 1, (5.0 / 7.0)], [0, 0, 0]]), 7)

    def test_square_partially_outside_grid(self):
        """Square extending past the grid edge is clipped to it."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 5, 5, 1], [1, 1, 3.5, 3.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, 1, 1, 0.5], [0, 1, 1, 0.5], [0, 1, 1, 0.5]]), 7)

    def test_square_outside_grid(self):
        """Squares entirely outside the grid (x, y, or both) render as zeros."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[5, 7.5, 7.5, 5], [1, 1, 3.5, 3.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.zeros((4, 4)), 7)
        poly_xy = np.array([[1, 3.5, 3.5, 1], [5, 5, 7.5, 7.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.zeros((4, 4)), 7)
        poly_xy = np.array([[5, 7.5, 7.5, 5], [5, 5, 7.5, 7.5]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.zeros((4, 4)), 7)

    def test_square_overlap_pixel(self):
        """Unit square exactly covering one pixel."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 2, 2, 1], [1, 1, 2, 2]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]), 7)

    def test_square_inside_pixel(self):
        """0.8x0.8 square inside one pixel covers 0.64 of it."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1.1, 1.9, 1.9, 1.1], [1.1, 1.1, 1.9, 1.9]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, 0.64, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]), 7)

    def test_triangle(self):
        """Right triangle with fractional pixel coverages."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 3, 3], [1, 1, 4]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, (2.0 / 3.0), (1.0 / 12.0), 0], [0, 1, (11.0 / 12.0), (1.0 / 3.0)], [0, 0, 0, 0]]), 7)

    def test_triangle_partially_outside_grid(self):
        """Triangle whose apex extends past the grid edge."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 5, 3], [1, 1, 4]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, (2.0 / 3.0), (1.0 / 12.0), 0], [0, 1, (11.0 / 12.0), (1.0 / 3.0)], [0, 1, (11.0 / 12.0), (1.0 / 3.0)]]), 7)

    def test_triangle_inside_pixel(self):
        """Half-unit triangle inside one pixel covers 0.25 of it."""
        grid_x = np.arange(5)
        grid_y = np.arange(5)
        poly_xy = np.array([[1, 1.5, 1.5], [1, 1, 2]])
        render = float_raster.raster_2D(poly_xy, grid_x, grid_y)
        np.testing.assert_array_almost_equal(render, np.array([[0, 0, 0, 0], [0, 0.25, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]), 7)

    def test_raise_value_error_invalid_poly_xy(self):
        """poly_xy must have exactly 2 rows."""
        with self.assertRaisesRegex(ValueError, 'Expected `poly_xy` to have 2 rows, got*'):
            float_raster.raster_2D(np.array([[1, 2, 4]]), np.arange(5), np.arange(5))

    def test_raise_value_error_invalid_grid_x(self):
        """grid_x must contain at least 2 points."""
        with self.assertRaisesRegex(ValueError, 'Expected both `grid_x` and `grid_y` to have atleast 2*'):
            float_raster.raster_2D(np.array([[1, 2, 4], [1, 2, 1]]), np.array([0]), np.array([1, 2, 3]))

    # BUG FIX: this method was also named test_raise_value_error_invalid_grid_x,
    # silently shadowing (and therefore never running) the method above.
    def test_raise_value_error_invalid_grid_y(self):
        """grid_y must contain at least 2 points."""
        with self.assertRaisesRegex(ValueError, 'Expected both `grid_x` and `grid_y` to have atleast 2*'):
            float_raster.raster_2D(np.array([[1, 2, 4], [1, 2, 1]]), np.array([0, 1, 2]), np.array([]))
def make_eval_data(args):
    """Build the fixed-noise validation dataloader described by `args`.

    The kernel parameter is the triangle slope or gaussian std scaled by
    the signal dimension; the frequency grid spans [-0.5, 0.5).
    """
    xgrid = np.linspace(-0.5, 0.5, args.fr_size, endpoint=False)
    kernel_param = (
        args.triangle_slope / args.signal_dim
        if args.kernel_type == 'triangle'
        else args.gaussian_std / args.signal_dim)
    return load_dataloader_fixed_noise(
        args.n_validation,
        signal_dim=args.signal_dim,
        max_n_freq=args.max_n_freq,
        min_sep=args.min_sep,
        distance=args.distance,
        amplitude=args.amplitude,
        floor_amplitude=args.floor_amplitude,
        kernel_type=args.kernel_type,
        kernel_param=kernel_param,
        batch_size=args.batch_size,
        xgrid=xgrid,
        snr=args.snr,
        noise=args.noise)
class AttentionValueDecoder(nn.Module):
    """Two stacked 1x1 convolutions with ReLU, mapping h_dim -> out_size channels."""

    def __init__(self, h_dim, out_size):
        super().__init__()
        self.conv1 = nn.Conv2d(h_dim, h_dim, 1, stride=1, padding=0)
        self.conv2 = nn.Conv2d(h_dim, out_size, 1, stride=1, padding=0)

    def forward(self, x):
        """Apply conv1 -> ReLU -> conv2 -> ReLU; spatial size is unchanged."""
        return F.relu(self.conv2(F.relu(self.conv1(x))))
class SerialBlock_adapt_M(nn.Module):
    """CoaT-style serial block with per-domain normalization.

    Keeps one LayerNorm pair per domain (`norm1s`/`norm2s`) while sharing
    the conv position encoding, factorized attention, and MLP across
    domains.
    """

    def __init__(self, seq_length, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None, adapt_method=None, num_domains=4):
        super().__init__()
        self.cpe = shared_cpe
        # One pre-attention norm per domain.
        self.norm1s = nn.ModuleList([norm_layer(dim) for _ in range(num_domains)])
        self.adapt_method = adapt_method
        # 'Sup' selects the supervised-adaptation attention variant.
        if self.adapt_method == 'Sup':
            self.factoratt_crpe = FactorAtt_ConvRelPosEnc_Sup(seq_length, dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe, num_domains=num_domains)
        else:
            self.factoratt_crpe = FactorAtt_ConvRelPosEnc(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        # One pre-MLP norm per domain.
        self.norm2s = nn.ModuleList([norm_layer(dim) for _ in range(num_domains)])
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, size: Tuple[int, int], domain_label=None, d=None):
        """Forward pass; `d` selects the per-domain norms.

        NOTE(review): `d` defaults to None but is immediately passed to
        int(), so callers must always supply a domain index -- confirm.
        """
        d = int(d)
        x = self.cpe(x, size)
        cur = self.norm1s[d](x)
        # FIX (idiom): compare against None with `is not None` instead of
        # `!=` -- equivalent here, and avoids surprising __ne__ overloads
        # (e.g. tensor elementwise comparison semantics).
        if (self.adapt_method is not None) and (domain_label is not None):
            cur = self.factoratt_crpe(cur, size, domain_label)
        else:
            cur = self.factoratt_crpe(cur, size)
        x = x + self.drop_path(cur)
        cur = self.norm2s[d](x)
        cur = self.mlp(cur)
        x = x + self.drop_path(cur)
        return x
class Resize(Function):
    # Legacy autograd Function implementing a shape-changing "resize" that
    # must preserve the total element count (i.e. a checked reshape).

    def forward(ctx, tensor, sizes):
        ctx.sizes = sizes
        # Total element count implied by the requested shape.
        ctx.numel = reduce((lambda x, y: (x * y)), sizes, 1)
        if (tensor.numel() != ctx.numel):
            raise RuntimeError("requested resize to {} ({} elements in total), but the given tensor has a size of {} ({} elements). autograd's resize can only change the shape of a given tensor, while preserving the number of elements. ".format('x'.join(map(str, sizes)), ctx.numel, 'x'.join(map(str, tensor.size())), tensor.numel()))
        # Remember the input shape so backward can reshape gradients back.
        ctx.input_sizes = tensor.size()
        if tensor.is_quantized:
            # NOTE(review): copying a tensor onto itself looks like a no-op
            # guard for quantized tensors -- confirm against upstream torch.
            tensor.copy_(tensor)
            return tensor.contiguous().view(*sizes)
        if tensor.is_contiguous():
            # For contiguous inputs, view a fresh tensor built from the data.
            result = tensor.new(tensor).contiguous().view(*sizes)
            return result
        else:
            # Non-contiguous inputs are made contiguous (a copy) first.
            return tensor.contiguous().view(*sizes)

    def backward(ctx, grad_output):
        # The gradient of a reshape is a reshape back to the input shape;
        # `sizes` receives no gradient (None).
        assert (grad_output.numel() == ctx.numel)
        return (grad_output.contiguous().view(ctx.input_sizes), None)
def get_windows_version(run_lambda):
    """Return the Windows product name via `wmic os get Caption`.

    The output is piped through `findstr /v Caption` to drop the header
    line; `run_lambda` executes the shell command.
    """
    system_root = os.environ.get('SystemRoot', 'C:\\Windows')
    wmic = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
    findstr = os.path.join(system_root, 'System32', 'findstr')
    command = '{} os get Caption | {} /v Caption'.format(wmic, findstr)
    return run_and_read_all(run_lambda, command)
def load_word_vectors(file, vocab_word_vec_file, word2id, vector_size=300, header=False):
    """Load word vectors restricted to `word2id`, caching them on disk.

    If `vocab_word_vec_file` already exists it is read directly; otherwise
    the (large) source `file` is scanned, vectors for in-vocabulary words
    are kept, and the cache file is written.

    Args:
        file: Path to the full pretrained-vector text file.
        vocab_word_vec_file: Cache path holding only in-vocab vectors.
        word2id: Vocabulary mapping word -> id.
        vector_size: Expected dimensionality of each vector.
        header: Whether the source file's first line is a header to skip.

    Returns:
        Dict mapping word -> np.ndarray of length `vector_size`.
    """
    word2vector = {}
    if os.path.exists(vocab_word_vec_file):
        print(('Loading vocabulary word vectors from %s...' % vocab_word_vec_file))
        with open(vocab_word_vec_file, 'r', encoding='utf-8') as f:
            for line in f:
                word = line.split(' ')[0]
                assert (word in word2id), ('Error: %s in vocab word vec file is not in vocab.' % word)
                line = ' '.join(line.split(' ')[1:]).strip()
                # FIX: np.fromstring(text, sep=...) is deprecated; parse the
                # whitespace-separated floats explicitly (same result).
                vector = np.array(line.split(), dtype=float)
                assert (len(vector) == vector_size), ('Error: %d != vector size %d for word %s.' % (len(vector), vector_size, word))
                word2vector[word] = vector
        return word2vector
    print(('Reading word vectors from %s...' % file))
    with open(file, 'r', encoding='utf-8') as f:
        for (i, line) in enumerate(f):
            if (i == 0) and header:
                continue
            if ((i % 100000) == 0) and (i > 0):
                print(('Processed %d vectors.' % i))
            word = line.split(' ')[0]
            # Keep only vectors for words in the vocabulary.
            if word not in word2id:
                continue
            line = ' '.join(line.split(' ')[1:]).strip()
            vector = np.array(line.split(), dtype=float)
            assert (len(vector) == vector_size)
            word2vector[word] = vector
    print(('Writing word vectors to %s...' % vocab_word_vec_file))
    with open(vocab_word_vec_file, 'w', encoding='utf-8') as f:
        for (word, vector) in word2vector.items():
            f.write(('%s %s\n' % (word, ' '.join([str(c) for c in vector]))))
    return word2vector
class ImaginaryElement(AdditiveGroupElement):
    """Purely imaginary element of an (asymptotic-ring style) additive group.

    Stores a single coefficient from the parent's base ring; the value
    represented is ``coefficient * I``.
    """

    def __init__(self, parent, imag):
        if parent is None:
            raise ValueError('parent must be provided')
        super().__init__(parent=parent)
        try:
            self._imag_ = parent.base()(imag)
        except (TypeError, ValueError) as e:
            # Wrap coercion failures in a descriptive combined exception.
            from sage.rings.asymptotic.misc import combine_exceptions
            from sage.structure.element import parent as parent_function
            raise combine_exceptions(ValueError('{} ({}) is not in {}'.format(imag, parent_function(imag), parent.base())), e)

    def imag(self):
        """Return the imaginary coefficient."""
        return self._imag_

    def real(self):
        """Return the real part, which is always zero."""
        return self.parent().base().zero()

    def __hash__(self):
        return hash((self.parent(), self._imag_))

    # Comparisons are derived from _eq_ and _lt_ below.
    _richcmp_ = richcmp_by_eq_and_lt('_eq_', '_lt_')

    def _eq_(self, other):
        return self._imag_ == other._imag_

    def _lt_(self, other):
        # Imaginary quantities have no canonical ordering.
        raise RuntimeError("cannot decide '<' for imaginary elements {} and {}".format(self, other))

    def _repr_(self):
        from sage.rings.asymptotic.misc import repr_op
        coefficient = self._imag_
        if coefficient == 0:
            return '0'
        if coefficient == 1:
            return 'I'
        if coefficient == -1:
            return '-I'
        return repr_op(coefficient, '*', 'I')

    def _add_(self, other):
        ring = self.parent()
        return ring.element_class(ring, self._imag_ + other._imag_)

    def _sub_(self, other):
        ring = self.parent()
        return ring.element_class(ring, self._imag_ - other._imag_)

    def __neg__(self):
        ring = self.parent()
        return ring.element_class(ring, -self._imag_)
def postprocess_qa_predictions_with_beam_search(examples, features, predictions: Tuple[(np.ndarray, np.ndarray)], version_2_with_negative: bool=False, n_best_size: int=20, max_answer_length: int=30, start_n_top: int=5, end_n_top: int=5, output_dir: Optional[str]=None, prefix: Optional[str]=None, log_level: Optional[int]=logging.WARNING):
    """Post-process beam-search QA model output into answer texts.

    Maps the model's top-k start/end log-probs and indices back to
    substrings of each example's context, builds an n-best list per
    example, and optionally writes predictions/nbest/null-odds JSON files.

    Args:
        examples: Original examples (must expose 'id' and 'context').
        features: Processed features with 'example_id' and 'offset_mapping'.
        predictions: 5-tuple (start_top_log_probs, start_top_index,
            end_top_log_probs, end_top_index, cls_logits).
        version_2_with_negative: Dataset contains unanswerable questions
            (SQuAD v2 style); enables null-odds output.
        n_best_size: Candidate answers kept per example.
        max_answer_length: Maximum answer span length (tokens).
        start_n_top / end_n_top: Beam widths for start / end positions.
        output_dir / prefix: Where (and under what name) to save JSON.
        log_level: Verbosity for this module's logger.

    Returns:
        (all_predictions, scores_diff_json): best answer text per example
        id, and (for v2) the min null score per example id, else None.
    """
    assert (len(predictions) == 5), '`predictions` should be a tuple with five elements.'
    (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) = predictions
    # BUG FIX: the error message previously misspelled "predicitions".
    assert (len(predictions[0]) == len(features)), f'Got {len(predictions[0])} predictions and {len(features)} features.'
    # Map each example to the indices of its features (long contexts are
    # split into several features).
    example_id_to_index = {k: i for (i, k) in enumerate(examples['id'])}
    features_per_example = collections.defaultdict(list)
    for (i, feature) in enumerate(features):
        features_per_example[example_id_to_index[feature['example_id']]].append(i)
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = (collections.OrderedDict() if version_2_with_negative else None)
    logger.setLevel(log_level)
    logger.info(f'Post-processing {len(examples)} example predictions split into {len(features)} features.')
    for (example_index, example) in enumerate(tqdm(examples)):
        feature_indices = features_per_example[example_index]
        min_null_score = None
        prelim_predictions = []
        for feature_index in feature_indices:
            start_log_prob = start_top_log_probs[feature_index]
            start_indexes = start_top_index[feature_index]
            end_log_prob = end_top_log_probs[feature_index]
            end_indexes = end_top_index[feature_index]
            feature_null_score = cls_logits[feature_index]
            offset_mapping = features[feature_index]['offset_mapping']
            token_is_max_context = features[feature_index].get('token_is_max_context', None)
            # Track the best (lowest) null score over all features.
            if (min_null_score is None) or (feature_null_score < min_null_score):
                min_null_score = feature_null_score
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_index = int(start_indexes[i])
                    # End beams are stored flattened per start beam.
                    j_index = (i * end_n_top) + j
                    end_index = int(end_indexes[j_index])
                    # Skip spans outside the context or without offsets.
                    if ((start_index >= len(offset_mapping)) or (end_index >= len(offset_mapping)) or (offset_mapping[start_index] is None) or (offset_mapping[end_index] is None)):
                        continue
                    # Skip inverted or over-long spans.
                    if (end_index < start_index) or (((end_index - start_index) + 1) > max_answer_length):
                        continue
                    # Skip starts that are not in their max-context feature.
                    if (token_is_max_context is not None) and (not token_is_max_context.get(str(start_index), False)):
                        continue
                    prelim_predictions.append({'offsets': (offset_mapping[start_index][0], offset_mapping[end_index][1]), 'score': (start_log_prob[i] + end_log_prob[j_index]), 'start_log_prob': start_log_prob[i], 'end_log_prob': end_log_prob[j_index]})
        predictions = sorted(prelim_predictions, key=(lambda x: x['score']), reverse=True)[:n_best_size]
        # Resolve character offsets to answer texts.
        context = example['context']
        for pred in predictions:
            offsets = pred.pop('offsets')
            pred['text'] = context[offsets[0]:offsets[1]]
        if (len(predictions) == 0):
            # Failsafe: guarantee at least one (empty) prediction.
            # NOTE(review): uses 'start_logit'/'end_logit' keys while real
            # predictions carry 'start_log_prob'/'end_log_prob' -- preserved
            # from the original; confirm downstream consumers.
            predictions.insert(0, {'text': '', 'start_logit': (- 1e-06), 'end_logit': (- 1e-06), 'score': (- 2e-06)})
        # Softmax over candidate scores to obtain probabilities.
        scores = np.array([pred.pop('score') for pred in predictions])
        exp_scores = np.exp((scores - np.max(scores)))
        probs = (exp_scores / exp_scores.sum())
        for (prob, pred) in zip(probs, predictions):
            pred['probability'] = prob
        all_predictions[example['id']] = predictions[0]['text']
        if version_2_with_negative:
            scores_diff_json[example['id']] = float(min_null_score)
        # JSON-serializable copy of the n-best list.
        all_nbest_json[example['id']] = [{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for (k, v) in pred.items()} for pred in predictions]
    if (output_dir is not None):
        assert os.path.isdir(output_dir), f'{output_dir} is not a directory.'
        prediction_file = os.path.join(output_dir, ('predictions.json' if (prefix is None) else f'{prefix}_predictions.json'))
        nbest_file = os.path.join(output_dir, ('nbest_predictions.json' if (prefix is None) else f'{prefix}_nbest_predictions.json'))
        if version_2_with_negative:
            null_odds_file = os.path.join(output_dir, ('null_odds.json' if (prefix is None) else f'{prefix}_null_odds.json'))
        logger.info(f'Saving predictions to {prediction_file}.')
        with open(prediction_file, 'w') as writer:
            writer.write((json.dumps(all_predictions, indent=4) + '\n'))
        logger.info(f'Saving nbest_preds to {nbest_file}.')
        with open(nbest_file, 'w') as writer:
            writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
        if version_2_with_negative:
            logger.info(f'Saving null_odds to {null_odds_file}.')
            with open(null_odds_file, 'w') as writer:
                writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
    return (all_predictions, scores_diff_json)
class SensorCompliance():
    """Validates requested WAM-V sensor configurations.

    Checks that each sensor's mounting position falls inside an allowed
    bounding box (`param_compliance`) and that no more than the permitted
    number of each sensor type is requested (`number_compliance`).
    """

    def __init__(self):
        rospack = rospkg.RosPack()
        pkg_path = rospack.get_path('vrx_gazebo')
        self.config_dir = os.path.join(pkg_path, 'config', 'wamv_config')
        self.boxes = find_boxes(os.path.join(self.config_dir, 'sensor_compliance', 'bounding_boxes.yaml'))
        self.sensors_dir = (rospy.get_param('sensors_dir') + '/')
        self.default_parameters = get_macros(self.sensors_dir)
        # FIX: use a context manager so the YAML file handle is closed (the
        # original passed a bare open() into yaml.safe_load and leaked it).
        with open(os.path.join(self.config_dir, 'sensor_compliance', 'numeric.yaml')) as f:
            self.numeric = yaml.safe_load(f)

    def param_compliance(self, sensor_type, params={}):
        """Return True iff `sensor_type` with `params` is allowed.

        Missing parameters are filled from the defaults; a sensor that has
        x/y/z coordinates must fit inside at least one allowed bounding
        box. (The mutable default `{}` is safe because it is copied
        immediately and never mutated.)
        """
        params = params.copy()
        if sensor_type not in self.default_parameters:
            rospy.logerr(('%s is not defined anywhere under %s' % (sensor_type, self.config_dir)))
        assert (sensor_type in self.default_parameters), ('%s is not defined anywhere under %s' % (sensor_type, self.config_dir))
        # Warn on parameters that are not in the allowed list.
        for key in params:
            if key not in self.numeric[sensor_type]['allowed_params']:
                rospy.logerr(('%s parameter specification of %s not permitted' % (key, sensor_type)))
        # Fill in defaults for parameters the request omitted.
        for key in self.default_parameters[sensor_type]:
            if key not in params:
                params[key] = self.default_parameters[sensor_type][key]
        # BUG FIX: the original condition was `('x' and 'y' and ('z' in params))`,
        # which only tested for 'z' because non-empty string literals are truthy.
        if ('x' in params) and ('y' in params) and ('z' in params):
            xyz = np.array([float(params['x']), float(params['y']), float(params['z'])])
            for box in self.boxes:
                if box.fit(xyz):
                    return True
            rospy.logerr(('%s %s is out of bounds' % (sensor_type, params['name'])))
            rospy.logerr(('%s %s is at xyz=(%s, %s, %s), %s' % (sensor_type, params['name'], xyz[0], xyz[1], xyz[2], ('must fit in at least one of the following boxes ' + 'with remaining space:'))))
            for box in self.boxes:
                rospy.logerr((' %s' % str(box)))
            return False
        else:
            return True

    def number_compliance(self, sensor_type, n):
        """Return True iff at most the allowed number of `sensor_type` is requested."""
        if n > self.numeric[sensor_type]['num']:
            rospy.logerr(('Too many %s requested' % sensor_type))
            rospy.logerr((' maximum of %s %s allowed' % (self.numeric[sensor_type]['num'], sensor_type)))
            return False
        return True
class Parsopoulos(Benchmark):
    """Parsopoulos test function: f(x) = cos(x1)^2 + sin(x2)^2.

    Bounds are [-5, 5] in every dimension; the recorded global optimum is
    f = 0 at (pi/2, pi) (one of infinitely many minima).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0) for _ in range(self.N)]
        self.global_optimum = [[(pi / 2.0), pi]]
        self.fglob = 0

    def fun(self, x, *args):
        # Count every objective evaluation.
        self.nfev += 1
        return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0
def register_Ns3VhtConfiguration_methods(root_module, cls):
    """Register ns3::VhtConfiguration constructors and methods on the binding class ``cls``."""
    # Copy constructor.
    cls.add_constructor([param('ns3::VhtConfiguration const &', 'arg0')])
    # Default constructor.
    cls.add_constructor([])
    # Static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def postprocess_qa_predictions_with_beam_search(examples, features, predictions: Tuple[(np.ndarray, np.ndarray)], version_2_with_negative: bool=False, n_best_size: int=20, max_answer_length: int=30, start_n_top: int=5, end_n_top: int=5, output_dir: Optional[str]=None, prefix: Optional[str]=None, log_level: Optional[int]=logging.WARNING):
    """Convert beam-search QA model outputs into final text answers.

    Args:
        examples: original examples; must provide 'id' and 'context'.
        features: processed features; must provide 'example_id' and 'offset_mapping'.
        predictions: 5-tuple (start_top_log_probs, start_top_index,
            end_top_log_probs, end_top_index, cls_logits) from an
            XLNet-style beam-search QA head.
        version_2_with_negative: whether unanswerable questions are allowed.
        n_best_size: number of candidate answers kept per example.
        max_answer_length: maximum answer span length in tokens.
        start_n_top: beam width over start positions.
        end_n_top: beam width over end positions (per start beam).
        output_dir: if given, write predictions/nbest/null-odds JSON files there.
        prefix: optional filename prefix for the output files.
        log_level: logging verbosity used while post-processing.

    Returns:
        (all_predictions, scores_diff_json): best answer text per example id,
        and the per-example null score (None unless version_2_with_negative).

    Raises:
        ValueError: if ``predictions`` is malformed or sizes disagree.
        EnvironmentError: if ``output_dir`` exists but is not a directory.
    """
    if (len(predictions) != 5):
        raise ValueError('`predictions` should be a tuple with five elements.')
    (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) = predictions
    if (len(predictions[0]) != len(features)):
        raise ValueError(f'Got {len(predictions[0])} predictions and {len(features)} features.')
    # Map each example id to the list of feature indices derived from it
    # (long contexts are split into several overlapping features).
    example_id_to_index = {k: i for (i, k) in enumerate(examples['id'])}
    features_per_example = collections.defaultdict(list)
    for (i, feature) in enumerate(features):
        features_per_example[example_id_to_index[feature['example_id']]].append(i)
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = (collections.OrderedDict() if version_2_with_negative else None)
    logger.setLevel(log_level)
    logger.info(f'Post-processing {len(examples)} example predictions split into {len(features)} features.')
    for (example_index, example) in enumerate(tqdm(examples)):
        feature_indices = features_per_example[example_index]
        min_null_score = None
        prelim_predictions = []
        # Gather candidate spans from every feature of this example.
        for feature_index in feature_indices:
            start_log_prob = start_top_log_probs[feature_index]
            start_indexes = start_top_index[feature_index]
            end_log_prob = end_top_log_probs[feature_index]
            end_indexes = end_top_index[feature_index]
            feature_null_score = cls_logits[feature_index]
            offset_mapping = features[feature_index]['offset_mapping']
            token_is_max_context = features[feature_index].get('token_is_max_context', None)
            # Track the smallest null score across this example's features.
            if ((min_null_score is None) or (feature_null_score < min_null_score)):
                min_null_score = feature_null_score
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_index = int(start_indexes[i])
                    # End beams are stored per start beam: row i, column j.
                    j_index = ((i * end_n_top) + j)
                    end_index = int(end_indexes[j_index])
                    # Skip positions outside the context or without valid offsets.
                    if ((start_index >= len(offset_mapping)) or (end_index >= len(offset_mapping)) or (offset_mapping[start_index] is None) or (len(offset_mapping[start_index]) < 2) or (offset_mapping[end_index] is None) or (len(offset_mapping[end_index]) < 2)):
                        continue
                    # Skip inverted or over-long spans.
                    if ((end_index < start_index) or (((end_index - start_index) + 1) > max_answer_length)):
                        continue
                    # Skip starts where this feature is not the max-context one.
                    if ((token_is_max_context is not None) and (not token_is_max_context.get(str(start_index), False))):
                        continue
                    prelim_predictions.append({'offsets': (offset_mapping[start_index][0], offset_mapping[end_index][1]), 'score': (start_log_prob[i] + end_log_prob[j_index]), 'start_log_prob': start_log_prob[i], 'end_log_prob': end_log_prob[j_index]})
        # Keep the n_best_size highest-scoring spans.
        predictions = sorted(prelim_predictions, key=(lambda x: x['score']), reverse=True)[:n_best_size]
        context = example['context']
        # Resolve character offsets to the answer text.
        for pred in predictions:
            offsets = pred.pop('offsets')
            pred['text'] = context[offsets[0]:offsets[1]]
        # Fallback dummy prediction so the n-best list is never empty.
        # NOTE(review): this entry uses 'start_logit'/'end_logit' keys while the
        # real entries use 'start_log_prob'/'end_log_prob' — confirm intended.
        if (len(predictions) == 0):
            min_null_score = (- 2e-06)
            predictions.insert(0, {'text': '', 'start_logit': (- 1e-06), 'end_logit': (- 1e-06), 'score': min_null_score})
        # Softmax over candidate scores -> per-candidate probabilities.
        scores = np.array([pred.pop('score') for pred in predictions])
        exp_scores = np.exp((scores - np.max(scores)))
        probs = (exp_scores / exp_scores.sum())
        for (prob, pred) in zip(probs, predictions):
            pred['probability'] = prob
        all_predictions[example['id']] = predictions[0]['text']
        if version_2_with_negative:
            scores_diff_json[example['id']] = float(min_null_score)
        # JSON-serializable copy of the n-best list (numpy floats -> float).
        all_nbest_json[example['id']] = [{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for (k, v) in pred.items()} for pred in predictions]
    # Optionally dump results to disk.
    if (output_dir is not None):
        if (not os.path.isdir(output_dir)):
            raise EnvironmentError(f'{output_dir} is not a directory.')
        prediction_file = os.path.join(output_dir, ('predictions.json' if (prefix is None) else f'{prefix}_predictions.json'))
        nbest_file = os.path.join(output_dir, ('nbest_predictions.json' if (prefix is None) else f'{prefix}_nbest_predictions.json'))
        if version_2_with_negative:
            null_odds_file = os.path.join(output_dir, ('null_odds.json' if (prefix is None) else f'{prefix}_null_odds.json'))
        logger.info(f'Saving predictions to {prediction_file}.')
        with open(prediction_file, 'w') as writer:
            writer.write((json.dumps(all_predictions, indent=4) + '\n'))
        logger.info(f'Saving nbest_preds to {nbest_file}.')
        with open(nbest_file, 'w') as writer:
            writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
        if version_2_with_negative:
            logger.info(f'Saving null_odds to {null_odds_file}.')
            with open(null_odds_file, 'w') as writer:
                writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
    return (all_predictions, scores_diff_json)
class Denormalize(object):
    """Invert a mean/std normalization: x -> x * std + mean.

    Implemented by reusing the normalize transform with the inverse
    parameters mean' = -mean/std and std' = 1/std, so that
    (x - mean') / std' == x * std + mean.
    """
    def __init__(self, mean, std):
        mean_arr = np.array(mean)
        std_arr = np.array(std)
        self._mean = ((- mean_arr) / std_arr)
        self._std = (1 / std_arr)
    def __call__(self, tensor):
        if not isinstance(tensor, np.ndarray):
            # Torch tensors go through the regular normalize transform.
            return normalize(tensor, self._mean, self._std)
        # ndarray path: broadcast the per-channel stats over (C, H, W).
        m = self._mean.reshape((- 1), 1, 1)
        s = self._std.reshape((- 1), 1, 1)
        return ((tensor - m) / s)
def reset_to_default() -> None:
    """Restore the default settings via the module-level setters.

    Defaults: temperature 0.015 (units per set_temp — confirm), low
    frequency 1 Hz, high frequency 3 GHz, exposure time 10 us.
    """
    set_temp(0.015)
    set_low_freq(1, 'Hz')
    set_high_freq(3, 'GHz')
    set_t_exp(10, 'us')
def Cutout(img, v, max_v, bias=0):
    """Apply cutout with side length scaled by ``v`` relative to ``max_v``."""
    # Zero magnitude means identity.
    if (v == 0):
        return img
    magnitude = (_float_parameter(v, max_v) + bias)
    side = int((magnitude * min(img.size)))
    return CutoutAbs(img, side)
# NOTE(review): bare call executed at import time; given the pytest-style test
# directly below, this looks like a decorator (e.g. `@test_utils.test()`) that
# lost its `@` and prefix in transit — confirm against the original file.
_utils.test()
def test_check_matrix_field_member_shape():
    """Placing members of one matrix field with mismatched shapes must raise."""
    a = ti.Matrix.field(2, 2, ti.i32)
    # Members (0,0)/(1,0) get shape 10 while (0,1)/(1,1) get shape 11 — inconsistent.
    ti.root.dense(ti.i, 10).place(a.get_scalar_field(0, 0))
    ti.root.dense(ti.i, 11).place(a.get_scalar_field(0, 1))
    ti.root.dense(ti.i, 10).place(a.get_scalar_field(1, 0))
    ti.root.dense(ti.i, 11).place(a.get_scalar_field(1, 1))
    # NOTE(review): `foo` is a bare no-op here; in the upstream taichi test it
    # is a @ti.kernel touching `a`, which is what triggers the shape check —
    # confirm the decorator was not lost in transit.
    def foo():
        pass
    with pytest.raises(RuntimeError, match='Members of the following field have different shapes.*'):
        foo()
class Ax3DPose(object):
    """Draw and animate a 52-joint 3D skeleton on a matplotlib 3D axis.

    ``self.I``/``self.J`` are the parent/child joint indices of each bone;
    ``self.LR`` assigns a color class per bone (0 = left, 1 = right,
    2 = center).
    """
    def __init__(self, ax, joints, lcolor='#3498db', rcolor='#e74c3c', ccolor='#2fb551'):
        """Create the (initially zeroed) bone line artists on ``ax``.

        Args:
            ax: a matplotlib 3D axes.
            joints: per-frame joint positions, indexed as joints[frame][joint, xyz].
            lcolor / rcolor / ccolor: colors for left / right / center bones.
        """
        matplotlib.rcParams['animation.embed_limit'] = 200
        self.joints = joints
        # Bone endpoints: bone i connects joint I[i] to joint J[i].
        self.I = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 20, 25, 26, 20, 28, 29, 20, 31, 32, 20, 34, 35, 21, 37, 38, 21, 40, 41, 21, 43, 44, 21, 46, 47, 21, 49, 50])
        self.J = np.arange(1, 52)
        # Color class per bone: default 1 (right), selected bones 0 (left) or 2 (center).
        self.LR = np.ones(52, dtype=int)
        self.LR[[0, 3, 6, 9, 12, 15, 17, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]] = 0
        self.LR[[2, 5, 8, 11, 14]] = 2
        self.ax = ax
        vals = np.zeros((52, 3))
        self.plots = []
        for i in np.arange(len(self.I)):
            x = np.array([vals[(self.I[i], 0)], vals[(self.J[i], 0)]])
            y = np.array([vals[(self.I[i], 1)], vals[(self.J[i], 1)]])
            z = np.array([vals[(self.I[i], 2)], vals[(self.J[i], 2)]])
            if (self.LR[i] == 0):
                c = lcolor
            elif (self.LR[i] == 1):
                c = rcolor
            else:
                c = ccolor
            # BUG FIX: the original passed c=(c if self.LR[i] else rcolor),
            # which discarded the lcolor assignment (LR == 0 is falsy) and so
            # drew left-side bones in rcolor, inconsistent with update().
            # Pass the selected color directly.
            self.plots.append(self.ax.plot(x, y, z, lw=3, c=c))
        self.ax.set_xlabel('x')
        self.ax.set_ylabel('y')
        self.ax.set_zlabel('z')
    def update(self, frame, lcolor='#3498db', rcolor='#e74c3c', ccolor='#2fb551'):
        """Move the bone artists to the joint positions of ``frame`` and recenter the view."""
        vals = self.joints[frame]
        for i in np.arange(len(self.I)):
            x = np.array([vals[(self.I[i], 0)], vals[(self.J[i], 0)]])
            y = np.array([vals[(self.I[i], 1)], vals[(self.J[i], 1)]])
            z = np.array([vals[(self.I[i], 2)], vals[(self.J[i], 2)]])
            self.plots[i][0].set_xdata(x)
            self.plots[i][0].set_ydata(y)
            self.plots[i][0].set_3d_properties(z)
            if (self.LR[i] == 0):
                c = lcolor
            elif (self.LR[i] == 1):
                c = rcolor
            else:
                c = ccolor
            self.plots[i][0].set_color(c)
        # Heuristic view radius: small coordinates -> radius 1, else 1000
        # (presumably meters vs millimeters — confirm against the data source).
        r = (1 if (np.max([vals[(0, 0)], vals[(0, 1)], vals[(0, 2)]]) <= 10) else 1000)
        (xroot, yroot, zroot) = (vals[(0, 0)], vals[(0, 1)], vals[(0, 2)])
        self.ax.set_xlim3d([((- r) + xroot), (r + xroot)])
        self.ax.set_zlim3d([((- r) + zroot), (r + zroot)])
        self.ax.set_ylim3d([((- r) + yroot), (r + yroot)])
class BatchNormalizationLayer(Layer):
    """TF1-style batch normalization with an exponential moving average.

    During training (phase_train is True) the batch statistics are used and
    folded into the EMA; at inference the EMA statistics are used instead.
    NOTE(review): set_extra_parameters() must run before __call__ — it sets
    decay/eps/stddev/normal_dim; set_extra_feeds() sets phase_train.
    """
    def __call__(self, x, seq_len=None):
        """Return the batch-normalized version of ``x`` (``seq_len`` is unused here)."""
        n_out = int(x.get_shape()[(- 1)])
        decay = self.decay
        eps = self.eps
        stddev = self.stddev
        phase_train = self.phase_train
        with tf.variable_scope(self.scope) as scope:
            self.check_reuse(scope)
            # Learnable shift (beta) and scale (gamma) per channel.
            beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0), trainable=True)
            gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev), trainable=True)
            # Batch statistics over the axes in self.normal_dim.
            (batch_mean, batch_var) = tf.nn.moments(x, self.normal_dim, name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=decay)
            def mean_var_with_update():
                # Update the EMA, then return the (identity-wrapped) batch stats.
                with tf.variable_scope(tf.get_variable_scope(), reuse=None):
                    ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return (tf.identity(batch_mean), tf.identity(batch_var))
            # Training: batch stats (+EMA update); inference: EMA averages.
            (mean, var) = tf.cond(phase_train, mean_var_with_update, (lambda : (ema.average(batch_mean), ema.average(batch_var))))
            normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
        return normed
    def set_extra_parameters(self, paras=None):
        """Set BN hyperparameters; any key missing from ``paras`` keeps its default."""
        self.decay = 0.9
        self.eps = 1e-05
        self.stddev = 0.02
        self.normal_dim = [0]
        self.reuse = False
        if (not paras):
            return
        if ('decay' in paras):
            self.decay = paras['decay']
        if ('eps' in paras):
            self.eps = paras['eps']
        if ('stddev' in paras):
            self.stddev = paras['stddev']
        if ('normal_dim' in paras):
            self.normal_dim = paras['normal_dim']
    def set_extra_feeds(self, feeds=None):
        """Wire the phase_train placeholder; defaults to a True variable when absent."""
        if (feeds and ('phase_train' in feeds)):
            self.phase_train = feeds['phase_train']
        else:
            self.phase_train = tf.Variable(True)
def _update_weights(state_dict, tensor_dict, prefix, suffix=None):
dict_prefix = (f'{prefix}_{suffix}' if (suffix is not None) else f'{prefix}')
for (layer_name, param_obj) in state_dict.items():
for (param_name, value) in param_obj.items():
key = '*'.join([dict_prefix, layer_name, param_name])
if (key in tensor_dict):
state_dict[layer_name][param_name] = tensor_dict[key] |
def layout():
    """Build the Dash layout for the topic-label editing page.

    Pulls document ids from the database, offers a month dropdown, a
    read-only topic table, per-topic label text boxes and a save button.
    Returns the list of Dash components to mount.
    """
    _ids = get_doc_ids_from_db()
    # Human-readable month label -> document id, for the dropdown options.
    dropdown_dates = {num2str_month(_id): _id for _id in _ids}
    children_list = [html.Div([html.Div([html.H3('Write topic labels to database'), dcc.Markdown('\n This app allows a user to inspect the results for monthly topics from our production\n database, and update its topic labels so that they are human readable. The labels are \n directly written to the production database (without modifying any of the topic words themselves).\n\n The write operation is quite safe - the only fields being overwritten are the topic "names",\n i.e. the labels that represent what the topic stands for. This does not affect anything in production.\n Once the write operation completes, head straight to the \n [topic model dashboard]( to inspect the updated the charts showing the topic breakdown.\n ')]), html.H4('Topic month'), html.P('\n Select the topic month from the dropdown to inspect/update the word distributions for \n that month.\n '), dcc.Dropdown(id='date-dropdown', options=[{'label': date_str, 'value': date_num} for (date_str, date_num) in dropdown_dates.items()], value=_ids[(- 1)], style={'text-align': 'center'}), html.Br(), html.Div(dcc.Loading(id='loading-progress-1', children=[dcc.Store(id='topic-data-write')])), html.Div(dash_table.DataTable(id='topic-table-write', columns=[{'name': 'Topic', 'id': 'num'}, {'name': 'Topic labels', 'id': 'topic_names'}, {'name': 'Words', 'id': 'topic_words'}], style_table={'overflowX': 'auto'}, style_cell={'backgroundColor': 'rgba(102, 204, 204, 0.05)', 'textAlign': 'left', 'font_family': 'Arial'}, style_data={'height': 'auto', 'lineHeight': '30px'}, style_cell_conditional=[{'if': {'column_id': 'num'}, 'minWidth': '60px', 'width': '60px', 'maxWidth': '60px'}], style_header={'backgroundColor': 'rgb(255, 255, 255)', 'text-align': 'left'}, style_as_list_view=True)), html.Br(), html.H4('Label topics manually'), html.P('\n Use 3-4 words to describe each topic distribution. Once finished, click on the\n "Save entries" button below to write the updated topic names to the database. 
\n '), html.Div(id='create-text-boxes'), html.Div([html.Button(id='write-button', n_clicks=0, children='Save entries')], style={'display': 'flex', 'justifyContent': 'center'}), dcc.Loading(id='loading-progress-2', children=[html.P(id='topic-name-fields')], type='default')])]
    return children_list
def generate_constant(output_name, tensor_name, data_type, dims, vals):
    """Build an ONNX Constant node named ``output_name`` wrapping the given tensor."""
    tensor = onnx.helper.make_tensor(tensor_name, data_type=data_type, dims=dims, vals=vals)
    return onnx.helper.make_node('Constant', [], [output_name], value=tensor)
class PredicateMapping():
    """Registry assigning a fresh sympy Symbol to each registered predicate."""
    def __init__(self) -> None:
        # symbol -> original predicate expression
        self.symbols2predicate = {}
        # monotonically increasing id used as the next symbol's name
        self.counter = 0
    def add_mapping(self, predicate: 'BoolExpr | A_Expr') -> Symbol:
        """Register ``predicate`` under a new numbered symbol and return the symbol.

        NOTE(review): the original annotation was ``(BoolExpr or A_Expr)``,
        which evaluates to just ``BoolExpr``; a union type was clearly meant.
        """
        res = symbols(str(self.counter))
        self.symbols2predicate[res] = predicate
        self.counter += 1
        return res
    def retrieve_predicate(self, symbol):
        """Return the predicate registered for ``symbol`` (raises KeyError if unknown)."""
        return self.symbols2predicate[symbol]
def conjugate_gradient(A, b, max_iters, res_tol=1e-10):
x = torch.zeros_like(b)
r = (b - A(x))
p = r
rTr = (r.T r)
for _ in range(max_iters):
Ap = A(p)
alpha = (rTr / (p.T Ap))
x = (x + (alpha * p))
r = (r - (alpha * Ap))
if (torch.norm(r) < res_tol):
break
rTrnew = (r.T r)
beta = (rTrnew / rTr)
p = (r + (beta * p))
rTr = rTrnew
return x |
def adjust_scales2image(size, opt):
    """Populate SinGAN-style scale-pyramid fields on ``opt`` for an image of extent ``size``.

    Sets opt.num_scales, opt.stop_scale, opt.scale1 and opt.scale_factor in
    place; returns None.
    """
    min_ratio = (opt.min_size / size)
    capped_ratio = (min([opt.max_size, size]) / size)
    # Number of scales needed to shrink from `size` down to min_size.
    opt.num_scales = (math.ceil(math.log(math.pow(min_ratio, 1), opt.scale_factor_init)) + 1)
    stop_offset = math.ceil(math.log(capped_ratio, opt.scale_factor_init))
    opt.stop_scale = (opt.num_scales - stop_offset)
    opt.scale1 = min((opt.max_size / size), 1)
    # Per-step factor so that stop_scale steps reach exactly min_size/size.
    opt.scale_factor = math.pow(min_ratio, (1 / opt.stop_scale))
    # The original recomputed the stop scale with identical inputs; the
    # values do not change, recomputation kept for exact fidelity.
    stop_offset = math.ceil(math.log(capped_ratio, opt.scale_factor_init))
    opt.stop_scale = (opt.num_scales - stop_offset)
class FieldPair():
    """Simple record pairing a field name with its content."""
    def __init__(self, name, content):
        # Field identifier.
        self.name = name
        # Associated payload/value.
        self.content = content
def deconv5x5_relu(in_channels, out_channels, stride, output_padding):
    """5x5 transposed convolution (padding=2) followed by an in-place ReLU."""
    activation = partial(nn.ReLU, inplace=True)
    return deconv(in_channels, out_channels, 5, stride, 2, output_padding=output_padding, activation_fn=activation)
class FakeData(data.Dataset):
    """Synthetic dataset of random images with random integer labels.

    Each index deterministically seeds the global RNG with
    (index + random_offset), so the same index always yields the same
    sample; the RNG state is restored afterwards.
    """
    def __init__(self, size=1000, image_size=(3, 224, 224), num_classes=10, transform=None, target_transform=None, random_offset=0):
        self.size = size
        self.num_classes = num_classes
        self.image_size = image_size
        self.transform = transform
        self.target_transform = target_transform
        self.random_offset = random_offset
    def __getitem__(self, index):
        # Save the global RNG state, reseed per index, restore afterwards.
        saved_state = torch.get_rng_state()
        torch.manual_seed(index + self.random_offset)
        img = torch.randn(*self.image_size)
        target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
        torch.set_rng_state(saved_state)
        img = transforms.ToPILImage()(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img, target)
    def __len__(self):
        return self.size
    def __repr__(self):
        out = ('Dataset ' + self.__class__.__name__) + '\n'
        out += ' Number of datapoints: {}\n'.format(self.__len__())
        tmp = ' Transforms (if any): '
        out += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = ' Target Transforms (if any): '
        out += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return out
def get_batch(bs, all_X, all_sup_Nary_Y, all_sup_Y, d, K):
    """Sample a batch of ``bs`` rows (with replacement) from the supervised data.

    Returns (X, Z, Y): X/Y are float32 copies of the sampled rows; Z is
    random +/-1 noise with the supervised class position forced to +1.
    """
    # Draw indices first, then the noise, keeping the RNG call order stable.
    sample_ids = np.random.randint(0, d, bs)
    Z = np.sign(np.random.randn(bs, K)).astype(np.float32)
    X = np.zeros((bs, d), dtype=np.float32)
    Y = np.zeros((bs, K), dtype=np.float32)
    for row, src in enumerate(sample_ids):
        X[row] = all_X[src]
        Y[row] = all_sup_Y[src]
        # Force the supervised class coordinate to +1.
        Z[row, all_sup_Nary_Y[src]] = 1
    return (X, Z, Y)
def remove_prefixes_line(line):
    """Strip boilerplate prefixes (translation notes, 'Summary:' headers, etc.) from a line."""
    # Normalize whitespace and collapse doubled paragraph markers.
    line = line.strip().replace('\n', ' ').replace('\t', ' ').replace('<PARAGRAPH><PARAGRAPH>', '<PARAGRAPH>')
    # Fixed literal substitutions (stock phrases and hyphenated chapter titles).
    literal_fixes = (
        ('See Important Quotations Explained', ''),
        ('Chapter 5: The Wine-shop', 'Chapter 5: The Wine shop'),
        ('Chapter 13: Fifty-two', 'Chapter 13: Fifty two'),
        ('Chapter 1: The Prison-Door', 'Chapter 1: The Prison Door'),
        ('Chapter 19: The Child at the Brook-Side', 'Chapter 19: The Child at the Brook Side'),
    )
    for old, new in literal_fixes:
        line = line.replace(old, new).strip()
    line = line.lstrip(punctuation).strip()
    # "Read a translation of <scene/chapter/act N> -" prefixes.
    pat_translation = '^([<PARAGRAPH>]{0,}Read a translation of.*?(scene|scenes|chapter|chapters|act) [ivxl|0-9]{1,}(.*?)-[ ]{0,})(.*$)'
    m = re.match(pat_translation, line, re.IGNORECASE)
    if m:
        line = line.replace(m.group(1), '').strip()
    line = line.replace('The tension over George mounts in ', '').strip()
    # "of Vol. <roman numeral>" prefixes (patterns are ^-anchored, so match == search).
    pat = '^((of Vol.)[ ]{0,}[ivxl][ ]{0,}[:|,|-]{0,})'
    m = re.match(pat, line, re.IGNORECASE)
    if m:
        line = line.replace(m.group(0), '').strip()
    # "Summary:" / "Summary and Analysis:" style prefixes.
    pat = '^([<PARAGRAPH>]{0,}(summary|summary and analysis|summary & analysis)[ ]{0,}[:|,|-|;]{0,})'
    m = re.match(pat, line, re.IGNORECASE)
    if m:
        line = line.replace(m.group(0), '').strip()
    line = line.lstrip(punctuation).strip()
    return line.strip()
def get_argparse_groups(parser):
    """Split parsed arguments into Namespaces keyed by argparse group title.

    NOTE(review): reads the module-level ``args`` (a parsed namespace) rather
    than taking it as a parameter — confirm ``args`` exists at call time.
    Also relies on argparse's private ``_action_groups``/``_group_actions``.
    """
    groups = {}
    for group in parser._action_groups:
        # One attribute per action in the group; missing attrs default to None.
        group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions}
        groups[group.title] = argparse.Namespace(**group_dict)
    return groups
class TD3(TorchRLAlgorithm):
    """Twin Delayed Deep Deterministic policy gradient (TD3).

    Uses two Q-functions (clipped double-Q targets), target-policy smoothing
    noise, and delayed policy/target-network updates.
    """
    def __init__(self, env, qf1, qf2, policy, exploration_policy, eval_policy=None, target_policy_noise=0.2, target_policy_noise_clip=0.5, policy_learning_rate=0.001, qf_learning_rate=0.001, policy_and_target_update_period=2, tau=0.005, qf_criterion=None, optimizer_class=optim.Adam, **kwargs):
        super().__init__(env, exploration_policy, eval_policy=(eval_policy or policy), **kwargs)
        # qf_criterion is stored but the visible losses below use explicit
        # squared error; presumably kept for subclasses — confirm.
        if (qf_criterion is None):
            qf_criterion = nn.MSELoss()
        self.qf1 = qf1
        self.qf2 = qf2
        self.policy = policy
        self.target_policy_noise = target_policy_noise
        self.target_policy_noise_clip = target_policy_noise_clip
        self.policy_and_target_update_period = policy_and_target_update_period
        self.tau = tau
        self.qf_criterion = qf_criterion
        # Target networks start as copies of the live networks.
        self.target_policy = policy.copy()
        self.target_qf1 = self.qf1.copy()
        self.target_qf2 = self.qf2.copy()
        self.qf1_optimizer = optimizer_class(self.qf1.parameters(), lr=qf_learning_rate)
        self.qf2_optimizer = optimizer_class(self.qf2.parameters(), lr=qf_learning_rate)
        self.policy_optimizer = optimizer_class(self.policy.parameters(), lr=policy_learning_rate)
    def _do_training(self):
        """Sample one replay batch and run a TD3 gradient step on it."""
        batch = self.get_batch()
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        self._train_given_data(rewards, terminals, obs, actions, next_obs)
    def _train_given_data(self, rewards, terminals, obs, actions, next_obs, logger_prefix=''):
        """Run one TD3 update (two Q updates; delayed policy/target updates)."""
        # Target-policy smoothing: clipped Gaussian noise on target actions.
        next_actions = self.target_policy(next_obs)
        noise = torch.normal(torch.zeros_like(next_actions), self.target_policy_noise)
        noise = torch.clamp(noise, (- self.target_policy_noise_clip), self.target_policy_noise_clip)
        noisy_next_actions = (next_actions + noise)
        # Clipped double-Q: take the minimum of the two target critics.
        target_q1_values = self.target_qf1(next_obs, noisy_next_actions)
        target_q2_values = self.target_qf2(next_obs, noisy_next_actions)
        target_q_values = torch.min(target_q1_values, target_q2_values)
        q_target = ((self.reward_scale * rewards) + (((1.0 - terminals) * self.discount) * target_q_values))
        q_target = q_target.detach()
        q1_pred = self.qf1(obs, actions)
        bellman_errors_1 = ((q1_pred - q_target) ** 2)
        qf1_loss = bellman_errors_1.mean()
        q2_pred = self.qf2(obs, actions)
        bellman_errors_2 = ((q2_pred - q_target) ** 2)
        qf2_loss = bellman_errors_2.mean()
        self.qf1_optimizer.zero_grad()
        qf1_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.zero_grad()
        qf2_loss.backward()
        self.qf2_optimizer.step()
        policy_actions = policy_loss = None
        # Delayed updates: policy and targets move only every N steps.
        if ((self._n_train_steps_total % self.policy_and_target_update_period) == 0):
            policy_actions = self.policy(obs)
            q_output = self.qf1(obs, policy_actions)
            policy_loss = (- q_output.mean())
            self.policy_optimizer.zero_grad()
            policy_loss.backward()
            self.policy_optimizer.step()
            # Polyak averaging of target networks.
            ptu.soft_update_from_to(self.policy, self.target_policy, self.tau)
            ptu.soft_update_from_to(self.qf1, self.target_qf1, self.tau)
            ptu.soft_update_from_to(self.qf2, self.target_qf2, self.tau)
        if self.need_to_update_eval_statistics:
            self.need_to_update_eval_statistics = False
            # Policy loss may not have been computed this step; recompute for logging.
            if (policy_loss is None):
                policy_actions = self.policy(obs)
                q_output = self.qf1(obs, policy_actions)
                policy_loss = (- q_output.mean())
            self.eval_statistics[(logger_prefix + 'QF1 Loss')] = np.mean(ptu.get_numpy(qf1_loss))
            self.eval_statistics[(logger_prefix + 'QF2 Loss')] = np.mean(ptu.get_numpy(qf2_loss))
            self.eval_statistics[(logger_prefix + 'Policy Loss')] = np.mean(ptu.get_numpy(policy_loss))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Q1 Predictions'), ptu.get_numpy(q1_pred)))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Q2 Predictions'), ptu.get_numpy(q2_pred)))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Q Targets'), ptu.get_numpy(q_target)))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Bellman Errors 1'), ptu.get_numpy(bellman_errors_1)))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Bellman Errors 2'), ptu.get_numpy(bellman_errors_2)))
            self.eval_statistics.update(create_stats_ordered_dict((logger_prefix + 'Policy Action'), ptu.get_numpy(policy_actions)))
    def get_epoch_snapshot(self, epoch):
        """Return the base snapshot augmented with TD3's networks."""
        snapshot = super().get_epoch_snapshot(epoch)
        self.update_epoch_snapshot(snapshot)
        return snapshot
    def update_epoch_snapshot(self, snapshot):
        """Add all live/target networks to ``snapshot`` in place."""
        snapshot.update(qf1=self.qf1, qf2=self.qf2, policy=self.eval_policy, trained_policy=self.policy, target_policy=self.target_policy, exploration_policy=self.exploration_policy)
    def networks(self):
        # NOTE(review): in rlkit this is usually a @property — confirm callers.
        return [self.policy, self.qf1, self.qf2, self.target_policy, self.target_qf1, self.target_qf2]
def test_meta_post_init(synthetic_continuous_bandit_feedback: BanditFeedback) -> None:
    """__post_init__ must dedupe estimators by name and validate feedback keys."""
    # Two estimators sharing the name 'ipw': the later one must win.
    ope_ = ContinuousOffPolicyEvaluation(bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw2])
    assert (ope_.ope_estimators_ == {'ipw': ipw2}), '__post_init__ returns a wrong value'
    # Distinct names are both kept.
    ope_ = ContinuousOffPolicyEvaluation(bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw3])
    assert (ope_.ope_estimators_ == {'ipw': ipw, 'ipw3': ipw3}), '__post_init__ returns a wrong value'
    # Every non-empty subset of missing required keys must raise.
    necessary_keys = ['action_by_behavior_policy', 'reward', 'pscore']
    for i in range(len(necessary_keys)):
        for deleted_keys in itertools.combinations(necessary_keys, (i + 1)):
            invalid_bandit_feedback_dict = {key: '_' for key in necessary_keys}
            for k in deleted_keys:
                del invalid_bandit_feedback_dict[k]
            with pytest.raises(RuntimeError, match='Missing key*'):
                _ = ContinuousOffPolicyEvaluation(bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[ipw])
def add_mim_extension():
    """Copy or symlink tools/configs/model-index.yml into mmseg/.mim for MIM packaging.

    Mode selection: 'develop' installs symlink (copy on Windows);
    sdist/bdist_wheel builds always copy; any other setup command does
    nothing.  Falls back to copying when symlinking fails.
    """
    if ('develop' in sys.argv):
        if (platform.system() == 'Windows'):
            mode = 'copy'
        else:
            mode = 'symlink'
    elif (('sdist' in sys.argv) or ('bdist_wheel' in sys.argv) or (platform.system() == 'Windows')):
        mode = 'copy'
    else:
        # Not an install/build command: nothing to stage.
        return
    filenames = ['tools', 'configs', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmseg', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory at the target first.
            if (osp.isfile(tar_path) or osp.islink(tar_path)):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if (mode == 'symlink'):
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # E.g. unprivileged Windows: degrade to copying (also for
                    # all subsequent filenames, since `mode` is reassigned).
                    mode = 'copy'
                    warnings.warn(f'Failed to create a symbolic link for {src_relpath}, and it will be copied to {tar_path}')
                else:
                    continue
            if (mode == 'copy'):
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
def abs_rel_metric(data_dict: dict, roi=None, max_distance=None):
    """Mean absolute relative depth error |pred - gt| / gt over the (optional) ROI."""
    pred = data_dict['result']
    gt = data_dict['target']
    # Restrict to the region of interest, keep positive depths, and convert
    # to absolute depth (optionally capped at max_distance).
    pred, gt = preprocess_roi(pred, gt, roi)
    pred, gt = get_positive_depth(pred, gt)
    pred, gt = get_absolute_depth(pred, gt, max_distance)
    return torch.mean(torch.abs(pred - gt) / gt)
def semantic_attrs(attrs):
    """Join the values of attributes whose names look semantic/user-facing."""
    keywords = ('aria', 'tooltip', 'placeholder', 'label', 'title', 'name')
    picked = []
    for attr_name, attr_value in attrs.items():
        lowered = attr_name.lower()
        if any(kw in lowered for kw in keywords):
            picked.append(attr_value)
    return ' '.join(picked)
def run(image, heatmap_body, pose):
    """Crop image and body heatmap around ``pose`` and run the 3D-pose network.

    Returns (3D pose tensor, resized cropped image, resized cropped heatmap).
    Depends on module-level globals ``model_``, ``model_type`` and the
    network ``m``; requires CUDA.
    """
    # CHW -> HWC for cropping/resizing.
    heatmap_body = np.transpose(heatmap_body, (1, 2, 0))
    bbox = same_margin_bounding_box(pose, model_type, model_['marginBox'])
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    channel_ind = np.fromiter(model_['indexHM'], dtype=int)
    cropped_heatmap = crop_input(heatmap_body, bbox, model_['square'], model_['pad'], model_['padValue'], channel_ind)
    cropped_image = crop_input(image, bbox, model_['square'], model_['pad'], model_['padValue'], [0, 1, 2])
    cropped_heatmap_ = cv2.resize(cropped_heatmap, (model_['boxsize'], model_['boxsize']))
    cropped_image_ = cv2.resize(cropped_image, (model_['boxsize'], model_['boxsize']))
    # HWC -> NCHW for the network.
    cropped_heatmap = np.transpose(cropped_heatmap_[(np.newaxis, ...)], (0, 3, 1, 2))
    cropped_image = np.transpose(cropped_image_[(np.newaxis, ...)], (0, 3, 1, 2))
    (threeD_pose,) = m.forward(image=Variable(torch.from_numpy(cropped_image).cuda()), heatmap=Variable(torch.from_numpy(cropped_heatmap).cuda()))
    return (threeD_pose.data, cropped_image_, cropped_heatmap_)
def map_checkpoint_to_state_dict(state_dict: Dict[(str, np.ndarray)]):
    """Convert a JAX/Flax ViT checkpoint into a PyTorch-style state dict.

    Input keys are slash-separated (e.g. 'Transformer/encoderblock_0/...');
    output keys are dot-separated (e.g. 'blocks.0.attn.qkv.weight') with
    torch tensors.  Handles q/k/v fusion, MLP dense layers, layer norms,
    patch/position embeddings, cls token and the classification head.

    Raises:
        NotImplementedError: for unexpected MlpBlock/Dense/LayerNorm indices.
        ValueError: for an unrecognized key component.
    """
    d = {}
    for (full_s, v) in state_dict.items():
        split = full_s.split('/')
        new = []
        for (i, s) in enumerate(split):
            if (s == 'Transformer'):
                # Transparent wrapper level: contributes no PyTorch prefix.
                pass
            elif (m := re.match('encoderblock_([0-9]+)', s)):
                new.append(f'blocks.{m.group(1)}')
            elif (s == 'MultiHeadDotProductAttention_1'):
                new.append('attn')
                so_far = '/'.join(split[:(i + 1)])
                # Fuse separate q/k/v kernels into one qkv weight:
                # (features, heads, head_dim) -> (features, heads*head_dim) -> transposed.
                q = state_dict[f'{so_far}/query/kernel']
                k = state_dict[f'{so_far}/key/kernel']
                v = state_dict[f'{so_far}/value/kernel']
                q = np.reshape(q, (q.shape[0], (- 1))).transpose((1, 0))
                k = np.reshape(k, (k.shape[0], (- 1))).transpose((1, 0))
                v = np.reshape(v, (v.shape[0], (- 1))).transpose((1, 0))
                qkv = np.concatenate([q, k, v])
                d['.'.join((new + ['qkv.weight']))] = qkv
                q = state_dict[f'{so_far}/query/bias']
                k = state_dict[f'{so_far}/key/bias']
                v = state_dict[f'{so_far}/value/bias']
                q = np.reshape(q, (- 1))
                k = np.reshape(k, (- 1))
                v = np.reshape(v, (- 1))
                qkv = np.concatenate([q, k, v])
                d['.'.join((new + ['qkv.bias']))] = qkv
                # Output projection: flatten (heads, head_dim) into features.
                out_kernel = state_dict[f'{so_far}/out/kernel']
                out_kernel = np.reshape(out_kernel, ((out_kernel.shape[0] * out_kernel.shape[1]), out_kernel.shape[2]))
                out_bias = state_dict[f'{so_far}/out/bias']
                d['.'.join((new + ['proj.weight']))] = out_kernel.transpose((1, 0))
                d['.'.join((new + ['proj.bias']))] = out_bias
                # All attention params handled at once; skip deeper components.
                break
            elif (m := re.match('MlpBlock_([0-9]+)', s)):
                # Only MlpBlock_3 is expected in this checkpoint layout.
                if (int(m.group(1)) != 3):
                    raise NotImplementedError()
                new.append(f'mlp')
            elif (m := re.match('Dense_([0-9]+)', s)):
                # Dense_0 -> fc1, Dense_1 -> fc2 (kernel transposed to torch layout).
                if (int(m.group(1)) not in {0, 1}):
                    raise NotImplementedError()
                so_far = '/'.join(split[:(i + 1)])
                d['.'.join((new + [f'fc{(int(m.group(1)) + 1)}.weight']))] = state_dict[f'{so_far}/kernel'].transpose((1, 0))
                d['.'.join((new + [f'fc{(int(m.group(1)) + 1)}.bias']))] = state_dict[f'{so_far}/bias']
                break
            elif (m := re.match('LayerNorm_([0-9]+)', s)):
                # LayerNorm_0 -> norm1 (pre-attention), LayerNorm_2 -> norm2 (pre-MLP).
                if (int(m.group(1)) == 0):
                    normid = 1
                elif (int(m.group(1)) == 2):
                    normid = 2
                else:
                    raise NotImplementedError()
                so_far = '/'.join(split[:(i + 1)])
                d['.'.join((new + [f'norm{normid}.bias']))] = state_dict[f'{so_far}/bias']
                d['.'.join((new + [f'norm{normid}.weight']))] = state_dict[f'{so_far}/scale']
                break
            elif (s == 'posembed_input'):
                so_far = '/'.join(split[:(i + 1)])
                d['pos_embed'] = state_dict[f'{so_far}/pos_embedding']
                break
            elif (s == 'embedding'):
                # Patch embedding conv: HWIO -> OIHW.
                so_far = '/'.join(split[:(i + 1)])
                d['patch_embed.proj.bias'] = state_dict[f'{so_far}/bias']
                w = state_dict[f'{so_far}/kernel']
                d['patch_embed.proj.weight'] = w.transpose([3, 2, 0, 1])
                break
            elif (s == 'cls'):
                d['cls_token'] = state_dict['cls']
                break
            elif (s == 'head'):
                d['head.bias'] = state_dict['head/bias']
                d['head.weight'] = state_dict['head/kernel'].transpose((1, 0))
                break
            elif (s == 'encoder_norm'):
                # Final encoder layer norm.
                so_far = '/'.join(split[:(i + 1)])
                d['.'.join((new + [f'norm.bias']))] = state_dict[f'{so_far}/bias']
                d['.'.join((new + [f'norm.weight']))] = state_dict[f'{so_far}/scale']
                break
            elif (s == 'pre_logits'):
                warnings.warn("ignoring 'pre_logits' since its unused")
                break
            else:
                raise ValueError(full_s)
    # Everything was collected as numpy; convert to torch tensors at the end.
    d = {k: torch.from_numpy(v) for (k, v) in d.items()}
    return d
class RobertaEmbeddings(BertEmbeddings):
    """BERT embeddings with RoBERTa's padding-aware position ids (padding_idx=1)."""
    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = 1
        # Re-create the embedding tables so padding_idx is honored.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
    def forward(self, input_ids, token_type_ids, words_embeddings=None):
        position_ids = self.create_position_ids_from_input_ids(input_ids)
        return super().forward(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, words_embeddings=words_embeddings)
    def create_position_ids_from_input_ids(self, x):
        """Count non-padding tokens cumulatively, offset by padding_idx; pads stay at padding_idx."""
        nonpad_mask = x.ne(self.padding_idx).long()
        running_positions = (torch.cumsum(nonpad_mask, dim=1) * nonpad_mask)
        return (running_positions + self.padding_idx)
def test_argmin_argmax():
    """argmin/argmax must work on datetime64 contents across axes, including None sublists."""
    array = ak.highlevel.Array([[[np.datetime64('2022'), np.datetime64('2023'), np.datetime64('2025')], [], [np.datetime64('2027'), np.datetime64('2011')], [np.datetime64('2013')]], [], [[np.datetime64('2017'), np.datetime64('2019')], [np.datetime64('2023')]]], check_valid=True)
    # axis=0 compares corresponding sublists across outer entries.
    assert (to_list(ak.operations.argmin(array, axis=0)) == [[1, 1, 0], [1], [0, 0], [0]])
    assert (to_list(ak.operations.argmax(array, axis=0)) == [[0, 0, 0], [1], [0, 0], [0]])
    # axis=1 reduces within each outer entry.
    assert (to_list(ak.operations.argmin(array, axis=1)) == [[3, 2, 0], [], [0, 0]])
    assert (to_list(ak.operations.argmax(array, axis=1)) == [[2, 0, 0], [], [1, 0]])
    # None sublists must propagate as None in the reduced result.
    array = ak.operations.from_iter([[[np.datetime64('2021-01-20'), np.datetime64('2021-01-10'), np.datetime64('2021-01-30')]], [[]], [None, None, None], [[np.datetime64('2021-01-14'), np.datetime64('2021-01-15'), np.datetime64('2021-01-16')]]], highlevel=False)
    assert (to_list(ak.argmin(array, axis=2, highlevel=False)) == [[1], [None], [None, None, None], [0]])
class MinLengthLogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder for the torch-backed implementation.

    Instantiation raises (via requires_backends) when torch is unavailable.
    """
    # Backends required by the real class.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _make_dmc(obs_type, domain, task, frame_stack, action_repeat, seed, task_kwargs=None):
    """Build a (custom) DeepMind Control env with dtype/repeat/pixel wrappers.

    Args:
        obs_type: 'pixels' adds an 84x84 rendering wrapper; anything else
            leaves state observations.
        domain / task: cdmc environment selection.
        frame_stack: accepted for interface compatibility; unused in the
            code visible here (stacking presumably happens in a caller).
        action_repeat: number of times each action is repeated.
        seed: RNG seed passed as task_kwargs['random'].
        task_kwargs: optional extra task configuration.

    Returns:
        The wrapped environment.
    """
    visualize_reward = False
    # BUG FIX: copy task_kwargs before inserting the seed so the caller's
    # dict is not mutated as a side effect.
    task_kwargs = {} if task_kwargs is None else dict(task_kwargs)
    task_kwargs['random'] = seed
    env = cdmc.make(domain, task, task_kwargs=task_kwargs, environment_kwargs=dict(flat_observation=True), visualize_reward=visualize_reward)
    env = ActionDTypeWrapper(env, np.float32)
    env = ActionRepeatWrapper(env, action_repeat)
    if (obs_type == 'pixels'):
        # Quadruped needs a different camera to keep the body in frame.
        camera_id = dict(quadruped=2).get(domain, 0)
        render_kwargs = dict(height=84, width=84, camera_id=camera_id)
        env = pixels.Wrapper(env, pixels_only=True, render_kwargs=render_kwargs)
    return env
class LabelledOrderedTrees(UniqueRepresentation, Parent):
    """Sage parent for the set of all labelled ordered trees."""
    def __init__(self, category=None):
        # Defaults to the category of Sets when none is given.
        if (category is None):
            category = Sets()
        Parent.__init__(self, category=category)
    def _repr_(self):
        """String representation of this parent."""
        return 'Labelled ordered trees'
    def cardinality(self):
        """There are infinitely many labelled ordered trees."""
        return Infinity
    def _an_element_(self):
        """Return a small sample tree with mixed label types."""
        LT = self._element_constructor_
        t = LT([], label=3)
        t1 = LT([t, t], label=42)
        t2 = LT([[]], label=5)
        return LT([t, t1, t2], label='toto')
    def _element_constructor_(self, *args, **keywords):
        """Construct an element of this parent."""
        return self.element_class(self, *args, **keywords)
    def unlabelled_trees(self):
        """Return the parent of unlabelled ordered trees."""
        return OrderedTrees_all()
    def labelled_trees(self):
        """Labelled trees of labelled trees are themselves."""
        return self
    Element = LabelledOrderedTree
class StructureFormat(StructuredVoidFormat):
    """Deprecated alias kept for backward compatibility.

    Behaves exactly like StructuredVoidFormat but emits a DeprecationWarning
    on construction, pointed at the caller via stacklevel=2.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'StructureFormat has been replaced by StructuredVoidFormat',
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
def to_expression_or_string(string_expr: str) -> Any:
    """Parse *string_expr* as a Python literal, falling back to the raw string.

    Returns the evaluated literal (int, float, list, dict, ...) when
    `string_expr` is a valid Python literal, otherwise `string_expr` itself.
    """
    try:
        return ast.literal_eval(string_expr)
    except (ValueError, SyntaxError):
        # literal_eval raises ValueError for valid-syntax non-literals
        # ("hello") but SyntaxError for text that is not Python at all
        # ("foo bar", ""); both cases must fall back to the raw string.
        return string_expr
class ClassScope(Scope):
    """Namespace of a Python class body (Cython symbol-table scope).

    Lookups use the normal scope chain, with one special case: the name
    'classmethod' resolves to Cython's own C implementation so it can wrap
    cdef functions.
    """
    def __init__(self, name, outer_scope):
        # outer_scope is passed twice — presumably as both the outer and the
        # parent scope of Scope.__init__; confirm against Scope's signature.
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.class_name = name
        # Class docstring; filled in later by the parser/transforms.
        self.doc = None
    def lookup(self, name):
        """Resolve `name`, synthesizing a 'classmethod' entry if needed."""
        entry = Scope.lookup(self, name)
        if entry:
            return entry
        if (name == 'classmethod'):
            # Synthesize an entry for __Pyx_Method_ClassMethod: a C function
            # (object) -> object, backed by cached utility code.
            entry = Entry('classmethod', '__Pyx_Method_ClassMethod', PyrexTypes.CFuncType(py_object_type, [PyrexTypes.CFuncTypeArg('', py_object_type, None)], 0, 0))
            entry.utility_code_definition = Code.UtilityCode.load_cached('ClassMethod', 'CythonFunction.c')
            self.use_entry_utility_code(entry)
            entry.is_cfunction = 1
        # Falls through with the (falsy) result of Scope.lookup when the
        # name is neither found nor 'classmethod'.
        return entry
class Rouge(Rouge155):
    """Convenience wrapper around the ROUGE-1.5.5 perl scorer."""

    DEFAULT_OPTIONS = ['-a', '-n', 4, '-x', '-2', 4, '-u', '-c', 95, '-r', 1000, '-f', 'A', '-p', 0.5, '-t', 0, '-d']

    def __init__(self, n_words=None, keep_files=False, options=None):
        """Configure ROUGE options.

        n_words: optional '-l' truncation limit added to the options.
        options: explicit option list; defaults to DEFAULT_OPTIONS (copied).
        """
        if options is None:
            self.options = self.DEFAULT_OPTIONS.copy()
        else:
            self.options = options
        if n_words:
            # BUG FIX: this previously called options.extend(...), which
            # raised AttributeError whenever options was None (the default)
            # and n_words was supplied. Extend the resolved self.options.
            self.options.extend(['-l', n_words])
        # '-m' enables Porter stemming in ROUGE; mirror it in the base class.
        stem = ('-m' in self.options)
        super(Rouge, self).__init__(n_words=n_words, stem=stem, keep_files=keep_files)

    def _run_rouge(self):
        """Invoke the ROUGE binary and return its raw stdout bytes.

        Raises ValueError with both streams on a non-zero exit, except for
        ROUGE's 'Illegal division by zero' failure mode, which is tolerated
        and treated as a (possibly empty) successful run.
        """
        options = ((['-e', self._rouge_data] + list(map(str, self.options))) + [os.path.join(self._config_dir, 'settings.xml')])
        logging.info('Running ROUGE with options {}'.format(' '.join(options)))
        pipes = Popen(([self._rouge_bin] + options), stdout=PIPE, stderr=PIPE)
        (std_out, std_err) = pipes.communicate()
        div_by_zero_error = std_err.decode('utf-8').startswith('Illegal division by zero')
        if ((pipes.returncode == 0) or div_by_zero_error):
            return std_out
        raise ValueError(((std_out.decode('utf-8') + '\n') + std_err.decode('utf-8')))
class LightPoleCartPole(ModifiableCartPoleEnv):
    """CartPole variant whose pole mass is pinned at the extreme-light value."""

    def __init__(self):
        super().__init__()
        self.masspole = self.EXTREME_LOWER_MASSPOLE
        # Recompute any quantities derived from the pole mass.
        self._followup()

    def parameters(self):
        """Return the base parameters augmented with this env's pole mass."""
        params = super().parameters
        params.update({'mass': self.masspole})
        return params
def boardd_loop(rate=200):
    """Bridge the CAN board and ZMQ: publish incoming CAN frames and board
    health, and forward outgoing frames from the 'sendcan' socket.

    Runs forever at `rate` Hz, paced by Ratekeeper.
    """
    rk = Ratekeeper(rate)
    ctx = zmq.Context()
    can_init()
    can_pub = messaging.pub_sock(ctx, service_list['can'].port)
    health_pub = messaging.pub_sock(ctx, service_list['health'].port)
    sendcan_sub = messaging.sub_sock(ctx, service_list['sendcan'].port)
    while True:
        # Publish a health message once every `rate` frames (~1 Hz).
        if rk.frame % rate == 0:
            status = can_health()
            msg = messaging.new_message()
            msg.init('health')
            msg.health.voltage = status['voltage']
            msg.health.current = status['current']
            msg.health.started = status['started']
            health_pub.send(msg.to_bytes())
        # Forward everything the board received onto the 'can' socket.
        incoming = can_recv()
        if incoming:
            can_pub.send(can_list_to_can_capnp(incoming).to_bytes())
        # Drain one pending 'sendcan' message (if any) down to the board.
        outgoing = messaging.recv_sock(sendcan_sub)
        if outgoing is not None:
            can_send_many(can_capnp_to_can_list(outgoing.sendcan))
        rk.keep_time()
class TestUniformRandomWalk(object):
    """Tests for UniformRandomWalk: argument validation, walk lengths/counts,
    isolated ('loner') nodes, self-loops, constructor defaults, and a
    throughput benchmark."""
    def test_parameter_checking(self):
        """Invalid nodes/n/length/seed must raise; an empty node list yields
        an empty result rather than an error."""
        g = create_test_graph()
        urw = UniformRandomWalk(g)
        nodes = ['0']
        n = 1
        length = 2
        seed = None
        # nodes must be an iterable of node ids (not None, not a bare id).
        with pytest.raises(ValueError):
            urw.run(nodes=None, n=n, length=length, seed=seed)
        with pytest.raises(ValueError):
            urw.run(nodes='0', n=n, length=length, seed=seed)
        # n must be a positive int: non-positive -> ValueError,
        # non-int -> TypeError.
        with pytest.raises(ValueError):
            urw.run(nodes=nodes, n=0, length=length, seed=seed)
        with pytest.raises(ValueError):
            urw.run(nodes=nodes, n=(- 121), length=length, seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=21.4, length=length, seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=(- 0.5), length=length, seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=0.0001, length=length, seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n='2', length=length, seed=seed)
        # length follows the same rules as n.
        with pytest.raises(ValueError):
            urw.run(nodes=nodes, n=n, length=0, seed=seed)
        with pytest.raises(ValueError):
            urw.run(nodes=nodes, n=n, length=(- 5), seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=n, length=11.9, seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=n, length=(- 9.9), seed=seed)
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=n, length='10', seed=seed)
        # seed must be a non-negative int (or None).
        with pytest.raises(ValueError):
            urw.run(nodes=nodes, n=n, length=length, seed=(- 1))
        with pytest.raises(TypeError):
            urw.run(nodes=nodes, n=n, length=length, seed=1010.8)
        # No root nodes: no walks, but no error either.
        nodes = []
        subgraph = urw.run(nodes=nodes, n=n, length=length, seed=None)
        assert (len(subgraph) == 0)
    def test_walk_generation_single_root_node(self):
        """From one root, exactly n walks of exactly `length` nodes each."""
        g = create_test_graph()
        urw = UniformRandomWalk(g)
        nodes = ['0']
        n = 1
        length = 1
        seed = 42
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs[0]) == length)
        length = 2
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
        length = 2
        n = 2
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
        n = 3
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
    def test_walk_generation_many_root_nodes(self):
        """With multiple roots, n walks per root; each walk starts at its
        root and has at most `length` nodes (walks may dead-end early)."""
        g = create_test_graph()
        urw = UniformRandomWalk(g)
        nodes = ['0', 2]
        n = 1
        length = 1
        seed = None
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == (n * len(nodes)))
        for (i, subgraph) in enumerate(subgraphs):
            assert (len(subgraph) == length)
            # Walk i starts at root i (walks are grouped per root node).
            assert (subgraph[0] == nodes[i])
        length = 2
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == (n * len(nodes)))
        for subgraph in subgraphs:
            assert (len(subgraph) <= length)
        n = 2
        length = 2
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == (n * len(nodes)))
        for subgraph in subgraphs:
            assert (len(subgraph) <= length)
        length = 3
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == (n * len(nodes)))
        for subgraph in subgraphs:
            assert (len(subgraph) <= length)
        n = 5
        length = 10
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == (n * len(nodes)))
        for subgraph in subgraphs:
            assert (len(subgraph) <= length)
    def test_walk_generation_loner_root_node(self):
        """A node with no edges always yields length-1 walks (itself only)."""
        g = create_test_graph()
        urw = UniformRandomWalk(g)
        nodes = ['loner']
        n = 1
        length = 1
        seed = None
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == 1)
        assert (len(subgraphs[0]) == 1)
        n = 10
        length = 1
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == 1)
        n = 10
        length = 10
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            # Even with length=10, a loner walk cannot leave the root.
            assert (len(subgraph) == 1)
    def test_walk_generation_self_loner_root_node(self):
        """A node whose only edge is a self-loop walks in place: full-length
        walks consisting solely of that node."""
        g = create_test_graph()
        urw = UniformRandomWalk(g)
        nodes = ['self loner']
        n = 1
        length = 1
        seed = None
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == 1)
        assert (len(subgraphs[0]) == 1)
        n = 10
        length = 1
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
            for node in subgraph:
                assert (node == 'self loner')
        n = 1
        length = 99
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
            for node in subgraph:
                assert (node == 'self loner')
        n = 10
        length = 10
        subgraphs = urw.run(nodes=nodes, n=n, length=length, seed=seed)
        assert (len(subgraphs) == n)
        for subgraph in subgraphs:
            assert (len(subgraph) == length)
            for node in subgraph:
                assert (node == 'self loner')
    def test_init_parameters(self):
        """Parameters given to the constructor behave identically to the same
        parameters given to run()."""
        g = create_test_graph()
        nodes = ['0', 2]
        n = 1
        length = 2
        seed = 0
        urw = UniformRandomWalk(g, n=n, length=length, seed=seed)
        urw_no_params = UniformRandomWalk(g)
        run_1 = urw.run(nodes=nodes)
        run_2 = urw_no_params.run(nodes=nodes, n=n, length=length, seed=seed)
        np.testing.assert_array_equal(run_1, run_2)
    def test_benchmark_uniformrandomwalk(self, benchmark):
        """pytest-benchmark: time walk generation on a small random graph."""
        g = example_graph_random(n_nodes=100, n_edges=500)
        urw = UniformRandomWalk(g)
        nodes = np.arange(0, 50)
        n = 2
        # NOTE(review): the assignment above is dead — n is immediately
        # overwritten with 5; likely a leftover from tuning the benchmark.
        n = 5
        length = 5
        benchmark((lambda : urw.run(nodes=nodes, n=n, length=length)))
def _get_global_header(im, info):
    """Return the list of byte chunks forming the GIF global header."""
    # Default to GIF87a; GIF89a is only required when an extension block
    # (transparency/duration/loop/comment) will actually be emitted.
    version = b'87a'
    for extensionKey in ['transparency', 'duration', 'loop', 'comment']:
        if (info and (extensionKey in info)):
            # Values that would produce a no-op extension don't force 89a:
            # a zero duration, or a comment outside the 1..255 byte range.
            if (((extensionKey == 'duration') and (info[extensionKey] == 0)) or ((extensionKey == 'comment') and (not (1 <= len(info[extensionKey]) <= 255)))):
                continue
            version = b'89a'
            break
    else:
        # for/else: runs only when no extension forced 89a above. Preserve
        # 89a if the source image was decoded from an 89a file.
        if (im.info.get('version') == b'89a'):
            version = b'89a'
    background = _get_background(im, info.get('background'))
    palette_bytes = _get_palette_bytes(im)
    color_table_size = _get_color_table_size(palette_bytes)
    # Chunks: signature + logical screen size, flags byte with the
    # global-color-table bit (0x80) OR'd with the table size, background
    # index + aspect-ratio byte 0, and the global color table itself.
    return [(((b'GIF' + version) + o16(im.size[0])) + o16(im.size[1])), o8((color_table_size + 128)), (o8(background) + o8(0)), _get_header_palette(palette_bytes)]
def timing(f):
    """Decorator: log f's wall-clock runtime (master process only).

    On the master process the call is timed and reported via mprint; on
    other processes f runs untimed. The wrapped function's metadata is
    preserved via functools.wraps.
    """
    # Local import keeps this fix self-contained in the snippet.
    import functools

    # BUG FIX: the original had a stray no-op `(f)` expression here (a
    # decorator lost in extraction) and never applied functools.wraps, so
    # wrapped functions lost their __name__/__doc__.
    @functools.wraps(f)
    def wrap(*args, **kw):
        if is_master():
            ts = time.time()
            result = f(*args, **kw)
            te = time.time()
            mprint('func:{!r} took: {:2.4f} sec'.format(f.__name__, (te - ts)))
        else:
            result = f(*args, **kw)
        return result
    return wrap
def do_vcs_install(versionfile_source, ipy):
    """Git-add the versioneer-managed files and make sure .gitattributes
    marks versionfile_source with export-subst (so `git archive` performs
    keyword substitution on it)."""
    GITS = ['git']
    if (sys.platform == 'win32'):
        # On Windows, git may only be reachable via its .cmd/.exe shims.
        GITS = ['git.cmd', 'git.exe']
    files = [versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        my_path = __file__
        # Prefer the .py source over a compiled .pyc/.pyo artifact.
        if (my_path.endswith('.pyc') or my_path.endswith('.pyo')):
            my_path = (os.path.splitext(my_path)[0] + '.py')
        versioneer_file = os.path.relpath(my_path)
    except NameError:
        # __file__ undefined (e.g. frozen/interactive); use the usual name.
        versioneer_file = 'versioneer.py'
    files.append(versioneer_file)
    # Only append to .gitattributes if an export-subst line for the version
    # file is not already present.
    present = False
    try:
        with open('.gitattributes', 'r') as fobj:
            for line in fobj:
                if line.strip().startswith(versionfile_source):
                    if ('export-subst' in line.strip().split()[1:]):
                        present = True
                        break
    except OSError:
        # No .gitattributes yet — it will be created below.
        pass
    if (not present):
        with open('.gitattributes', 'a+') as fobj:
            fobj.write(f'''{versionfile_source} export-subst
''')
        files.append('.gitattributes')
    # '--' guards against filenames that look like git options.
    run_command(GITS, (['add', '--'] + files))
class BCELossWithQuant(nn.Module):
    """Binary cross-entropy reconstruction loss plus a weighted codebook
    (quantization) term, with per-split logging scalars."""

    def __init__(self, codebook_weight=1.0):
        super().__init__()
        # Multiplier applied to the quantization loss term.
        self.codebook_weight = codebook_weight

    def forward(self, qloss, target, prediction, split):
        """Return (total_loss, log_dict) for the given split name."""
        recon = F.binary_cross_entropy_with_logits(prediction, target)
        total = recon + self.codebook_weight * qloss
        log = {
            f'{split}/total_loss': total.clone().detach().mean(),
            f'{split}/bce_loss': recon.detach().mean(),
            f'{split}/quant_loss': qloss.detach().mean(),
        }
        return total, log
class SawyerFaucetOpenEnvV2(SawyerXYZEnv):
    """Sawyer task: rotate a faucet handle to the open position."""
    # NOTE(review): `model_name` and `_target_site_config` read like
    # @property accessors, and the bare `_assert_task_is_set` expression
    # before `step` looks like a decorator stripped during extraction.
    # Code left byte-identical — confirm against the original source.
    def __init__(self):
        # Workspace limits for the hand and the randomized faucet base.
        hand_low = ((- 0.5), 0.4, (- 0.15))
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.05), 0.8, 0.0)
        obj_high = (0.05, 0.85, 0.0)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.8, 0.0]), 'hand_init_pos': np.array([0.0, 0.4, 0.2])}
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self.max_path_length = 150
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        # Distance from the knob's rotation axis to the handle tip (metres).
        self.handle_length = 0.175
    def model_name(self):
        # Path to the MuJoCo XML for the faucet scene.
        return full_v2_path_for('sawyer_xyz/sawyer_faucet.xml')
    _assert_task_is_set
    def step(self, action):
        """Advance one step; success when the handle tip is within 5 cm of
        the open-goal position."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.05))}
        return (ob, reward, False, info)
    def _target_site_config(self):
        # 'goal_close' is parked far away so only the open goal is visible.
        return [('goal_open', self._target_pos), ('goal_close', np.array([10.0, 10.0, 10.0]))]
    def _get_pos_objects(self):
        # Handle tip = knob center plus a handle_length offset whose
        # direction follows the knob joint angle.
        knob_center = (self.get_body_com('faucetBase') + np.array([0.0, 0.0, 0.125]))
        knob_angle_rad = self.data.get_joint_qpos('knob_Joint_1')
        offset = np.array([np.sin(knob_angle_rad), (- np.cos(knob_angle_rad)), 0])
        offset *= self.handle_length
        return (knob_center + offset)
    def reset_model(self):
        """Reset the hand, (optionally) randomize the faucet base, and set
        the open-handle target position."""
        self._reset_hand()
        self.obj_init_pos = (self._get_state_rand_vec() if self.random_init else self.init_config['obj_init_pos'])
        self.sim.model.body_pos[self.model.body_name2id('faucetBase')] = self.obj_init_pos
        # Target: handle tip swung to +x of the base, at knob height.
        self._target_pos = (self.obj_init_pos + np.array([(+ self.handle_length), 0.0, 0.125]))
        self.maxPullDist = np.linalg.norm((self._target_pos - self.obj_init_pos))
        return self._get_obs()
    def _reset_hand(self):
        super()._reset_hand()
        self.reachCompleted = False
    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pullDist]: reward is -reach distance
        plus a pull bonus once the gripper is within 5 cm of the handle."""
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        pullDist = np.linalg.norm((objPos - pullGoal))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)
        def pullReward():
            # Progress term plus two exponentials sharply peaked at
            # pullDist == 0; clamped to be non-negative.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
def _build_tree(paths):
    """Assemble a directed tree from a list of paths that all end at the
    same root node.

    Edges point child -> parent (reversed along each path). Returns the
    (graph, root) pair after annotating tree heights.
    """
    root = paths[0][-1]
    # Every path must terminate at the common root.
    assert all(path[-1] == root for path in paths)
    tree = nx.DiGraph()
    tree.add_nodes_from({node for path in paths for node in path})
    for path in paths:
        for parent, child in zip(path[:-1], path[1:]):
            tree.add_edge(child, parent)
    _compute_tree_height(tree, root)
    return (tree, root)
class Logger(object):
    """Thin TensorBoard logger built on the TF1 summary-writer API."""

    def __init__(self, log_dir: str):
        # FileWriter emits TensorBoard event files under log_dir.
        self.writer = tf.compat.v1.summary.FileWriter(log_dir)

    def scalar_summary(self, tag: str, value: float, step: int):
        """Log a single scalar under `tag` at `step`."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)
        self.writer.flush()

    def image_summary(self, tag: str, images: list, step: int):
        """Log a list of images, PNG-encoded in memory, as `tag/<index>`."""
        img_summaries = []
        for (i, img) in enumerate(images):
            try:
                s = StringIO()
            except NameError:
                # BUG FIX: was a bare `except:` that swallowed every error.
                # Only a missing StringIO name (Python 3, where the py2
                # StringIO module doesn't exist) should trigger the
                # BytesIO fallback.
                s = BytesIO()
            # NOTE(review): scipy.misc.toimage was removed in scipy >= 1.2;
            # this assumes an old scipy — confirm the pinned version.
            scipy.misc.toimage(img).save(s, format='png')
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])
            img_summaries.append(tf.Summary.Value(tag=('%s/%d' % (tag, i)), image=img_sum))
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)

    def histo_summary(self, tag: str, values: list, step: int, bins: int=1000):
        """Log a histogram of `values` under `tag` at `step`."""
        (counts, bin_edges) = np.histogram(values, bins=bins)
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum((values ** 2)))
        # TF expects bucket upper bounds, so drop the leftmost edge.
        bin_edges = bin_edges[1:]
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
def get_dispatch_callee(declaration):
    """Return the callee expression used to dispatch `declaration`:
    a Tensor method ('self.<name>') or a namespaced torch function
    ('<namespace>::<name>'). Raises RuntimeError for anything else."""
    name = declaration['name']
    if is_tensor_method(declaration):
        return 'self.{}'.format(name)
    if is_torch_function(declaration):
        return '{}::{}'.format(function_namespace(declaration), name)
    raise RuntimeError('could not dispatch, neither namespace function nor Tensor method')
class VGGLoss(nn.Module):
    """Perceptual loss: weighted L1 distance between VGG19 feature maps of
    the prediction x and the (gradient-free) reference y."""

    def __init__(self):
        super(VGGLoss, self).__init__()
        # Frozen VGG19 feature extractor in eval mode.
        self.vgg = VGG19()
        self.vgg.eval()
        util.set_requires_grad(self.vgg, False)
        self.criterion = nn.L1Loss()
        # Deeper layers get larger weights.
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]

    def forward(self, x, y):
        feats_x = self.vgg(x)
        with torch.no_grad():
            feats_y = self.vgg(y)
        total = 0
        for i, fx in enumerate(feats_x):
            total += self.weights[i] * self.criterion(fx, feats_y[i].detach())
        return total
def print_and_export_results(results: dict, method: str):
    """Pretty-print the evaluation summary and persist it under ./results/.

    Writes two timestamped files named `{method}_{timestamp}`: a YAML dump
    of the full results dict and a plain-text scoreboard of the per-class
    results.
    """
    import os  # local import keeps this fix self-contained

    print('\n ')
    print(' Results summary ')
    print(' ')
    print(f" average image rocauc: {results['average image rocauc']:.2f} ")
    print(f" average pixel rocauc: {results['average pixel rocauc']:.2f} ")
    print(' \n')
    timestamp = datetime.now().strftime('%d_%m_%Y_%H_%M_%S')
    name = f'{method}_{timestamp}'
    # BUG FIX: the open() calls below raised FileNotFoundError when the
    # ./results directory did not exist yet; create it up front.
    os.makedirs('./results', exist_ok=True)
    results_yaml_path = f'./results/{name}.yml'
    scoreboard_path = f'./results/{name}.txt'
    with open(results_yaml_path, 'w') as outfile:
        yaml.safe_dump(results, outfile, default_flow_style=False)
    with open(scoreboard_path, 'w') as outfile:
        outfile.write(serialize_results(results['per_class_results']))
    print(f' Results written to {results_yaml_path}')
class MaskedLossWrapper(nn.Module):
    """Wraps an element-wise loss so that UNCERTAIN/MISSING targets are
    excluded, normalizing by the number of usable labels."""

    def __init__(self, loss_fn, device):
        super().__init__()
        # loss_fn must return a per-element (unreduced) loss tensor.
        self.loss_fn = loss_fn
        self.device = device

    def _get_mask(self, targets):
        """Return a {0,1} mask: 1 where the label is usable, 0 where it is
        UNCERTAIN or MISSING."""
        mask = torch.ones(targets.shape)
        mask[targets == UNCERTAIN] = 0
        mask[targets == MISSING] = 0
        return mask.to(self.device)

    def forward(self, logits, targets):
        raw = self.loss_fn(logits, targets)
        mask = self._get_mask(targets)
        masked = raw * mask
        # Mean over usable labels only.
        return masked.sum() * (1 / mask.sum())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.