code stringlengths 101 5.91M |
|---|
def is_definite(self):
    """Return True when this object's definiteness is positive definite,
    negative definite, or identically zero.

    The definiteness string is computed lazily and cached on first access
    (EAFP: attempt the read, fall back to computing it).
    """
    try:
        def_str = self.__definiteness_string
    except AttributeError:
        # Cache miss: compute and store the definiteness classification.
        self.compute_definiteness()
        def_str = self.__definiteness_string
    # Fixed: dropped a stray table-separator artifact and replaced the
    # three-way or-chain with an idiomatic membership test.
    return def_str in ('pos_def', 'neg_def', 'zero')
class TransRec(object):
    """TransRec sequential recommender (TensorFlow 1.x graph mode).

    Models next-item recommendation as a translation in embedding space:
    score(u, prev, next) = bias(next) - || t_u + T + e_prev - e_next ||^2,
    trained with a BPR-style pairwise loss over positive/negative targets.
    """

    def __init__(self, emb_size, num_usr, num_item):
        """Build placeholders and the training graph.

        Args:
            emb_size: dimensionality of user/item embeddings.
            num_usr: number of users.
            num_item: number of items.
        """
        self.emb_size = emb_size
        self.item_count = num_item
        self.user_count = num_usr
        # Xavier-style uniform initializer.
        # Fixed: the original had a duplicated assignment (`self.init = self.init = ...`).
        bound = 6 / math.sqrt(self.emb_size)
        self.init = tf.random_uniform_initializer(minval=-bound, maxval=bound)
        self.input_Seq = tf.placeholder(tf.int32, [None, 1])    # previous item per example
        self.input_Usr = tf.placeholder(tf.int32, [None])       # user ids
        self.input_NegT = tf.placeholder(tf.int32, [None, None])  # negative target items
        self.input_PosT = tf.placeholder(tf.int32, [None, 1])   # positive target item
        (self.loss, self.output) = self.build_model(self.input_Seq, self.input_Usr, self.input_NegT, self.input_PosT)

    def loss_function(self, X_uti, X_utj):
        """BPR pairwise loss: -mean(log(sigmoid(pos_score - neg_score)))."""
        return (-1) * tf.reduce_mean(tf.log(tf.sigmoid(tf.squeeze(X_uti - X_utj))), -1)

    def l2_distance(self, x, y):
        """Squared L2 distance along the last axis, keeping that axis.

        NOTE: `keep_dims` is the TF1 spelling (renamed `keepdims` in TF2).
        """
        return tf.reduce_sum(tf.square(x - y), axis=-1, keep_dims=True)

    def build_model(self, in_Seq, in_Usr, in_Neg, in_Pos):
        """Create variables and return (pairwise loss, translated query vector)."""
        self.user_emb = tf.get_variable('user_emb', [self.user_count, self.emb_size], initializer=self.init)
        self.item_emb = tf.get_variable('item_emb', [self.item_count, self.emb_size], initializer=self.init)
        self.Beta = tf.get_variable('Beta', [self.item_count, 1], initializer=self.init)
        self.T = T = tf.get_variable('T', [self.emb_size], initializer=self.init)
        last_item = tf.nn.embedding_lookup(self.item_emb, in_Seq)
        next_item = tf.nn.embedding_lookup(self.item_emb, in_Pos)
        neg_items = tf.nn.embedding_lookup(self.item_emb, in_Neg)
        tu = tf.expand_dims(tf.nn.embedding_lookup(self.user_emb, in_Usr), 1)
        # Constrain all embeddings to the unit ball (per the TransRec paper).
        last_item = tf.clip_by_norm(last_item, 1, -1)
        next_item = tf.clip_by_norm(next_item, 1, -1)
        neg_items = tf.clip_by_norm(neg_items, 1, -1)
        tu = tf.clip_by_norm(tu, 1, -1)
        # Translated query: user vector + global translation + previous item.
        output = tu + T + last_item
        bias_pos = tf.nn.embedding_lookup(self.Beta, in_Pos)
        pos_score = bias_pos - self.l2_distance(output, next_item)
        bias_neg = tf.nn.embedding_lookup(self.Beta, in_Neg)
        neg_score = bias_neg - self.l2_distance(output, neg_items)
        loss = self.loss_function(pos_score, neg_score)
        return (loss, output)

    def predict(self):
        """Score every item in the catalog for the batch of (user, last item)."""
        tu = tf.expand_dims(tf.nn.embedding_lookup(self.user_emb, self.input_Usr), 1)
        last_item = tf.nn.embedding_lookup(self.item_emb, self.input_Seq)
        # One row of all item ids, tiled across the batch.
        all_index = tf.convert_to_tensor([[i for i in range(self.item_count)]])
        all_index = tf.tile(all_index, [tf.shape(self.input_Usr)[0], 1])
        next_item = tf.nn.embedding_lookup(self.item_emb, all_index)
        last_item = tf.clip_by_norm(last_item, 1, -1)
        next_item = tf.clip_by_norm(next_item, 1, -1)
        tu = tf.clip_by_norm(tu, 1, -1)
        Beta = tf.nn.embedding_lookup(self.Beta, all_index)
        score = Beta - self.l2_distance(tu + self.T + last_item, next_item)
        return score
class LargeCremonaDatabase(MiniCremonaDatabase):
    """Cremona elliptic-curve database with the full (large) table set.

    Extends the mini database with per-conductor BSD data, generators and
    modular degrees, plus loaders that populate those columns from the
    Cremona FTP data files.
    """

    _expected_skeleton = _cremonaSkeleton

    def allbsd(self, N):
        """Return {label: [cp, om, L, reg, sha]} for every curve of conductor N."""
        ret = {}
        for c in self.__connection__.cursor().execute(('SELECT curve,cp,om,L,' + 'reg,sha FROM t_curve,t_class USING(class) WHERE conductor=?'), (int(N),)):
            (N, iso, num) = parse_cremona_label(c[0])
            ret[iso + str(num)] = list(c[1:])
        return ret

    def allgens(self, N):
        """Return {label: generators} for every curve of conductor N.

        NOTE(review): `eval` on database text — acceptable only because the
        database ships with Sage and is trusted; do not point this at
        untrusted files.
        """
        ret = {}
        for c in self.__connection__.cursor().execute(('SELECT curve,gens ' + 'FROM t_curve,t_class USING(class) WHERE conductor=?'), (int(N),)):
            (N, iso, num) = parse_cremona_label(c[0])
            ret[iso + str(num)] = eval(c[1])
        return ret

    def degphi(self, N):
        """Return {label: modular degree} for the first curve of each class of conductor N."""
        ret = {}
        for c in self.__connection__.cursor().execute((('SELECT curve,deg FROM' + ' t_curve,t_class USING(class) WHERE curve=class||1 AND ') + 'conductor=?'), (int(N),)):
            (N, iso, num) = parse_cremona_label(c[0])
            ret[iso + str(num)] = c[1]
        return ret

    def _init_degphi(self, ftpdata, largest_conductor=0):
        """Load modular degrees from `degphi*` files in ftpdata.

        Raises:
            RuntimeError: if the database is read-only.
        """
        if self.__read_only__:
            raise RuntimeError('The database must not be read_only.')
        files = sorted(os.listdir(ftpdata))
        name = 'degphi'
        con = self.get_connection()
        for F in files:
            if not (F[:len(name)] == name):
                continue
            print('Inserting', F)
            class_data = []
            for L in open((ftpdata + '/') + F).readlines():
                (N, iso, num, degree, primes, curve) = L.split()
                if largest_conductor and (int(N) > largest_conductor):
                    break
                class_data.append((degree, N + iso))
            con.executemany('UPDATE t_class SET deg=? WHERE class=?', class_data)
            print('Committing...')
            self.commit()
            if largest_conductor and (int(N) > largest_conductor):
                break

    def _init_allbsd(self, ftpdata, largest_conductor=0):
        """Load BSD data (cp, om, L, reg, sha) from `allbsd*` files in ftpdata."""
        if self.__read_only__:
            raise RuntimeError('The database must not be read_only.')
        files = sorted(os.listdir(ftpdata))
        name = 'allbsd'
        con = self.get_connection()
        for F in files:
            if not (F[:len(name)] == name):
                continue
            print('Inserting', F)
            curve_data = []
            class_data = []
            for L in open((ftpdata + '/') + F).readlines():
                # Note: `L` is rebound to the L-value field of the record here.
                (N, iso, num, eqn, rank, tor, cp, om, L, reg, sha) = L.split()
                if largest_conductor and (int(N) > largest_conductor):
                    break
                cls = N + iso
                if num == '1':
                    class_data.append((L, cls))
                curve_data.append((cp, om, reg, eval(sha), cls + num))
            con.executemany('UPDATE t_class SET L=? WHERE class=?', class_data)
            con.executemany(('UPDATE t_curve SET cp=?,om=?,reg=?,sha=? WHERE ' + 'curve=?'), curve_data)
            print('Committing...')
            self.commit()
            if largest_conductor and (int(N) > largest_conductor):
                break

    def _init_allgens(self, ftpdata, largest_conductor=0):
        """Load generators from `allgens*` files in ftpdata."""
        if self.__read_only__:
            raise RuntimeError('The database must not be read_only.')
        files = sorted(os.listdir(ftpdata))
        name = 'allgens'
        con = self.get_connection()
        for F in files:
            if not (F[:len(name)] == name):
                continue
            print('Inserting', F)
            curve_data = []
            for L in open((ftpdata + '/') + F).readlines():
                v = L.split()
                if largest_conductor and (int(v[0]) > largest_conductor):
                    break
                # v[4] is the rank: the next rank-many fields are the generators.
                gens = ('[' + ','.join(v[6:6 + int(v[4])]).replace(':', ',')) + ']'
                curve_data.append((gens, ''.join(v[:3])))
            con.executemany('UPDATE t_curve SET gens=? WHERE curve=?', curve_data)
            print('Committing...')
            # Fixed: the commit was missing here — the method announced
            # "Committing..." but never persisted, unlike its siblings.
            self.commit()
            if largest_conductor and (int(v[0]) > largest_conductor):
                break
def test_check_ci_warn():
    """check_increasing must emit an 'interval' UserWarning on oscillating
    data and report the trend as non-increasing."""
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, 2, -3, 4, -5]
    with pytest.warns(UserWarning, match='interval'):
        result = check_increasing(xs, ys)
    assert not result
def merge_files(python_files):
    """Merge the classes found in *python_files* into one module source.

    Module-level functions of each file are attached to every class of that
    file as @staticmethod members (with names rewritten relative to the
    class), import statements inside class bodies are stripped, and the
    result is unparsed back to Python source.
    """
    merged_classes = []
    for path in python_files:
        with open(path, 'r') as handle:
            tree = ast.parse(handle.read())
        class_defs, func_defs = find_classes_and_funcs(tree)
        for class_def in class_defs:
            renamer = RewriteName(class_def.name)
            # Re-emit every free function as a staticmethod of this class.
            static_funcs = []
            for func_def in func_defs:
                static_funcs.append(ast.FunctionDef(
                    name=func_def.name,
                    args=func_def.args,
                    body=[renamer.visit(node) for node in func_def.body],
                    decorator_list=[ast.Name(id='staticmethod', ctx=ast.Load())] + func_def.decorator_list,
                    returns=func_def.returns,
                ))
            import_stripper = RemoveImport()
            class_def.body = [import_stripper.visit(node) for node in class_def.body]
            class_def.body.extend(static_funcs)
            merged_classes.append(class_def)
    return astunparse.unparse(ast.Module(body=merged_classes, type_ignores=[]))
class AmazonAddToCart(VirtualFunctionTool):
    """Declarative tool spec: add a product to the Amazon shopping cart.

    Pure metadata — the parameter/return/exception schemas below drive a
    virtual tool framework; there is no imperative logic here.
    """
    name = 'AmazonAddToCart'
    summary = 'Add a product to the shopping cart.'
    # Both arguments are required; quantity is an integer count.
    parameters: List[ArgParameter] = [{'name': 'product_id', 'type': 'string', 'description': 'The unique identifier of the product.', 'required': True}, {'name': 'quantity', 'type': 'integer', 'description': 'The quantity of the product to add.', 'required': True}]
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the addition was successful.'}]
    # NotFound for unknown ids; Conflict when the product is already carted.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The product with the specified 'product_id' was not found."}, {'name': 'ConflictException', 'description': 'The product is already in the cart.'}]
class SixConv2DCollapsingTest(BaseConv2DCollapsingTest):
    """Collapsing test over a six-conv network with two ReLU breakpoints
    (after conv2 and conv6), leaving two collapsible linear conv runs."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    class Conv2DCollapsingNet(nn.Module):
        def __init__(self):
            super().__init__()
            # Two linear chains of convolutions separated by ReLUs.
            self.conv1 = nn.Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1))
            self.conv2 = nn.Conv2d(32, 4, kernel_size=(1, 1), stride=(1, 1), bias=False)
            self.conv3 = nn.Conv2d(4, 128, kernel_size=(1, 1), stride=(1, 1))
            self.conv4 = nn.Conv2d(128, 16, kernel_size=(3, 3), stride=(1, 1))
            self.conv5 = nn.Conv2d(16, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
            self.conv6 = nn.Conv2d(64, 8, kernel_size=(1, 1), stride=(1, 1))
            self.relu = nn.ReLU()

        def forward(self, x):
            out = self.relu(self.conv2(self.conv1(x)))
            out = self.conv5(self.conv4(self.conv3(out)))
            return self.relu(self.conv6(out))

    def create_networks(self):
        return self.Conv2DCollapsingNet()
def __curve_data_filter__(curve):
    """Drop (x, y) pairs where either coordinate is the string 'None'.

    Rewrites curve.data[class] in place for every class; emits a single
    RuntimeWarning if anything was dropped.
    """
    saw_none = False
    for cls in curve.classes:
        xs = curve.data[cls][curve.plot_x_axis]
        ys = curve.data[cls][curve.plot_y_axis]
        filtered = {curve.plot_x_axis: [], curve.plot_y_axis: []}
        for x_val, y_val in zip(xs, ys):
            if x_val == 'None' or y_val == 'None':
                saw_none = True
                continue
            filtered[curve.plot_x_axis].append(x_val)
            filtered[curve.plot_y_axis].append(y_val)
        curve.data[cls] = filtered
    if saw_none:
        warn(CURVE_NONE_WARNING, RuntimeWarning)
def test_sort():
    """ak.operations.sort sorts every sublist ascending, preserving shape."""
    data = ak.Array([[7, 5, 7], [], [2], [8, 2]])
    expected = [[5, 7, 7], [], [2], [2, 8]]
    assert to_list(ak.operations.sort(data)) == expected
class SchemaAndWordCopyingDecoder(BaseCopyingDecoder):
    """Decoder that marks tokens copyable from BOTH the input words and the
    database schema, tracking two parallel copy-index sequences.

    Convention used throughout: indices[0] = input-word copy indices,
    indices[1] = schema copy indices. Prepend/append pad both with 0.
    """

    def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', source_copy_feature_name='source_copy_indices', schema_copy_feature_name='schema_copy_indices', prepend_token=None, append_token=None):
        super(SchemaAndWordCopyingDecoder, self).__init__(delimiter=delimiter, tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token)
        self.source_copy_feature_name = source_copy_feature_name
        self.schema_copy_feature_name = schema_copy_feature_name
        # Lookup tables published by the input pipeline via a TF collection.
        schema_tables = graph_utils.get_dict_from_collection('schema_tables')
        self.schema_lookup_table = schema_tables['schema_file_lookup_table']
        self.schema_strings_table = schema_tables['all_schema_strings']

    def decode(self, data, items):
        """Decode, then split the packed `indices` pair into the two named features."""
        decoded_items = super(SchemaAndWordCopyingDecoder, self).decode(data, items)
        indices = decoded_items.pop('indices')
        schema_copies_indices = indices[1]
        input_copies_indices = indices[0]
        decoded_items[self.schema_copy_feature_name] = schema_copies_indices
        decoded_items[self.source_copy_feature_name] = input_copies_indices
        # Return in the caller-requested item order.
        return [decoded_items[_] for _ in items]

    def _prepend(self, tokens, indices):
        """Prepend a token; both copy-index sequences get a leading 0 (no copy)."""
        (tokens, _) = super(SchemaAndWordCopyingDecoder, self)._prepend(tokens, indices)
        schema_copies_indices = indices[1]
        input_copies_indices = indices[0]
        input_copies_indices = tf.concat([[0], input_copies_indices], 0, name='prepend_to_input_copies_indices')
        schema_copies_indices = tf.concat([[0], schema_copies_indices], 0, name='prepend_to_schema_copies_indices')
        return (tokens, [input_copies_indices, schema_copies_indices])

    def _append(self, tokens, indices):
        """Append a token; both copy-index sequences get a trailing 0 (no copy)."""
        (tokens, _) = super(SchemaAndWordCopyingDecoder, self)._append(tokens, indices)
        schema_copies_indices = indices[1]
        input_copies_indices = indices[0]
        input_copies_indices = tf.concat([input_copies_indices, [0]], 0, name='append_to_input_copies_indices')
        schema_copies_indices = tf.concat([schema_copies_indices, [0]], 0, name='append_to_schema_copies_indices')
        return (tokens, [input_copies_indices, schema_copies_indices])

    def _mark_all_copies(self, tokens, data):
        """Mark copies against the input words, then against the schema tokens.

        data layout assumed: data[1] = input words, data[2][0] = schema file
        location key — TODO confirm against the feeding pipeline.
        """
        words = data[1]
        (tokens, input_copies_indices) = self._mark_copies(tokens, words, 'COPY_WORD')
        schema_location = data[2][0]
        # location -> schema id -> serialized schema string -> token list.
        schema_id = self.schema_lookup_table.lookup(schema_location)
        schema_string = self.schema_strings_table.lookup(schema_id)
        schema = tf.string_split(schema_string, delimiter=' ').values
        (tokens, schema_copies_indices) = self._mark_copies(tokens, schema, 'COPY_SCHEMA')
        return (tokens, [input_copies_indices, schema_copies_indices])

    def list_items(self):
        """Base items plus the two copy-index feature names."""
        items = super(SchemaAndWordCopyingDecoder, self).list_items()
        items += [self.schema_copy_feature_name, self.source_copy_feature_name]
        return items
class TestRerouteTensor(test_util.TestCase):
    """Caffe2 Net.reroute_tensor: insert an op consuming a blob and rewire
    the listed downstream consumers to read the op's output instead."""

    def test_reroute_tensor(self):
        """Insert SpatialBN after conv1; the Relu must then read conv1_bn."""
        net = core.Net('reroute_tensor')
        net.Conv(['input', 'w', 'b'], 'conv1')
        net.Relu(['conv1'], 'conv1_relu')
        # New op consumes 'conv1' and produces 'conv1_bn' (plus BN stats).
        new_op = core.CreateOperator('SpatialBN', ['conv1', 'scale', 'bias', 'mean', 'var'], ['conv1_bn', 'mean', 'var', 'saved_mean', 'saved_var'])
        # Reroute 'conv1' for the Relu op only (op index 1 at this point).
        net.reroute_tensor('conv1', new_op, [net.Proto().op[1]])
        # The BN op should now sit at index 1, pushing the Relu to index 2,
        # and the Relu's input should have been rewritten to 'conv1_bn'.
        self.assertEqual(new_op, net.Proto().op[1], 'insertion failed')
        self.assertEqual(net.Proto().op[2].input[0], 'conv1_bn', 'reroute failed')
# Fixed: the decorator had lost its '@' prefix — `_cache(maxsize=None)` was a
# bare expression statement, so the function was never actually memoized.
@_cache(maxsize=None)
def _read_template(template_fn: str) -> CodeTemplate:
    """Read and parse *template_fn* into a CodeTemplate, cached per path."""
    return CodeTemplate.from_file(template_fn)
class SDPAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V.

    Optional causal masking plus separately settable query/key padding
    masks; returns both the attended values and the attention weights.
    """

    def __init__(self, dropout=0, causal=False):
        super(SDPAttention, self).__init__()
        self.causal = causal
        self.dropout = nn.Dropout(dropout)
        self.mask_q = None
        self.mask_k = None

    def set_mask_q(self, masked_tq):
        # Padding mask over query positions, shape (batch, t_q).
        self.mask_q = masked_tq

    def set_mask_k(self, masked_tk):
        # Padding mask over key positions, shape (batch, t_k).
        self.mask_k = masked_tk

    def forward(self, q, k, v):
        """Attend q over (k, v); returns (output, attention weights)."""
        b_q, t_q, dim_q = list(q.size())
        b_k, t_k, dim_k = list(k.size())
        b_v, t_v, dim_v = list(v.size())
        assert b_q == b_k and b_k == b_v
        assert dim_q == dim_k
        assert t_k == t_v
        batch = b_q
        scores = torch.bmm(q, k.transpose(1, 2))
        scores = scores / (dim_k ** 0.5)
        combined_mask = None
        with torch.no_grad():
            if self.causal and t_q > 1:
                upper = q.data.new(t_q, t_k).byte().fill_(1).triu_(1)
                combined_mask = upper.unsqueeze(0).expand(batch, t_q, t_k)
            if self.mask_k is not None:
                mk = self.mask_k.unsqueeze(1).expand(batch, t_q, t_k)
                combined_mask = mk if combined_mask is None else (combined_mask | mk)
            if self.mask_q is not None:
                mq = self.mask_q.unsqueeze(2).expand(batch, t_q, t_k)
                combined_mask = mq if combined_mask is None else (combined_mask | mq)
        if combined_mask is not None:
            scores.masked_fill_(combined_mask, float('-inf'))
        # Softmax in fp32 when inputs are fp16, for numerical stability.
        sm_dtype = torch.float32 if scores.dtype == torch.float16 else scores.dtype
        weights = F.softmax(scores, dim=2, dtype=sm_dtype)
        weights = self.dropout(weights)
        return torch.bmm(weights, v), weights
def lm(batch_size):
    """Build the language-modeling JobTemplate for the given batch size.

    The command keeps a '%s' placeholder for the data root, to be filled
    in by the job runner.
    """
    command = 'python main.py --cuda --data %s/wikitext2' + (' --batch_size %d' % batch_size)
    return JobTemplate(
        model='LM (batch size %d)' % batch_size,
        command=command,
        working_directory='language_modeling',
        num_steps_arg='--steps',
        distributed=True,
    )
class Q(object):
    """A chainable, LINQ-style query wrapper around a plain list.

    Every transforming method returns a new Q; aggregations return scalars.
    Selectors are normalized through the project helper make_selector_fn,
    and unhashable group keys are made hashable via the hashable() helper.
    """

    def __init__(self, list_):
        super(Q, self).__init__()
        self._list = list_

    def __len__(self):
        return len(self._list)

    def __getitem__(self, key):
        return self._list[key]

    def __eq__(self, other):
        # Compare against another Q's wrapped list, or a raw sequence.
        other_list = other._list if isinstance(other, self.__class__) else other
        return self._list == other_list

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _append(self, item):
        self._list.append(item)

    def group(self, selector):
        """Group by selector; returns Q of (group_value, Q(items)), sorted by key."""
        selector = make_selector_fn(selector)
        groups = {}
        for item in self._list:
            value = selector(item)
            key = hashable(value)
            if key not in groups:
                groups[key] = (value, Q([]))
            groups[key][1]._append(item)
        return Q([groups[key] for key in sorted(groups)])

    def group_map(self, selector, fn):
        return self.group(selector).map(fn)

    def map(self, fn):
        """Apply fn per item; items are unpacked when fn takes more than one arg."""
        if len(inspect.signature(fn).parameters) > 1:
            mapped = [fn(*item) for item in self._list]
        else:
            mapped = [fn(item) for item in self._list]
        return Q(mapped)

    def select(self, selector):
        pick = make_selector_fn(selector)
        return Q([pick(item) for item in self._list])

    def min(self):
        return min(self._list)

    def max(self):
        return max(self._list)

    def sum(self):
        return sum(self._list)

    def len(self):
        return len(self._list)

    def mean(self):
        # Suppress numpy's empty-slice warnings; returns nan on empty input.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return float(np.mean(self._list))

    def std(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return float(np.std(self._list))

    def mean_std(self):
        return (self.mean(), self.std())

    def argmax(self, selector):
        return max(self._list, key=make_selector_fn(selector))

    def filter(self, fn):
        return Q([item for item in self._list if fn(item)])

    def filter_equals(self, selector, value):
        pick = make_selector_fn(selector)
        return self.filter(lambda record: pick(record) == value)

    def filter_in(self, selector, values):
        assert isinstance(values, list)
        pick = make_selector_fn(selector)
        return self.filter(lambda record: pick(record) in values)

    def filter_not_none(self):
        return self.filter(lambda record: record is not None)

    def filter_not_nan(self):
        return self.filter(lambda record: not np.isnan(record))

    def flatten(self):
        return Q([item for sub in self._list for item in sub])

    def unique(self):
        """Preserve first occurrence of each distinct (hashable-ized) item."""
        seen = set()
        result = []
        for item in self._list:
            key = hashable(item)
            if key not in seen:
                seen.add(key)
                result.append(item)
        return Q(result)

    def sorted(self, key=None):
        """Sort ascending; NaN keys sort first (treated as -inf)."""
        base_key = key if key is not None else (lambda x: x)

        def nan_safe_key(item):
            value = base_key(item)
            if isinstance(value, (np.floating, float)) and np.isnan(value):
                return float('-inf')
            return value

        return Q(sorted(self._list, key=nan_safe_key))
def gaussian_mlp_policy_tf_ppo_benchmarks():
    """Run the Gaussian-MLP-policy PPO benchmark over the MuJoCo 1M env set,
    once per seed in the module-level `_seeds`."""
    iterate_experiments(gaussian_mlp_policy, MuJoCo1M_ENV_SET, seeds=_seeds)
def main(dataset_name, task_type, target_size=10000, device='cuda'):
    """Construct up to *target_size* discriminative examples for a dataset.

    Generates paraphrase/hallucination variants per source example and
    checkpoints the accumulated results to JSON after every accepted
    example. For 'dialog_fact' tasks, duplicate facts are skipped.
    """
    examples = get_examples_for_discriminative_construction(dataset_name=dataset_name)
    para_generator = ParaphraseGenerator(device=device)
    hallu_generator = HallucinationGenerator(device=device)
    save_path = f'constructed_data/{dataset_name}/examples.json'
    os.makedirs('/'.join(save_path.split('/')[:-1]), exist_ok=True)
    # Start with a valid empty JSON file.
    # Fixed: the original passed open(...) directly into json.dump and
    # leaked the file handle; use context managers throughout.
    with open(save_path, 'w') as f:
        json.dump([], f)
    if task_type == 'dialog_fact':
        # Track facts already processed (set membership is all we need).
        seen_facts = set()
    results = []
    for example in tqdm(examples, desc='Constructing'):
        if task_type == 'dialog_fact':
            if example['fact'] in seen_facts:
                continue
            seen_facts.add(example['fact'])
        constructed_example = construct(example=example, task_type=task_type, para_generator=para_generator, hallu_generator=hallu_generator, dataset=dataset_name)
        if constructed_example is not None:
            results.append(constructed_example)
            # Checkpoint after every accepted example so partial runs survive.
            with open(save_path, 'w') as f:
                json.dump(results, f, indent=4)
            if len(results) % 1000 == 0:
                print(f'{len(results)} examples constructed.')
            if len(results) >= target_size:
                break
class AdamW(TorchOptimizer):
    """Adam with decoupled weight decay (Loshchilov & Hutter, 2019).

    Weight decay is applied directly to the parameters before the Adam
    update, rather than being folded into the gradient.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False):
        """Validate hyperparameters and initialize the base optimizer.

        Raises:
            ValueError: on a negative lr/eps or betas outside [0, 1).
        """
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        # Older checkpoints may predate the amsgrad option.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform one optimization step; returns the closure's loss, if any."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Decoupled weight decay: shrink parameters directly.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state initialization on first step for this param.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Fixed: the deprecated (and since-removed) positional
                # add_/addcmul_/addcdiv_ overloads taking the scalar first are
                # replaced with the keyword alpha=/value= forms.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Use the running max of the second moment (AMSGrad).
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
class Ripple01(Benchmark):
    """Ripple 1 multimodal benchmark function on [0, 1]^N.

    Global optimum at x_i = 0.1 with f* = -2.2 (approximate literature
    value). The numeric expressions below intentionally mirror the
    published formula; do not reorder them.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain [0, 1] per coordinate.
        self._bounds = list(zip(([0.0] * self.N), ([1.0] * self.N)))
        self.global_optimum = [[0.1 for _ in range(self.N)]]
        self.fglob = (- 2.2)

    def fun(self, x, *args):
        """Evaluate the objective at x (vectorized over coordinates)."""
        self.nfev += 1
        # Gaussian envelope centered at 0.1 with width 0.8.
        u = (((- 2.0) * log(2.0)) * (((x - 0.1) / 0.8) ** 2.0))
        # High-frequency ripple term riding on a sin^6 carrier.
        v = ((sin(((5.0 * pi) * x)) ** 6.0) + (0.1 * (cos(((500.0 * pi) * x)) ** 2.0)))
        return sum(((- exp(u)) * v))
class FlaskForm(Form):
    """Flask-specific WTForms Form with CSRF protection and request-aware
    formdata loading (form data, file uploads, or JSON bodies).
    """

    class Meta(DefaultMeta):
        csrf_class = _FlaskFormCSRF
        csrf_context = session  # limited to the request context

        # Fixed: these four were written as bare `_property` lines — corrupted
        # `@property` decorators — so each config accessor was a plain method
        # instead of an attribute-style property.
        @property
        def csrf(self):
            """Whether CSRF protection is enabled (WTF_CSRF_ENABLED)."""
            return current_app.config.get('WTF_CSRF_ENABLED', True)

        @property
        def csrf_secret(self):
            """CSRF signing key; falls back to the app secret key."""
            return current_app.config.get('WTF_CSRF_SECRET_KEY', current_app.secret_key)

        @property
        def csrf_field_name(self):
            return current_app.config.get('WTF_CSRF_FIELD_NAME', 'csrf_token')

        @property
        def csrf_time_limit(self):
            return current_app.config.get('WTF_CSRF_TIME_LIMIT', 3600)

        def wrap_formdata(self, form, formdata):
            """Resolve _Auto formdata from the active request, if submitted."""
            if formdata is _Auto:
                if _is_submitted():
                    if request.files:
                        return CombinedMultiDict((request.files, request.form))
                    elif request.form:
                        return request.form
                    elif request.get_json():
                        return ImmutableMultiDict(request.get_json())
                return None
            return formdata

        def get_translations(self, form):
            """Use Flask-Babel translations unless WTF_I18N_ENABLED is off."""
            if not current_app.config.get('WTF_I18N_ENABLED', True):
                return super(FlaskForm.Meta, self).get_translations(form)
            return translations

    def __init__(self, formdata=_Auto, **kwargs):
        # Deprecated csrf_enabled kwarg is translated to meta={'csrf': ...}.
        csrf_enabled = kwargs.pop('csrf_enabled', None)
        if csrf_enabled is not None:
            warnings.warn(FlaskWTFDeprecationWarning('"csrf_enabled" is deprecated and will be removed in 1.0. Pass meta={\'csrf\': False} instead.'), stacklevel=3)
            kwargs['meta'] = (kwargs.get('meta') or {})
            kwargs['meta'].setdefault('csrf', csrf_enabled)
        super(FlaskForm, self).__init__(formdata=formdata, **kwargs)

    def is_submitted(self):
        """True if the active request used a submission method (POST etc.)."""
        return _is_submitted()

    def validate_on_submit(self):
        """Shortcut: submitted AND valid."""
        return self.is_submitted() and self.validate()

    def hidden_tag(self, *fields):
        """Render the form's hidden fields (all of them by default) as markup."""
        def hidden_fields(fields):
            for f in fields:
                if isinstance(f, string_types):
                    f = getattr(self, f, None)
                if (f is None) or (not isinstance(f.widget, HiddenInput)):
                    continue
                yield f
        return Markup(u'\n'.join((text_type(f) for f in hidden_fields(fields or self))))
class GraphNode(Node):
    """A named graph node with a type, an (x, y) center position, and an
    open-ended attribute dictionary (also reachable via item syntax).

    Equality and hashing are by name only.
    """

    def __init__(self, name: str):
        self.name: str = name
        self.node_type: NodeType = NodeType.MEASURED  # default type
        self.center_x: int = -1  # -1 means "position not set"
        self.center_y: int = -1
        self.attributes = {}

    def get_name(self) -> str:
        return self.name

    def get_node_type(self) -> NodeType:
        return self.node_type

    def get_center_x(self) -> int:
        return self.center_x

    def get_center_y(self) -> int:
        return self.center_y

    def set_name(self, name: str):
        if name is None:
            raise TypeError('Name cannot be of NoneType')
        self.name = name

    def set_node_type(self, node_type: NodeType):
        if node_type is None:
            raise TypeError('Node cannot be of NoneType')
        self.node_type = node_type

    def set_center_x(self, center_x: int):
        self.center_x = center_x

    def set_center_y(self, center_y: int):
        self.center_y = center_y

    def set_center(self, center_x: int, center_y: int):
        """Set both center coordinates at once."""
        self.center_x = center_x
        self.center_y = center_y

    def __str__(self):
        return self.name

    def __eq__(self, other):
        if not isinstance(other, GraphNode):
            return False
        return self.name == other.get_name()

    def __lt__(self, other):
        return self.name < other.name

    def __hash__(self):
        return hash(self.name)

    def like(self, name: str):
        """Return a new node named *name* that copies this node's type."""
        clone = GraphNode(name)
        clone.set_node_type(self.get_node_type())
        return clone

    def get_all_attributes(self):
        return self.attributes

    def get_attribute(self, key):
        return self.attributes[key]

    def __getitem__(self, key):
        return self.get_attribute(key)

    def remove_attribute(self, key):
        self.attributes.pop(key)

    def __delitem__(self, key):
        self.remove_attribute(key)

    def add_attribute(self, key, value):
        self.attributes[key] = value

    def __setitem__(self, key, value):
        self.add_attribute(key, value)
class SingleLayerFunctionalLinearModel(torch.nn.Module):
    """Minimal model wrapping a single FunctionalLinear layer."""

    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()

    def forward(self, x):
        return self.linear1(x)
class JSONWriter(EventWriter):
    """Appends one JSON object per write() call to a line-delimited file.

    Each record carries the 1-based iteration plus the latest smoothed
    scalar values from the event storage.
    """

    def __init__(self, json_file, window_size=20):
        self.file_handle = open(json_file, 'a')
        self.window_size = window_size  # smoothing window for scalars

    def write(self, **kwargs):
        storage = get_event_storage()
        record = {'iteration': storage.iter + 1}
        record.update(storage.latest_with_smoothing_hint(self.window_size))
        self.file_handle.write(json.dumps(record, sort_keys=True) + '\n')
        self.file_handle.flush()
        try:
            os.fsync(self.file_handle.fileno())
        except AttributeError:
            # Stream has no real file descriptor (e.g. an in-memory buffer).
            pass

    def close(self):
        self.file_handle.close()
class DepTree():
    """Dependency tree built from CoNLL-style rows.

    Each row is indexable: row[0] = dependent index, row[1] = word form,
    row[6] = head index (0 = root). Iterating the tree yields dependent
    indices in insertion order.
    """

    def __init__(self, buff):
        self._head2deps = defaultdict(list)
        self._dep2head = dict()
        self._str = []
        for line in buff:
            dep_idx = int(line[0])
            head_idx = int(line[6])
            self.head2deps[head_idx].append(dep_idx)
            self.dep2head[dep_idx] = head_idx
            self._str.append(line[1])

    def count_nonprojective(self):
        """Return a 0/1 flag per dependent: 1 if its arc is non-projective.

        An arc (dep, head) is non-projective when some token strictly inside
        the arc's span attaches to a head strictly outside it.
        """
        nonproj = []
        for dep in self:
            head = self.dep2head[dep]
            span_min = min(dep, head)
            span_max = max(dep, head)
            # Fixed: xrange is Python 2 only — use range.
            for mid_dep in range(span_min + 1, span_max):
                mid_head = self.dep2head[mid_dep]
                if mid_head < span_min or mid_head > span_max:
                    crossing = True
                    break
            else:
                crossing = False
            nonproj.append(int(crossing))
        return nonproj

    # Fixed: these were plain methods, yet __init__ subscripts them as
    # mappings (self.head2deps[...]) — they must be read-only properties.
    @property
    def head2deps(self):
        return self._head2deps

    @property
    def dep2head(self):
        return self._dep2head

    def __iter__(self):
        return (dep for dep in self.dep2head)

    def __len__(self):
        return len(self.dep2head)

    def __str__(self):
        return ' '.join(self._str) + '\n'
def class_prior(complementary_labels):
    """Empirical class-prior distribution of the complementary labels.

    Returns an array where entry i is the fraction of labels equal to i.
    """
    counts = np.bincount(complementary_labels)
    total = len(complementary_labels)
    return counts / total
def layernorm_pytorch_lstm_creator(**kwargs):
    """Benchmark factory: cuDNN LSTM plus per-step LayerNorm cost (CUDA).

    NOTE(review): ln_i_output / ln_h_output are computed and discarded, and
    the LayerNorms run on a fixed random input rather than the real gate
    activations — this looks intentional for timing LayerNorm overhead, not
    for producing a functionally layernormed LSTM; confirm with the
    benchmark suite.
    """
    (input, hidden, _, module) = lstm_inputs(return_module=True, **kwargs)
    batch_size = kwargs['miniBatch']
    hidden_size = kwargs['hiddenSize']
    # Norms sized like the gate pre-activations (4 * hidden) and the cell.
    ln_i = torch.nn.LayerNorm((4 * hidden_size)).cuda()
    ln_h = torch.nn.LayerNorm((4 * hidden_size)).cuda()
    ln_c = torch.nn.LayerNorm(hidden_size).cuda()
    # Fixed random stand-in for gate activations, reused every step.
    ln_input1 = torch.randn(batch_size, (4 * hidden_size), device='cuda')

    def forward(input, hidden):
        # Run the fused LSTM, then add LayerNorm work once per time step.
        (out, new_hidden) = module(input, hidden)
        seq_len = len(input.unbind(0))
        (hy, cy) = new_hidden
        for i in range(seq_len):
            ln_i_output = ln_i(ln_input1)
            ln_h_output = ln_h(ln_input1)
            cy = ln_c(cy)
        return (out, (hy, cy))
    return ModelDef(inputs=[input, hidden], params=flatten_list(module.all_weights), forward=forward, backward_setup=lstm_backward_setup, backward=None)
def _test_predictors(self, predictors, overwrite_cfgs, overwrite_in_channels, hwsize):
    """Generator helper: build and run every registered predictor once.

    For each (name, builder) pair, loads an override config when one is
    registered (falling back to a copy of the global config), builds the
    predictor with its input-channel count, feeds a random NCHW tensor of
    spatial size hwsize, and yields (input, output, cfg) for the caller's
    assertions.
    """
    self.assertGreater(len(predictors), 0)
    in_channels_default = 64
    for (name, builder) in predictors.items():
        print('Testing {}...'.format(name))
        if (name in overwrite_cfgs):
            cfg = load_config(overwrite_cfgs[name])
        else:
            # No override registered: use an isolated copy of the global cfg.
            cfg = copy.deepcopy(g_cfg)
        in_channels = overwrite_in_channels.get(name, in_channels_default)
        fe = builder(cfg, in_channels)
        (N, C_in, H, W) = (2, in_channels, hwsize, hwsize)
        input = torch.rand([N, C_in, H, W], dtype=torch.float32)
        out = fe(input)
        (yield (input, out, cfg))
class Decoder(nn.Module):
    """Transformer decoder stack with an optional per-step inference cache.

    Returns the normalized final hidden states along with the attention
    weights averaged over all layers.
    """

    def __init__(self, vocab_size, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.embed = Embedder(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model, dropout=dropout)
        self.layers = get_clones(DecoderLayer(d_model, heads, dropout), N)
        self.norm = Norm(d_model)
        self.cache = None

    def _init_cache(self):
        # One self-attention key/value slot per layer.
        self.cache = {('layer_%d' % i): {'self_keys': None, 'self_values': None}
                      for i in range(self.N)}

    def forward(self, trg, e_outputs, src_mask, trg_mask, step=None):
        """Decode trg against encoder outputs; step enables incremental decoding."""
        if step == 1:
            # First decoding step: start a fresh cache.
            self._init_cache()
        x = self.pe(self.embed(trg), step)
        attn_weights = []
        for i in range(self.N):
            cache = self.cache['layer_%d' % i] if step is not None else None
            x, attn = self.layers[i](x, e_outputs, src_mask, trg_mask, layer_cache=cache)
            attn_weights.append(attn)
        return self.norm(x), sum(attn_weights) / self.N
def main():
    """Evaluate the tracker over the selected benchmark and write results.

    VOT2016/2018/2019 use the reset-based protocol (re-initialize 5 frames
    after a tracking failure); every other dataset is one-pass evaluation.
    Result files follow each toolkit's expected directory layout under
    ./results/.

    Fixed: `np.int` was removed in NumPy 1.24 — replaced with builtin int.
    """
    cfg.merge_from_file(args.config)
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    dataset_root = os.path.join(cur_dir, '../testing_dataset', args.dataset)
    # Build the model, load weights, and wrap it in a tracker.
    model = ModelBuilder()
    model = load_pretrain(model, args.snapshot).cuda().eval()
    tracker = build_tracker(model)
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root, load_img=False)
    model_name = args.tracker_name
    total_lost = 0
    if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:
        # Reset-based ("baseline") evaluation.
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                if video.name != args.video:
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            pred_bboxes = []
            for idx, (img, gt_bbox) in enumerate(video):
                if len(gt_bbox) == 4:
                    # Convert (x, y, w, h) to the 8-point polygon VOT expects.
                    gt_bbox = [gt_bbox[0], gt_bbox[1], gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1, gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1, gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]
                tic = cv2.getTickCount()
                if idx == frame_counter:
                    # (Re-)initialization frame; the toolkit encodes it as 1.
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    tracker.init(img, gt_bbox_)
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif idx > frame_counter:
                    outputs = tracker.track(img)
                    pred_bbox = outputs['bbox']
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if overlap > 0:
                        pred_bboxes.append(pred_bbox)
                    else:
                        # Failure: flag with 2 and re-init 5 frames later.
                        pred_bboxes.append(2)
                        frame_counter = idx + 5
                        lost_number += 1
                else:
                    # Skipped frames while waiting for re-initialization.
                    pred_bboxes.append(0)
                toc += cv2.getTickCount() - tic
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > frame_counter:
                    cv2.polylines(img, [np.array(gt_bbox, int).reshape((-1, 1, 2))], True, (0, 255, 0), 3)
                    if cfg.MASK.MASK:
                        cv2.polylines(img, [np.array(pred_bbox, int).reshape((-1, 1, 2))], True, (0, 255, 255), 3)
                    else:
                        bbox = list(map(int, pred_bbox))
                        cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # Write one result file per video in the VOT baseline layout.
            video_path = os.path.join('results', args.dataset, model_name, 'baseline', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write('{:d}\n'.format(x))
                    else:
                        f.write(','.join([vot_float2str('%.4f', i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(v_idx + 1, video.name, toc, idx / toc, lost_number))
            total_lost += lost_number
        print('{:s} total lost: {:d}'.format(model_name, total_lost))
    else:
        # One-pass evaluation (OPE).
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                if video.name != args.video:
                    continue
            toc = 0
            pred_bboxes = []
            scores = []
            track_times = []
            for idx, (img, gt_bbox) in enumerate(video):
                tic = cv2.getTickCount()
                if idx == 0:
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    tracker.init(img, gt_bbox_)
                    pred_bbox = gt_bbox_
                    scores.append(None)
                    if 'VOT2018-LT' == args.dataset:
                        pred_bboxes.append([1])
                    else:
                        pred_bboxes.append(pred_bbox)
                else:
                    outputs = tracker.track(img)
                    pred_bbox = outputs['bbox']
                    pred_bboxes.append(pred_bbox)
                    scores.append(outputs['best_score'])
                toc += cv2.getTickCount() - tic
                track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > 0:
                    gt_bbox = list(map(int, gt_bbox))
                    pred_bbox = list(map(int, pred_bbox))
                    cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]), (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                    cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]), (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # Each toolkit expects its own on-disk layout.
            if 'VOT2018-LT' == args.dataset:
                video_path = os.path.join('results', args.dataset, model_name, 'longterm', video.name)
                if not os.path.isdir(video_path):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
                result_path = os.path.join(video_path, '{}_001_confidence.value'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in scores:
                        f.write('\n') if x is None else f.write('{:.6f}\n'.format(x))
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write('{:.6f}\n'.format(x))
            elif 'GOT-10k' == args.dataset:
                video_path = os.path.join('results', args.dataset, model_name, video.name)
                if not os.path.isdir(video_path):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write('{:.6f}\n'.format(x))
            else:
                model_path = os.path.join('results', args.dataset, model_name)
                if not os.path.isdir(model_path):
                    os.makedirs(model_path)
                result_path = os.path.join(model_path, '{}.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(v_idx + 1, video.name, toc, idx / toc))
class LocalStack(object):
    """A stack of objects stored on a context-local (werkzeug-style).

    Each thread/greenlet (as determined by the underlying ``Local``) gets its
    own independent stack.  ``__call__`` returns a proxy that always resolves
    to the current top of the stack.

    FIX: restored the ``@property`` / ``@__ident_func__.setter`` decorators.
    The original had ``__ident_func__.setter`` as a bare expression, which
    raises ``AttributeError`` at class-creation time (plain functions have no
    ``.setter``), and ``top`` had to be a property for ``__call__``/``pop`` to
    work with ``self.top``.
    """

    def __init__(self):
        self._local = Local()

    def __release_local__(self):
        # Delegate release to the wrapped Local.
        self._local.__release_local__()

    @property
    def __ident_func__(self):
        return self._local.__ident_func__

    @__ident_func__.setter
    def __ident_func__(self, value):
        # Bypass Local's own __setattr__, which stores per-ident values.
        object.__setattr__(self._local, '__ident_func__', value)

    def __call__(self):
        """Return a LocalProxy bound to the current top of this stack."""
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)

    def push(self, obj):
        """Push *obj* onto the context-local stack and return the stack."""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Remove and return the topmost item; None if the stack is empty.

        When the last item is removed the whole context-local storage is
        released as well.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack, or None if the stack is empty."""
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
class DatasetTests(unittest.TestCase):
    """Unit tests for Dataset cluster-size generation.

    FIX: ``setUpClass``/``tearDownClass`` must be classmethods — unittest
    invokes them on the class, so without ``@classmethod`` they fail with a
    missing ``cls`` argument.
    """

    @classmethod
    def setUpClass(cls):
        # Shared default kwargs plus a deterministic RNG for reproducibility.
        cls.args = {'num_examples': 1000, 'num_clusters': 10, 'num_dims': 2, 'equal_clusters': False, 'min_clust_size': 5}
        Dataset.global_rng = np.random.RandomState(42)

    @classmethod
    def tearDownClass(cls):
        # Reset the shared RNG so other test classes are unaffected.
        Dataset.global_rng = None

    def test_equal_clusters_are_equal(self):
        """With equal_clusters=True every cluster gets the same size (7883 // 17 == 463..464)."""
        kwargs = self.args.copy()
        kwargs['num_examples'] = 7883
        kwargs['num_clusters'] = 17
        kwargs['equal_clusters'] = True
        obj = Dataset(**kwargs)
        for size in obj.cluster_sizes:
            with self.subTest(size=size):
                self.assertEqual(size, 464)

    def test_random_cluster_sizes_sum(self):
        """Random sizes should sum (to within rounding) to num_examples."""
        kwargs = self.args.copy()
        kwargs['num_examples'] = 10000
        kwargs['num_clusters'] = 100
        obj = Dataset(**kwargs)
        sizes_sum = np.sum(obj.cluster_sizes)
        close_to_sum = (np.around(sizes_sum, decimals=-1) == kwargs['num_examples'])
        self.assertTrue(close_to_sum)

    def test_random_cluster_sizes_different(self):
        """Random (non-equal) clusters must not all share one size."""
        kwargs = self.args.copy()
        kwargs['num_examples'] = 10000
        kwargs['num_clusters'] = 100
        obj = Dataset(**kwargs)
        unique_sizes = set(obj.cluster_sizes)
        self.assertTrue(len(unique_sizes) > 1)

    def test_incorrect_min_clust_size(self):
        """An infeasible min_clust_size must be clamped to num_examples / num_clusters."""
        kwargs = self.args.copy()
        kwargs['num_examples'] = 100
        kwargs['num_clusters'] = 10
        kwargs['min_clust_size'] = 20
        obj = Dataset(**kwargs)
        self.assertLessEqual(obj.min_clust_size, obj.num_examples / obj.num_clusters)

    def test_exact_min_clust_size(self):
        """When min size * clusters == examples exactly, all clusters get min size."""
        kwargs = self.args.copy()
        kwargs['num_examples'] = 200
        kwargs['num_clusters'] = 5
        kwargs['min_clust_size'] = 40
        obj = Dataset(**kwargs)
        self.assertEqual(obj.cluster_sizes, [40] * 5)
def _forward_gravity(receivers, nodes, densities, fields, cell_nodes, kernel_func, constant_factor):
    """Accumulate the gravity field of a node-based mesh onto receivers, in place.

    For every receiver, evaluates ``kernel_func`` between the receiver and
    every mesh node, then adds each cell's contribution
    ``constant_factor * density * cell_kernel`` into ``fields[i]``.

    Parameters (all arrays, per the indexing below):
    - receivers: indexed ``[i, 0..2]`` — x/y/z coordinates per receiver.
    - nodes: indexed ``[j, 0..2]`` — x/y/z coordinates per node.
    - densities: one value per cell.
    - fields: per-receiver accumulator, MODIFIED IN PLACE.
    - cell_nodes: indexed ``[k, 0..7]`` — eight node indices per cell.

    NOTE(review): uses ``prange``, so this is presumably a numba
    ``@jit(parallel=True)`` kernel — confirm before restructuring the loops.
    """
    n_receivers = receivers.shape[0]
    n_nodes = nodes.shape[0]
    n_cells = cell_nodes.shape[0]
    # Outer loop runs in parallel; each receiver has its own kernel scratch array.
    for i in prange(n_receivers):
        kernels = np.empty(n_nodes)
        for j in range(n_nodes):
            kernels[j] = _evaluate_kernel(receivers[(i, 0)], receivers[(i, 1)], receivers[(i, 2)], nodes[(j, 0)], nodes[(j, 1)], nodes[(j, 2)], kernel_func)
        # Combine the per-node kernels into per-cell contributions.
        for k in range(n_cells):
            fields[i] += ((constant_factor * densities[k]) * _kernels_in_nodes_to_cell(kernels, cell_nodes[(k, 0)], cell_nodes[(k, 1)], cell_nodes[(k, 2)], cell_nodes[(k, 3)], cell_nodes[(k, 4)], cell_nodes[(k, 5)], cell_nodes[(k, 6)], cell_nodes[(k, 7)]))
def TaylorTwographSRG(q):
    """Return the strongly regular graph obtained from the Taylor two-graph on q.

    Builds the descendant graph with a clique partition, adds back the removed
    vertex, and Seidel-switches on half of the cliques.

    FIX: the slice bound used ``/`` (true division), which produces a float in
    Python 3 and makes the slice invalid; ``(q**2 + 1) // 2`` is the intended
    integer half (q**2 + 1 is even for odd prime powers q).
    """
    (G, l, v0) = TaylorTwographDescendantSRG(q, clique_partition=True)
    G.add_vertex(v0)
    # Switch on the union of the first half of the clique partition.
    G.seidel_switching(sum(l[:(q ** 2 + 1) // 2], []))
    G.name('Taylor two-graph SRG')
    return G
class BEVFusion_lidar(Base3DFusionModel):
    """LiDAR-only BEVFusion: voxelize -> sparse backbone -> BEV decoder -> seg head.

    NOTE(review): the original contained bare expressions ``_grad()``,
    ``_fp32()`` and ``_fp16(apply_to=...)`` before method definitions — no-ops
    as written.  They look like stripped decorators and are restored as such
    below; confirm against the upstream source.
    """

    def __init__(self, C, xbound, ybound, zbound) -> None:
        super().__init__()
        # FIX: close the config file deterministically via a context manager.
        with open('model/bevfusion/lidar-centerpoint-bev128.yaml', 'r') as f:
            cfg = yaml.safe_load(f)
        encoders = cfg['model']['encoders']
        decoder = cfg['model']['decoder']
        heads = cfg['model']['heads']
        self.xbound = xbound
        self.ybound = ybound
        self.zbound = zbound
        self.encoders = nn.ModuleDict()
        self.encoders['lidar'] = nn.ModuleDict({'voxelize': Voxelization(**encoders['lidar']['voxelize']), 'backbone': build_backbone(encoders['lidar']['backbone'])})
        self.voxelize_reduce = encoders['lidar'].get('voxelize_reduce', True)
        self.decoder = nn.ModuleDict({'backbone': build_backbone(decoder['backbone']), 'neck': build_neck(decoder['neck'])})
        self.heads = BEVSegmentationHead(heads['map']['in_channels'], heads['map']['grid_transform'], heads['map']['out_channels'])
        # Channel-reduction head: 512 -> 256 -> 128 -> C.
        self.conv_out = nn.Sequential(nn.Conv2d(512, 256, 3, padding=1, bias=False), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 128, 3, padding=1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, C, 1))

    def extract_lidar_features(self, x, mask) -> torch.Tensor:
        """Voxelize the point cloud and run the sparse LiDAR backbone."""
        (feats, coords, sizes) = self.voxelize(x, mask)
        # Coord rows carry the batch index in column 0; the last row has the
        # highest batch index, so max index + 1 is the batch size.
        batch_size = coords[-1, 0] + 1
        x = self.encoders['lidar']['backbone'](feats, coords, batch_size, sizes=sizes)
        return x

    @_grad()
    @_fp32()
    def voxelize(self, points, mask):
        """Voxelize each sample, tag voxels with their batch index and concatenate.

        Returns (feats, coords, sizes); feats are optionally mean-reduced over
        the points in each voxel.  ``mask`` is accepted for interface
        compatibility but not used here.
        """
        feats, coords, sizes = [], [], []
        for k, res in enumerate(points):
            (f, c, n) = self.encoders['lidar']['voxelize'](res)
            feats.append(f)
            # Prepend the batch index k to each voxel coordinate row.
            coords.append(F.pad(c, (1, 0), mode='constant', value=k))
            sizes.append(n)
        feats = torch.cat(feats, dim=0)
        coords = torch.cat(coords, dim=0)
        sizes = torch.cat(sizes, dim=0)
        if self.voxelize_reduce:
            # Average the per-point features within each voxel.
            feats = feats.sum(dim=1, keepdim=False) / sizes.type_as(feats).view(-1, 1)
            feats = feats.contiguous()
        return (feats, coords, sizes)

    @_fp16(apply_to=('img', 'points'))
    def forward(self, lidar_data, lidar_mask):
        """Return (C-channel BEV map, raw backbone BEV features), each transposed on dims 2/3."""
        voxel_feature_ori = self.extract_lidar_features(lidar_data, lidar_mask)
        voxel_feature = self.decoder['backbone'](voxel_feature_ori)
        voxel_feature = self.decoder['neck'](voxel_feature)
        voxel_feature = self.heads(voxel_feature)
        return (self.conv_out(voxel_feature).transpose(3, 2), voxel_feature_ori.transpose(3, 2))
@_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard sigmoid: ``clamp((x + bias) / divisor, min_value, max_value)``.

    FIX: ``_LAYERS.register_module()`` was a bare expression (the decorator's
    ``@`` had been stripped), so the layer was never registered; restored as a
    class decorator.
    """

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        # Affine transform, then in-place clamp to [min_value, max_value].
        x = (x + self.bias) / self.divisor
        return x.clamp_(self.min_value, self.max_value)
class SEGating(nn.Module):
def __init__(self, inplanes, reduction=16):
super().__init__()
self.pool = nn.AdaptiveAvgPool3d(1)
self.attn_layer = nn.Sequential(nn.Conv3d(inplanes, inplanes, kernel_size=1, stride=1, bias=True), nn.Sigmoid())
def forward(self, x):
out = self.pool(x)
y = self.attn_layer(out)
return (x * y) |
class AnalyticalSolver(COPSolver):
    """Closed-form solver for the two-objective convex-combination problem."""

    def solve(self, gradients):
        r"""Return convex weights ``[alpha, 1 - alpha]`` for two gradients.

        .. math::

            \alpha = \frac{(\nabla_{w}L_{2}(w) - \nabla_{w}L_{1})^{T} \star \nabla_{w}L_{2}(w)}
            {\|\nabla_{w}L_{1} - \nabla_{w}L_{2}\|^{2}}

        Source: Multi-Gradient Descent For Multi-Objective Recommender Systems

        FIX: the alpha expression was missing its dot-product operators
        (a syntax error in the original); restored per the formula above.
        Also moved the stray mid-function string into this docstring.

        Raises TypeError if *gradients* is None and ValueError if it does not
        contain exactly two equal-length gradient vectors.
        """
        if gradients is None:
            raise TypeError('Argument: gradients type cannot be None')
        if len(gradients) != 2:
            raise ValueError('Argument: The number of gradients must be equal to 2')
        if len(gradients[0]) != len(gradients[1]):
            raise ValueError('Argument: The gradients must have the same length')
        # Identical gradients: any convex combination is optimal; pick the midpoint.
        if (gradients[0] == gradients[1]).all():
            return [0.5, 0.5]
        diff = gradients[1] - gradients[0]
        # alpha = ((g2 - g1) . g2) / ||g1 - g2||^2, then clip to [0, 1].
        alpha = (diff @ gradients[1]) / (diff @ diff)
        if alpha < 0:
            alpha = 0
        if alpha > 1:
            alpha = 1
        return [alpha, 1 - alpha]
def tonumpy(data):
    """Convert *data* (ndarray, torch Tensor, or legacy Variable) to a numpy array.

    FIXES:
    - use the public ``t.Tensor`` instead of the private ``t._C._TensorBase``;
    - ``detach()`` before ``numpy()`` so tensors that require grad convert
      cleanly instead of raising;
    - raise TypeError for unsupported inputs instead of silently returning None.
    """
    if isinstance(data, np.ndarray):
        return data
    if isinstance(data, t.Tensor):
        return data.detach().cpu().numpy()
    # Legacy pre-0.4 Variable wrapper: unwrap and retry.
    if isinstance(data, t.autograd.Variable):
        return tonumpy(data.data)
    raise TypeError('Cannot convert {} to numpy array'.format(type(data)))
def _get_funcs(names, arrays, dtype, lib_name, fmodule, cmodule, fmodule_name, cmodule_name, alias, ilp64=False):
    """Resolve BLAS/LAPACK-style wrapper functions by name and array dtype.

    Determines the best type prefix for *arrays*/*dtype*, looks each requested
    name up first in the preferred module (Fortran or C) with fallback to the
    other, and annotates every resolved function with ``module_name``,
    ``typecode``, ``dtype``, ``int_dtype`` and ``prefix``.

    Returns a single function when *names* is a plain string, otherwise a list.
    Raises ValueError when a name cannot be resolved in either module.
    """
    single = isinstance(names, str)
    if single:
        names = (names,)
    dtype = _np.dtype(dtype)
    (prefix, dtype, prefer_fortran) = find_best_blas_type(arrays, dtype)
    # Lookup order: C module first unless the Fortran flavour is preferred.
    ordered = [(cmodule, cmodule_name), (fmodule, fmodule_name)]
    if prefer_fortran:
        ordered.reverse()
    int_dtype = _np.dtype(_np.int64 if ilp64 else _np.intc)
    resolved = []
    for name in names:
        func_name = prefix + name
        func_name = alias.get(func_name, func_name)
        func = getattr(ordered[0][0], func_name, None)
        module_name = ordered[0][1]
        if func is None:
            func = getattr(ordered[1][0], func_name, None)
            module_name = ordered[1][1]
        if func is None:
            raise ValueError(f'{lib_name} function {func_name} could not be found')
        (func.module_name, func.typecode) = (module_name, prefix)
        func.dtype = dtype
        func.int_dtype = int_dtype
        func.prefix = prefix
        resolved.append(func)
    return resolved[0] if single else resolved
def out_names(inputs):
    """Tokenize a '[SEP]'-separated question/context pair and return each token decoded to text."""
    (question, context) = inputs.split('[SEP]')
    encoding = pmodel.tokenizer(question, context)
    return [pmodel.tokenizer.decode([token_id]) for token_id in encoding['input_ids']]
class FakePolicy(Policy):
    """Test stub: delegates to the base Policy, then returns the sentinel 'checked'.

    The sentinel lets tests verify that the base-class methods were invoked.
    """

    def compute_action(self, observation, act_mask, evaluate: bool, hidden_state: Any=None, **kwargs):
        # Call the base implementation first (for its side effects / contract checks).
        super().compute_action(observation, act_mask, evaluate, hidden_state, **kwargs)
        return 'checked'

    def coordinate(self, state, message: Any) -> Any:
        # Same pattern as compute_action: delegate, then return the sentinel.
        super().coordinate(state, message)
        return 'checked'
def load_examples(path, split, verbose=False):
    """Load WinoBias 'pro_stereotyped_type1' sentence pairs for *split*.

    Reads the female/male occupation word lists plus the split file, pairs
    consecutive rows, and keeps only pairs that share the same base string and
    pronoun substitutes while differing in occupation and continuation.

    Returns a list of WinobiasExample objects; prints load/skip counts.
    """
    print(f'Split: {split.upper()}')
    with open(os.path.join(path, 'female_occupations.txt')) as f:
        female_occupations = [row.lower().strip() for row in f]
    with open(os.path.join(path, 'male_occupations.txt')) as f:
        male_occupations = [row.lower().strip() for row in f]
    occupations = (female_occupations + male_occupations)
    fname = f'pro_stereotyped_type1.txt.{split}'
    with open(os.path.join(path, fname)) as f:
        examples = []
        row_pair = []  # accumulates two consecutive rows forming one example pair
        skip_count = 0
        for row in f:
            row_pair.append(row)
            if (len(row_pair) == 2):
                skip = False
                # Each row must contain exactly two bracketed spans.
                if ((row_pair[0].count('[') != 2) or (row_pair[1].count('[') != 2)):
                    skip = True
                elif ('[him]' in (row_pair[0] + row_pair[1])):
                    # '[him]' rows are excluded — presumably ambiguous for the
                    # pronoun substitution; confirm against the dataset notes.
                    skip = True
                else:
                    (base_string1, substitutes1, continuation1, occupation1) = _parse_row(row_pair[0], occupations)
                    (base_string2, substitutes2, continuation2, occupation2) = _parse_row(row_pair[1], occupations)
                    # The two rows must describe the same template and pronouns.
                    if ((base_string1 != base_string2) or (substitutes1 != substitutes2)):
                        skip = True
                if skip:
                    if verbose:
                        print('Skipping: ', row_pair)
                    skip_count += 1
                    row_pair = []
                    continue
                base_string = base_string1
                assert (substitutes1 == substitutes2)
                (female_pronoun, male_pronoun) = substitutes1
                # Rows must differ in continuation and occupation (sanity checks).
                assert ((len(continuation1) > 0) and (len(continuation2) > 0) and (continuation1 != continuation2))
                assert ((len(occupation1) > 0) and (len(occupation2) > 0) and (occupation1 != occupation2))
                # Orient the pair so the occupations map onto the female/male lists.
                if (occupation1 in female_occupations):
                    female_occupation = occupation1
                    female_occupation_continuation = continuation1
                    male_occupation = occupation2
                    male_occupation_continuation = continuation2
                    assert (occupation2 in male_occupations)
                else:
                    male_occupation = occupation1
                    male_occupation_continuation = continuation1
                    female_occupation = occupation2
                    female_occupation_continuation = continuation2
                    assert (occupation1 in male_occupations)
                    assert (occupation2 in female_occupations)
                examples.append(WinobiasExample(base_string, female_pronoun, male_pronoun, female_occupation, male_occupation, female_occupation_continuation, male_occupation_continuation))
                row_pair = []
        # The file must contain an even number of rows (complete pairs only).
        assert (row_pair == [])
    print(f'Loaded {len(examples)} pairs. Skipped {skip_count} pairs.')
    return examples
def test_set_get_value(atomic_integer_null):
    """The value property must round-trip: starts at zero, then stores what was assigned."""
    assert atomic_integer_null.value == 0
    new_value = 23
    atomic_integer_null.value = new_value
    assert atomic_integer_null.value == new_value
def configuration_to_dict(handlers):
    """Collect every option set by *handlers* into {section_prefix: {option: value}}.

    For each handler option, a ``get_<option>`` accessor on the target object
    is preferred when present; otherwise the attribute is read directly.
    """
    summary = defaultdict(dict)
    for handler in handlers:
        section = handler.section_prefix
        target = handler.target_obj
        for option in handler.set_options:
            getter = getattr(target, 'get_%s' % option, None)
            value = getattr(target, option) if getter is None else getter()
            summary[section][option] = value
    return summary
@_utils.test(require=ti.extension.mesh, demote_no_access_mesh_fors=True)
def test_multiple_meshes():
    """Two meshes built from one builder share layout but hold independent, copyable fields.

    FIX: ``_utils.test(...)`` was a bare expression (stripped decorator), so
    the test requirements were never attached; restored as a decorator.
    """
    mesh_builder = ti.lang.mesh._TetMesh()
    mesh_builder.verts.place({'y': ti.i32})
    meta = ti.Mesh.load_meta(model_file_path)
    model1 = mesh_builder.build(meta)
    model2 = mesh_builder.build(meta)
    # Seed model1 with y[i] = i**2.
    model1.verts.y.from_numpy(np.array([(x ** 2) for x in range(len(model1.verts))]))

    # NOTE(review): this iterates mesh vertices, which normally requires a
    # @ti.kernel — a decorator may have been stripped here too; confirm.
    def foo():
        for v in model1.verts:
            model2.verts.y[v.id] = v.y
    foo()
    out = model2.verts.y.to_numpy()
    for i in range(len(out)):
        assert out[i] == (i ** 2)
class TypeConverter():
    """Static helpers converting torch tensors to numpy arrays / OpenCV-style images.

    FIX: the methods take no ``self`` and are used as utilities (see the
    internal ``TypeConverter.tensor_2_numpy(...)`` call), so they are marked
    ``@staticmethod`` — without it, calling them on an instance breaks.
    """

    @staticmethod
    def tensor_2_numpy_gpu(data):
        """GPU tensor -> numpy array (copies to host first)."""
        return data.cpu().numpy()

    @staticmethod
    def tensor_2_numpy(data):
        """CPU tensor -> numpy array."""
        return data.numpy()

    @staticmethod
    def image_tensor_2_cv(data):
        """CHW image tensor -> HWC uint8 image, re-adding config['pixel_mean']."""
        img = TypeConverter.tensor_2_numpy(data)
        img = img.transpose([1, 2, 0]) + config['pixel_mean']
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img

    @staticmethod
    def image_tensor_2_cv_gpu(data):
        """GPU image tensor -> HWC uint8 image."""
        return TypeConverter.image_tensor_2_cv(data.cpu())
def _sys_git_stat_local_rev(repo, rev):
    """Resolve *rev* in the local clone to (full_hash, commit_date).

    Runs ``git log -n1`` in the main repo path; returns ``(None, None)`` when
    git fails.  The full hash and date are validated before being returned.
    """
    repo_path = _main_repo_path(repo)
    cmd = [
        'git', '-c', 'log.showsignature=false', 'log', '-n1',
        '--format=format:%H %cd', ('--date=format:%s' % _DateFormat), rev, '--',
    ]
    try:
        raw = check_output(cmd, cwd=repo_path)
    except SubprocessError:
        return (None, None)
    (full_rev, date) = raw.decode('utf8').split()
    # The resolved hash must extend the requested prefix and be full length.
    assert (full_rev.startswith(rev) and (len(full_rev) == _FullNumHashDigits))
    _simple_validate_commit_rev(full_rev)
    _simple_validate_date(date)
    return (full_rev, date)
def set_quantizer_by_name(model, names, **kwargs):
    """Apply quantizer settings to every module whose name matches a pattern in *names*.

    Modules owning quantizers (``_input_quantizer``/``_weight_quantizer``) are
    updated through ``set_quantizers``; raw ``*_quantizer`` modules get the
    ``kwargs`` attributes set directly, with a log line summarising the change.

    NOTE(review): ``name_width`` and ``logger`` are not defined here —
    presumably module-level globals; confirm they exist.  The placement of
    ``setattr`` inside the kwargs loop was inferred (source indentation was
    ambiguous) — confirm against the original.
    """
    for (name, mod) in model.named_modules():
        if (hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer')):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f'Warning: changing {name:{name_width}}'
                    for (k, v) in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod, k, v)
                    logger.info(s)
class IMECDecoder():
    """Decoder for iMEC (iterative minimum-entropy coupling) steganography.

    Recovers a hidden bitstring from a sequence of public message tokens by
    repeatedly coupling the current chunk beliefs with the medium's
    next-token distribution and conditioning on each observed token.
    """

    def __init__(self, medium, block_size=None, n_chunks=None, last_block_size=None, use_header=False, **kwargs):
        # `medium` supplies next-token probabilities via reset()/step().
        self.use_header = use_header
        self.medium = medium
        self.context = kwargs.get('context', None)
        self.block_size = block_size
        self.send_block_size_header = kwargs.get('send_block_size_header', None)
        self.send_n_chunks_header = kwargs.get('send_n_chunks_header', True)
        self.pad_last_belief_chunk = kwargs.get('pad_last_belief_chunk', True)
        self.last_block_size = last_block_size
        self.n_chunks = n_chunks
        if (not self.pad_last_belief_chunk):
            assert ((last_block_size is not None) and (not use_header)), 'need to set last_block_size and cannot use header if pad_last_belief_chunk is being used!'
        # Minimum-entropy-coupling solver settings.
        self.mec_mode = kwargs.get('mec_mode', 'dense')
        self.mec_atol = kwargs.get('mec_atol', 1e-07)
        self.mec_warning_atol = kwargs.get('mec_warning_atol', 1e-05)
        self.belief_entropy_threshold = kwargs.get('belief_entropy_threshold', 1e-09)
        self.clean_up_output = kwargs.get('clean_up_output', False)
        # Header configuration (note: the kwargs key is 'header_bit_size', singular).
        self.header_bit_sizes = kwargs.get('header_bit_size', (4, 8))
        self.header_block_size = kwargs.get('header_block_size', 4)
        self.header_belief_entropy_threshold = kwargs.get('header_belief_entropy_threshold', 1e-09)
        if (not use_header):
            assert (block_size is not None), 'If header is not used, need to set chunk size!'
            assert (n_chunks is not None), 'If header is not used, need to set n_chunks!'
        pass

    def decode(self, public_message_token, context: str=None, verbose: bool=False, text=None, **kwargs):
        """Decode *public_message_token* back into bits; returns (bitarray, stats dict).

        ``text`` and extra kwargs are accepted for interface compatibility but
        unused here.
        """
        (probs, info) = self.medium.reset(context=context)
        # NOTE(review): header offset is never advanced — header decoding is
        # not implemented in this path (see the use_header asserts in __init__).
        msgt_header_offset = 0
        block_sizes = None
        block_sizes = ([self.block_size] * self.n_chunks)
        if (not self.pad_last_belief_chunk):
            block_sizes += [self.last_block_size]
        # Start every chunk from a uniform belief over its 2**bits values.
        if self.pad_last_belief_chunk:
            beliefs = [(np.zeros((2 ** self.block_size), dtype=np.longdouble) + (1.0 / (2 ** self.block_size))) for (k, _) in enumerate(block_sizes)]
        else:
            beliefs = [(np.zeros((2 ** cs), dtype=np.longdouble) + (1.0 / (2 ** cs))) for (k, cs) in enumerate(block_sizes)]
        stats_traj = defaultdict(list)
        stats = {'public_message_len': len(public_message_token)}
        t_iter_1 = None
        for msg_token in public_message_token:
            if verbose:
                print('DEC PROBS:', probs[:5])
            # Refine the chunk whose belief is currently most uncertain.
            belief_entropies = np.array([entropy2(b) for b in beliefs])
            next_chunk_id = np.argmax(belief_entropies)
            try:
                next_action = self.medium.action_labels.cpu().tolist().index(msg_token)
            except:
                # NOTE(review): dead assignment, presumably a leftover debugger anchor.
                a = 5
                raise ActionLabelException(action_labels=self.medium.action_labels.cpu().tolist(), msg_token=msg_token, msg_tokens=public_message_token, message='Not in list!')
            # Couple the chunk belief with the medium distribution, conditioned
            # on the observed token (column selection).
            mec_dict = minimum_entropy_coupling(beliefs[next_chunk_id], probs, select_row=None, select_col=next_action, method='kocaoglu', mode=self.mec_mode, algo_atol=self.mec_atol, warning_atol=self.mec_warning_atol)
            vec2 = mec_dict['M_selected_col']
            # Timing bookkeeping: time outside vs. inside the medium step.
            if (t_iter_1 is not None):
                delta_t_step_no_medium = (time.time() - t_iter_1)
                stats_traj['dec_t_step_no_medium'].append(delta_t_step_no_medium)
            t_medium_1 = time.time()
            (probs, info) = self.medium.step(self.medium.action_labels[next_action])
            delta_t_medium = (time.time() - t_medium_1)
            stats_traj['dec_t_medium_per_step'].append(delta_t_medium)
            t_iter_1 = time.time()
            # Normalised posterior becomes the chunk's new belief.
            beliefs[next_chunk_id] = (vec2 / vec2.sum())
        # Aggregate timing statistics: mean/std plus percentile cuts.
        for (k, v) in stats_traj.items():
            stats[(k + '/mean')] = np.array(v).mean()
            stats[(k + '/std')] = np.array(v).std()
            stats[(k + '/80')] = np.sort(np.array(v))[int((len(v) * 0.8))]
            stats[(k + '/20')] = np.sort(np.array(v))[int((len(v) * 0.2))]
            stats[(k + '/95')] = np.sort(np.array(v))[int((len(v) * 0.95))]
            stats[(k + '/5')] = np.sort(np.array(v))[int((len(v) * 0.05))]
        # MAP value of each belief, rendered as fixed-width bitstrings and joined.
        output = [format(np.argmax(b), '0{}b'.format(cs)) for (b, cs) in zip(beliefs, block_sizes)]
        output = bitarray.bitarray(''.join(output))
        return (output, stats)
def env_loader(env_name: str, run_number: int, dataset_dir: str, stack_size: int=4, data_percentage: int=10, trajectory_fn: Optional[Callable]=None, shuffle_num_episodes: int=1000, shuffle_num_steps: int=50000, trajectory_length: int=10, **_: Any) -> Tuple[dm_env.Environment, tf.data.Dataset]:
    """Build the Atari environment together with its offline-dataset loader.

    Extra keyword arguments are accepted and ignored (``**_``).
    """
    env = environment(game=env_name, stack_size=stack_size)
    dataset = create_atari_ds_loader(
        env_name=env_name,
        run_number=run_number,
        dataset_dir=dataset_dir,
        stack_size=stack_size,
        data_percentage=data_percentage,
        trajectory_fn=trajectory_fn,
        shuffle_num_episodes=shuffle_num_episodes,
        shuffle_num_steps=shuffle_num_steps,
        trajectory_length=trajectory_length,
    )
    return (env, dataset)
@_model
def ese_vovnet19b_slim_dw(pretrained=False, **kwargs):
    """Constructor for the 'ese_vovnet19b_slim_dw' VoVNet variant.

    FIX: the bare ``_model`` expression above the def was a no-op; it looks
    like a stripped registration decorator and is restored as ``@_model`` —
    confirm against the upstream model registry.
    """
    return _vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
def test_inlinepp_stateful():
    """Check that dace.inline evaluates a stateful closure at parse time.

    Every call to ``stateful`` bumps ``ctr`` (11 -> 12, 13, 14, ...), so the
    three inlined expressions must bake the literals 12, 13 and 28 (= 14 * 2)
    into the generated tasklets, and running the SDFG must write those values.
    """
    ctr = 11

    def stateful():
        # Stateful closure: returns 12, 13, 14, ... on successive calls.
        nonlocal ctr
        ctr += 1
        return ctr

    # NOTE(review): `tester` is converted via .to_sdfg() below, so it is
    # presumably meant to carry a @dace.program decorator — the decorator
    # appears to have been stripped from this source; confirm upstream.
    def tester(a: dace.float64[3]):
        a[0] = dace.inline(stateful())
        a[1] = dace.inline(stateful())
        a[2] = dace.inline((stateful() * 2))
    sdfg = tester.to_sdfg()
    # Literals from the three parse-time calls: 12, 13, and 14 * 2 = 28.
    assert _find_in_tasklet(sdfg, '12')
    assert _find_in_tasklet(sdfg, '13')
    assert _find_in_tasklet(sdfg, '28')
    a = np.random.rand(3)
    sdfg(a)
    assert np.allclose(a, np.array([12, 13, 28]))
def _eval_op(lhs, op, rhs):
    """Evaluate a marker comparison ``lhs <op> rhs``.

    If ``op + rhs`` forms a valid version specifier, the comparison is done
    through ``Specifier.contains``; otherwise the plain comparison operator is
    looked up, raising UndefinedComparison when no operator is defined.
    """
    serialized = op.serialize()
    try:
        spec = Specifier(serialized + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)
    comparator = _operators.get(serialized)
    if comparator is None:
        raise UndefinedComparison('Undefined {0!r} on {1!r} and {2!r}.'.format(op, lhs, rhs))
    return comparator(lhs, rhs)
class GoogleMapGetCurrentLocation(VirtualFunctionTool):
    """Virtual tool specification: returns the user's current location as an address.

    Pure declarative metadata — the schemas below describe the tool's
    interface; there is no imperative logic here.
    """
    name = 'GoogleMapGetCurrentLocation'
    summary = 'Get the current location of the user.'
    # The tool takes no arguments.
    parameters: List[ArgParameter] = []
    # Single string return: "street address, city, zip code".
    returns: List[ArgReturn] = [{'name': 'location_address', 'type': 'string', 'description': "The current location of the user in the format of 'street address, city, zip code'."}]
    # No declared failure modes.
    exceptions: List[ArgException] = []
def followstrand(f, factors, x0, x1, y0a, prec=53):
    """Numerically continue a root of f(x, y) along the segment x0 -> x1.

    Starting from the approximate root ``y0a`` of ``f(x0, y)``, follows that
    root along the straight segment from ``x0`` to ``x1`` using the sirocco
    certified path-tracking library, also tracking the auxiliary *factors*.
    Returns a list of tuples ``(t, re(y), im(y))`` with t in [0, 1]; on
    numerical failure the precision is doubled and the computation retried.

    NOTE(review): Sage code — QQbar, ComplexField, sirocco, etc. come from
    the surrounding Sage environment.
    """
    # Degree-one polynomials have a single, exactly solvable root at each end.
    if (f.degree() == 1):
        CF = ComplexField(prec)
        g = f.change_ring(CF)
        (x, y) = g.parent().gens()
        y0 = CF[y](g.subs({x: x0})).roots()[0][0]
        y1 = CF[y](g.subs({x: x1})).roots()[0][0]
        res = [(0.0, y0.real(), y0.imag()), (1.0, y1.real(), y1.imag())]
        return res
    CIF = ComplexIntervalField(prec)
    CC = ComplexField(prec)
    G = f.change_ring(QQbar).change_ring(CIF)
    (x, y) = G.parent().gens()
    # Reparametrise so the segment [x0, x1] becomes x in [0, 1].
    g = G.subs({x: (((1 - x) * CIF(x0)) + (x * CIF(x1)))})
    coefs = []
    deg = g.total_degree()
    # Flatten the interval coefficients (real/imag endpoint pairs) for sirocco.
    for d in range((deg + 1)):
        for i in range((d + 1)):
            c = CIF(g.coefficient({x: (d - i), y: i}))
            cr = c.real()
            ci = c.imag()
            coefs += list(cr.endpoints())
            coefs += list(ci.endpoints())
    yr = CC(y0a).real()
    yi = CC(y0a).imag()
    coefsfactors = []
    degsfactors = []
    # Same flattening for every auxiliary factor polynomial.
    for fc in factors:
        degfc = fc.degree()
        degsfactors.append(degfc)
        G = fc.change_ring(QQbar).change_ring(CIF)
        g = G.subs({x: (((1 - x) * CIF(x0)) + (x * CIF(x1)))})
        for d in range((degfc + 1)):
            for i in range((d + 1)):
                c = CIF(g.coefficient({x: (d - i), y: i}))
                cr = c.real()
                ci = c.imag()
                coefsfactors += list(cr.endpoints())
                coefsfactors += list(ci.endpoints())
    from sage.libs.sirocco import contpath, contpath_mp, contpath_comps, contpath_mp_comps
    try:
        # 53 bits is hardware double precision: use the fast paths; otherwise
        # the multiprecision variants.  *_comps variants also track factors.
        if (prec == 53):
            if factors:
                points = contpath_comps(deg, coefs, yr, yi, degsfactors, coefsfactors)
            else:
                points = contpath(deg, coefs, yr, yi)
        elif factors:
            points = contpath_mp_comps(deg, coefs, yr, yi, prec, degsfactors, coefsfactors)
        else:
            points = contpath_mp(deg, coefs, yr, yi, prec)
        return points
    except Exception:
        # Certified tracking failed at this precision; retry with twice the bits.
        return followstrand(f, factors, x0, x1, y0a, (2 * prec))
class Config(object):
    """Configuration container: subclass and override the class attributes.

    All settings are class attributes; ``__init__`` derives BATCH_SIZE,
    IMAGE_SHAPE and IMAGE_META_SIZE from them, and ``display`` prints the
    effective configuration.
    """
    # Identifier for this configuration; subclasses are expected to set it.
    NAME = None
    # Hardware / training schedule.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 10
    # Backbone / FPN settings.
    BACKBONE = 'resnet101'
    COMPUTE_BACKBONE_SHAPE = None
    BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024
    TOP_DOWN_PYRAMID_SIZE = 256
    NUM_CLASSES = 1
    # Region proposal network (RPN) anchors and proposal filtering.
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]
    RPN_ANCHOR_STRIDE = 1
    RPN_NMS_THRESHOLD = 0.0001
    RPN_TRAIN_ANCHORS_PER_IMAGE = 768
    PRE_NMS_LIMIT = 6000
    POST_NMS_ROIS_TRAINING = 3500
    POST_NMS_ROIS_INFERENCE = 2500
    # Mask handling.
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)
    # Input image resizing and normalisation.
    IMAGE_RESIZE_MODE = 'square'
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_SCALE = 0
    IMAGE_CHANNEL_COUNT = 3
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    # ROI heads and graph/LSTM extensions.
    TRAIN_ROIS_PER_IMAGE = 512
    SAMPLES_PER_VERTEX = 10
    LSTM_DEPTH = 256
    GRAPH_NEIGHBORS = 10
    ROI_POSITIVE_RATIO = 0.6
    POOL_SIZE = 7
    MASK_POOL_SIZE = 14
    MASK_SHAPE = [28, 28]
    MAX_GT_INSTANCES = 2000
    # Bounding-box refinement standard deviations.
    RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    # Detection-time filtering.
    DETECTION_MAX_INSTANCES = 2000
    DETECTION_MIN_CONFIDENCE = 0.3
    DETECTION_NMS_THRESHOLD = 0.25
    # Optimiser settings and per-loss weighting.
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9
    WEIGHT_DECAY = 0.0001
    LOSS_WEIGHTS = {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0, 'roi_alignment_loss': 1, 'row_adj_loss': 1, 'col_adj_loss': 1}
    USE_RPN_ROIS = True
    TRAIN_BN = False
    GRADIENT_CLIP_NORM = 5.0

    def __init__(self):
        """Compute attributes derived from the class-level settings."""
        # Effective batch size across all GPUs.
        self.BATCH_SIZE = (self.IMAGES_PER_GPU * self.GPU_COUNT)
        # 'crop' mode trains on square MIN_DIM crops; otherwise images are
        # padded up to a square of MAX_DIM.
        if (self.IMAGE_RESIZE_MODE == 'crop'):
            self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, self.IMAGE_CHANNEL_COUNT])
        else:
            self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, self.IMAGE_CHANNEL_COUNT])
        # Length of the image meta vector (1+3+3+4+1 fixed fields plus one
        # slot per class) — confirm the breakdown against compose_image_meta.
        self.IMAGE_META_SIZE = (((((1 + 3) + 3) + 4) + 1) + self.NUM_CLASSES)

    def display(self):
        """Print every non-callable, non-dunder configuration attribute."""
        print('\nConfigurations:')
        for a in dir(self):
            if ((not a.startswith('__')) and (not callable(getattr(self, a)))):
                print('{:30} {}'.format(a, getattr(self, a)))
        print('\n')
class Generator(nn.Module):
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6):
super(Generator, self).__init__()
layers = []
layers.append(nn.Conv2d((3 + c_dim), conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = conv_dim
for i in range(2):
layers.append(nn.Conv2d(curr_dim, (curr_dim * 2), kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d((curr_dim * 2), affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = (curr_dim * 2)
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
for i in range(2):
layers.append(nn.ConvTranspose2d(curr_dim, (curr_dim // 2), kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d((curr_dim // 2), affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = (curr_dim // 2)
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.main = nn.Sequential(*layers)
def forward(self, x, c):
c = c.view(c.size(0), c.size(1), 1, 1)
c = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c], dim=1)
return self.main(x) |
def _cf_string_to_unicode(value):
    """Decode a CFStringRef into a Python str (or None if CF yields no bytes).

    Tries the zero-copy ``CFStringGetCStringPtr`` fast path first; if that
    returns NULL, copies the string into a temporary buffer instead.
    """
    string_ref = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
    raw = CoreFoundation.CFStringGetCStringPtr(string_ref, CFConst.kCFStringEncodingUTF8)
    if raw is None:
        # Fast path unavailable; copy into a fixed-size buffer.
        buf = ctypes.create_string_buffer(1024)
        ok = CoreFoundation.CFStringGetCString(string_ref, buf, 1024, CFConst.kCFStringEncodingUTF8)
        if not ok:
            raise OSError('Error copying C string from CFStringRef')
        raw = buf.value
    return raw.decode('utf-8') if raw is not None else raw
class SimdjsonRepository():
    """Collection of SimdjsonFile objects addressed by include path.

    Files are created lazily on first subscript access, resolved against the
    repository's relative roots.
    """

    def __init__(self, project_path: str, relative_roots: List[RelativeRoot]):
        self.project_path = project_path
        self.relative_roots = relative_roots
        # include path -> lazily created file object.
        self.files: Dict[str, SimdjsonFile] = {}

    def validate_free_dependency_files(self):
        """Run the free-dependency check on every known file."""
        for file in self:
            file.validate_free_dependency_file()

    def __len__(self):
        return len(self.files)

    def __contains__(self, include_path: Union[str, SimdjsonFile]):
        # Accept either a file object or its include path.
        if isinstance(include_path, SimdjsonFile):
            return include_path.include_path in self.files
        else:
            return include_path in self.files

    def __getitem__(self, include_path: str):
        """Return (creating on demand) the file for *include_path*, or None if unresolvable."""
        if include_path not in self.files:
            root = self._included_filename_root(include_path)
            if not root:
                return None
            self.files[include_path] = SimdjsonFile(self, root, include_path)
        return self.files[include_path]

    def __iter__(self):
        return iter(self.files.values())

    def _included_filename_root(self, filename: str):
        """Return the unique relative root containing *filename*, or None.

        Asserts that the file does not exist under more than one root.
        """
        result = None
        for relative_root in self.relative_roots:
            if os.path.exists(os.path.join(self.project_path, relative_root, filename)):
                # FIX: the assert message was a plain string with literal
                # '{...}' placeholders; made it an f-string over real variables.
                assert result is None, f'{filename} exists in both {result} and {relative_root}!'
                result = relative_root
        return result

    def validate_all_files_used(self, root: RelativeRoot):
        """Assert that every .h/.cpp under *root* is referenced (modulo DEPRECATED_FILES)."""
        assert root in self.relative_roots
        absolute_root = os.path.join(self.project_path, root)
        all_files = set([os.path.relpath(os.path.join(dir, file).replace('\\', '/'), absolute_root) for (dir, _, files) in os.walk(absolute_root) for file in files if (file.endswith('.h') or file.endswith('.cpp'))])
        used_files = set([file.include_path for file in self if (file.root == root)])
        all_files.difference_update(used_files)
        all_files.difference_update(DEPRECATED_FILES)
        assert len(all_files) == 0, f'Files not used: {sorted(all_files)}'
def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):
    """Compute pairwise distances between the rows of XA and the rows of XB.

    *metric* may be a metric name (dispatched through the registered metric
    tables, including the 'test_*' variants) or a callable applied to row
    pairs.  Raises ValueError for malformed inputs or unknown metric names and
    TypeError when *metric* is neither a string nor callable.
    """
    XA = np.asarray(XA)
    XB = np.asarray(XB)
    if XA.ndim != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if XB.ndim != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    if XA.shape[1] != XB.shape[1]:
        raise ValueError('XA and XB must have the same number of columns (i.e. feature dimension.)')
    mA, n = XA.shape
    mB = XB.shape[0]
    if callable(metric):
        # Validate inputs against a registered metric of the same name, if any.
        mstr = getattr(metric, '__name__', 'Unknown')
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            (XA, XB, typ, kwargs) = _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs)
        return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)
    if isinstance(metric, str):
        mstr = metric.lower()
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            # Registered metric: use its optimized implementation directly.
            return metric_info.cdist_func(XA, XB, out=out, **kwargs)
        if mstr.startswith('test_'):
            # 'test_' metrics exercise the generic (slow) code path.
            metric_info = _TEST_METRICS.get(mstr, None)
            if metric_info is None:
                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
            (XA, XB, typ, kwargs) = _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs)
            return _cdist_callable(XA, XB, metric=metric_info.dist_func, out=out, **kwargs)
        raise ValueError('Unknown Distance Metric: %s' % mstr)
    raise TypeError('2nd argument metric must be a string identifier or a function.')
def get_dataset(bop_dir, dataset, train=True, incl_param=False, eval_model=False, data_folder='None', data_per_obj=False, train_obj_visible_theshold=0.1):
    """Index a BOP-format dataset: model files, image/mask paths, GT and camera params.

    Parameters:
    - bop_dir: root directory containing the BOP datasets.
    - dataset: dataset name ('ycbv', 'tless', 'hb', 'lmo', 'itodd', ...).
    - train: training split; raises the per-object visibility threshold and
      selects itodd's gray images for the test split.
    - incl_param: also return the per-image camera parameters.
    - eval_model: use the '<dataset>/models_eval' model folder.
    - data_folder: sub-folder under the dataset holding the scene directories.
    - data_per_obj: group results per object id (lists indexed by obj_id - 1)
      instead of flat per-image lists.
    - train_obj_visible_theshold: minimum 'visib_fract' for per-object
      training samples (name kept as-is for interface compatibility).

    FIX: the per-image aggregation previously assigned ``mask_files_dataset``
    to ``mask_visib_files``, silently dropping the visible-mask paths.
    """
    if eval_model:
        postfix_model = '_eval'
    else:
        postfix_model = ''
    bop_dataset_dir = os.path.join(bop_dir, dataset)
    target_dir = os.path.join(bop_dataset_dir, data_folder)
    model_dir = ((bop_dataset_dir + '/models') + postfix_model)
    model_info = inout.load_json(os.path.join(model_dir, 'models_info.json'))
    # Each dataset ships its own global camera file.
    if (dataset == 'ycbv'):
        cam_param_global = inout.load_cam_params(os.path.join(bop_dataset_dir, 'camera_uw.json'))
    elif ((dataset == 'tless') or (dataset == 'hb')):
        cam_param_global = inout.load_cam_params(os.path.join(bop_dataset_dir, 'camera_primesense.json'))
    else:
        cam_param_global = inout.load_cam_params(os.path.join(bop_dataset_dir, 'camera.json'))
    # NOTE(review): im_size is computed but not used or returned.
    im_size = np.array(cam_param_global['im_size'])[::-1]
    # Collect the PLY model files that actually exist on disk.
    model_plys = {}
    model_ids = []
    for model_id in model_info.keys():
        ply_fn = os.path.join(model_dir, 'obj_{:06d}.ply'.format(int(model_id)))
        if os.path.exists(ply_fn):
            model_ids.append(int(model_id))
    model_ids = np.sort(np.array(model_ids))
    for model_id in model_ids:
        ply_fn = os.path.join(model_dir, 'obj_{:06d}.ply'.format(int(model_id)))
        model_plys[int(model_id)] = ply_fn
        print(ply_fn)
    print('if models are not fully listed above, please make sure there are ply files available')
    # Flat per-image accumulators.
    rgb_files_dataset = []
    depth_files_dataset = []
    mask_files_dataset = []
    mask_visib_files_dataset = []
    gts_dataset = []
    gt_infos_dataset = []
    params_dataset = []
    # Per-object accumulators, indexed by obj_id - 1.
    max_id = model_ids.max()
    if (dataset == 'lmo'):
        # lmo annotates against the full lm id range even though only a subset has models.
        max_id = 15
    rgb_files_per_obj = [[] for x in range(max_id)]
    depth_files_per_obj = [[] for x in range(max_id)]
    mask_files_per_obj = [[] for x in range(max_id)]
    mask_visib_files_per_obj = [[] for x in range(max_id)]
    gts_per_obj = [[] for x in range(max_id)]
    gt_infos_per_obj = [[] for x in range(max_id)]
    params_per_obj = [[] for x in range(max_id)]
    if os.path.exists(target_dir):
        for dir in os.listdir(target_dir):
            current_dir = ((target_dir + '/') + dir)
            # Only scene directories with camera metadata are indexed.
            if os.path.exists(os.path.join(current_dir, 'scene_camera.json')):
                scene_params = inout.load_scene_camera(os.path.join(current_dir, 'scene_camera.json'))
                scene_gt_fn = os.path.join(current_dir, 'scene_gt.json')
                scene_gt_info_fn = os.path.join(current_dir, 'scene_gt_info.json')
                has_gt = False
                if (os.path.exists(scene_gt_fn) and os.path.exists(scene_gt_info_fn)):
                    scene_gts = inout.load_scene_gt(scene_gt_fn)
                    scene_gt_infos = inout.load_scene_gt(scene_gt_info_fn)
                    has_gt = True
                for img_id in sorted(scene_params.keys()):
                    im_id = int(img_id)
                    # itodd test images are grayscale TIFFs; everything else is PNG/JPG.
                    if ((dataset == 'itodd') and (not train)):
                        rgb_fn = os.path.join((current_dir + '/gray'), '{:06d}.tif'.format(im_id))
                    else:
                        rgb_fn = os.path.join((current_dir + '/rgb'), '{:06d}.png'.format(im_id))
                    depth_fn = os.path.join((current_dir + '/depth'), '{:06d}.png'.format(im_id))
                    if (not os.path.exists(rgb_fn)):
                        # Some datasets ship JPG instead of PNG.
                        rgb_fn_no_surfix = rgb_fn[:-4]
                        rgb_fn = (rgb_fn_no_surfix + '.jpg')
                    if data_per_obj:
                        # One entry per sufficiently visible GT instance.
                        visib_thershold = 0.1
                        if train:
                            visib_thershold = train_obj_visible_theshold
                        gts = scene_gts[im_id]
                        for (counter, gt) in enumerate(gts):
                            visib_fract = scene_gt_infos[im_id][counter]['visib_fract']
                            if (visib_fract > visib_thershold):
                                obj_id = int((gt['obj_id'] - 1))
                                mask_fn = os.path.join((current_dir + '/mask'), '{:06d}_{:06d}.png'.format(im_id, counter))
                                mask_visib_fn = os.path.join((current_dir + '/mask_visib'), '{:06d}_{:06d}.png'.format(im_id, counter))
                                rgb_files_per_obj[obj_id].append(rgb_fn)
                                depth_files_per_obj[obj_id].append(depth_fn)
                                mask_files_per_obj[obj_id].append([mask_fn])
                                mask_visib_files_per_obj[obj_id].append([mask_visib_fn])
                                if has_gt:
                                    gts_per_obj[obj_id].append(gt)
                                    gt_infos_per_obj[obj_id].append(scene_gt_infos[im_id][counter])
                                params_per_obj[obj_id].append(scene_params[im_id])
                    else:
                        # One entry per image, with all instance masks grouped.
                        rgb_files_dataset.append(rgb_fn)
                        depth_files_dataset.append(depth_fn)
                        if has_gt:
                            gts_dataset.append(scene_gts[im_id])
                            gt_infos_dataset.append(scene_gt_infos[im_id])
                        params_dataset.append(scene_params[im_id])
                        mask_fns = []
                        mask_visib_fns = []
                        for (counter, gt) in enumerate(scene_gts[im_id]):
                            mask_fn = os.path.join((current_dir + '/mask'), '{:06d}_{:06d}.png'.format(im_id, counter))
                            mask_visib_fn = os.path.join((current_dir + '/mask_visib'), '{:06d}_{:06d}.png'.format(im_id, counter))
                            mask_fns.append(mask_fn)
                            mask_visib_fns.append(mask_visib_fn)
                        mask_files_dataset.append(mask_fns)
                        mask_visib_files_dataset.append(mask_visib_fns)
    if data_per_obj:
        rgb_files = rgb_files_per_obj
        depth_files = depth_files_per_obj
        mask_files = mask_files_per_obj
        mask_visib_files = mask_visib_files_per_obj
        gts = gts_per_obj
        gt_infos = gt_infos_per_obj
        params = params_per_obj
    else:
        rgb_files = rgb_files_dataset
        depth_files = depth_files_dataset
        mask_files = mask_files_dataset
        # BUG FIX: was mask_files_dataset, which dropped the visible-mask paths.
        mask_visib_files = mask_visib_files_dataset
        gts = gts_dataset
        gt_infos = gt_infos_dataset
        params = params_dataset
    if incl_param:
        return (bop_dataset_dir, target_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files, mask_visib_files, gts, gt_infos, cam_param_global, params)
    else:
        return (bop_dataset_dir, target_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files, mask_visib_files, gts, gt_infos, cam_param_global)
def test_graph_reverse_cuthill_mckee():
    """RCM ordering of a small symmetric graph matches the known permutation,
    for both the default 32-bit and explicit 64-bit index arrays."""
    dense = np.array(
        [[1, 0, 0, 0, 1, 0, 0, 0],
         [0, 1, 1, 0, 0, 1, 0, 1],
         [0, 1, 1, 0, 1, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 1, 0],
         [1, 0, 1, 0, 1, 0, 0, 0],
         [0, 1, 0, 0, 0, 1, 0, 1],
         [0, 0, 0, 1, 0, 0, 1, 0],
         [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
    graph = csr_matrix(dense)
    expected = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    # Default index dtype.
    assert_equal(reverse_cuthill_mckee(graph), expected)
    # Same graph with 64-bit indices, symmetric_mode=True.
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), expected)
def _act_backward(ctx, x, dx):
    """Run the in-place backward pass for the activation recorded on ``ctx``.

    Both the gradient ``dx`` and the saved tensor ``x`` are updated in
    place via the CUDA extension kernels; ``ACT_NONE`` is a no-op.
    """
    activation = ctx.activation
    if activation == ACT_LEAKY_RELU:
        _check(_ext.leaky_relu_backward_cuda, x, dx, ctx.slope)
        # Re-apply leaky ReLU with the reciprocal slope -- presumably this
        # inverts the activation on x to recover pre-activation values.
        _check(_ext.leaky_relu_cuda, x, 1.0 / ctx.slope)
    elif activation == ACT_ELU:
        _check(_ext.elu_backward_cuda, x, dx)
        _check(_ext.elu_inv_cuda, x)
    elif activation == ACT_NONE:
        # Identity activation: nothing to undo.
        pass
def dice_coeff(prediction, target):
    """Dice coefficient between a thresholded prediction and ``target``.

    ``prediction`` is binarized at 0.5; ``target`` is assumed binary
    (0/1) -- TODO confirm with callers.  A small epsilon keeps the ratio
    defined when both masks are empty.
    """
    thresholded = (prediction >= 0.5).astype(prediction.dtype)
    intersection = np.sum(thresholded * target)
    union = np.sum(thresholded) + np.sum(target)
    eps = 1e-06
    return np.mean((2 * intersection) / (union + eps))
def calc_psnr_and_ssim(img1, img2):
    """Return ``(psnr, ssim)`` between two images.

    Both images are cast to float64 before comparison.  SSIM uses
    ``skimage``'s ``measure.compare_ssim`` (deprecated upstream in favour
    of ``skimage.metrics.structural_similarity``) with a 65-pixel window
    and per-channel averaging; assumes 8-bit dynamic range
    (``data_range=255``) and channel-last colour images -- TODO confirm.
    """
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    psnr = calculate_psnr(img1, img2)
    ssim = measure.compare_ssim(img1, img2, data_range=255, multichannel=True, win_size=65)
    return (psnr, ssim)
def select_by_path_component(path_pattern, possible_matches, recursion_index=0):
    """Select the paths in ``possible_matches`` that best match ``path_pattern``.

    Matching proceeds one '/'-separated component at a time: at each
    recursion level the pattern prefix up to that component is matched
    against the candidates (first literally, then as a regex when no
    literal prefix matches), the greatest matching component value is
    chosen, and the search recurses on the remaining suffix.  Returns a
    set of matching paths.
    """
    import collections
    import re
    # Fast path on the first call: exact prefix matches win outright.
    if (recursion_index == 0):
        matches = {p for p in possible_matches if p.startswith(path_pattern)}
        if matches:
            return matches
    split_path_pattern = path_pattern.split('/')
    # Recursed past the last pattern component: everything left matches.
    if (recursion_index > len(split_path_pattern)):
        return possible_matches
    # Prefix handled at this level, and the remainder for the recursion.
    path_working = '/'.join(split_path_pattern[:(recursion_index + 1)])
    path_suffix = '/'.join(split_path_pattern[(recursion_index + 1):])
    if path_suffix:
        path_suffix = ('/' + path_suffix)
    matches = collections.defaultdict(list)
    # Pass 1: literal prefix match of the working pattern.
    for f in possible_matches:
        if f.startswith(path_working):
            # Extend to the end of the component path_working lands in.
            next_slash = f.find('/', ((f.index(path_working) + len(path_working)) - 1))
            if (next_slash < 0):
                next_slash = len(f)
            matched_component = f[:next_slash]
            matches[path_working].append((matched_component, f))
    # Pass 2: no literal matches -- treat the working pattern as a regex,
    # and capture the rest of the matched component with [^/]*.
    if (not matches):
        path_regex = re.compile(path_working)
        component_regex = re.compile((path_working + '[^/]*'))
        for f in possible_matches:
            m = path_regex.match(f)
            if m:
                match = m.group()
                matched_component = component_regex.match(f).group()
                matches[match].append((matched_component, f))
    # For each matched prefix keep only candidates whose component equals
    # the maximum (lexicographically greatest) component seen.
    best_matches = {}
    for match in matches:
        best_component = max(set((m[0] for m in matches[match])))
        best_possible_matches = [m[1] for m in matches[match] if (m[0] == best_component)]
        best_matches[best_component] = best_possible_matches
    # Recurse on the suffix for each best component; union the results.
    selected = {path for best_component in best_matches for path in select_by_path_component((best_component + path_suffix), possible_matches=best_matches[best_component], recursion_index=(recursion_index + 1))}
    return selected
def generate_caption(model, image, use_nucleus_sampling=False, num_beams=3, max_length=40, min_length=5, num_captions=5):
    """Generate caption(s) for ``image`` with ``model``.

    With nucleus sampling, ``num_captions`` stochastic captions are drawn
    (top_p=0.9); otherwise a single beam-search caption is produced.
    The previously hard-coded sample count of 5 is now the
    ``num_captions`` parameter (default unchanged).

    Returns:
        list of caption strings (length ``num_captions`` or 1).
    """
    samples = {'image': image}
    captions = []
    if use_nucleus_sampling:
        # Sampling is stochastic, so draw several candidate captions.
        for _ in range(num_captions):
            caption = model.generate(samples, use_nucleus_sampling=True, max_length=max_length, min_length=min_length, top_p=0.9)
            captions.append(caption[0])
    else:
        # Beam search is deterministic; one caption suffices.
        caption = model.generate(samples, use_nucleus_sampling=False, num_beams=num_beams, max_length=max_length, min_length=min_length)
        captions.append(caption[0])
    return captions
def load_stylegan_decoder(px=1024, dataset='ffhq'):
    """Build a StyleGAN decoder (mapping + synthesis) and load its checkpoint.

    Supported configurations: ``px=64`` (custom synthesis settings) and
    ``px=1024`` with ``dataset='ffhq'``.

    Raises:
        Exception: when no checkpoint exists for the requested
            resolution/dataset combination.
    """
    from .StyleGAN.model import G_mapping, G_synthesis
    ckpt_path = CKPT_PATHS[f'StyleGAN_{px}']
    if px == 64:
        decoder = nn.Sequential(OrderedDict([
            ('g_mapping', G_mapping()),
            ('g_synthesis', G_synthesis(dlatent_size=512, resolution=64, blur_filter=None,
                                        fmap_base=8192, fmap_decay=1.0, use_styles=True,
                                        const_input_layer=True, use_noise=True,
                                        randomize_noise=True, nonlinearity='lrelu',
                                        use_wscale=True)),
        ]))
    elif px == 1024 and dataset == 'ffhq':
        decoder = nn.Sequential(OrderedDict([('g_mapping', G_mapping()), ('g_synthesis', G_synthesis())]))
    else:
        # Fixed typo in the error message: 'SyleGAN' -> 'StyleGAN'.
        raise Exception('No StyleGAN for specified resolution and dataset')
    decoder.load_state_dict(torch.load(ckpt_path))
    return decoder
class AdditiveAbelianGroupWrapperElement(addgp.AdditiveAbelianGroupElement):
    """Element of an additive abelian group wrapper that caches the wrapped
    ambient element alongside its abstract-group vector."""

    def __init__(self, parent, vector, element=None, check=False):
        """Store ``vector``; if ``element`` is given, coerce it into the
        parent's universe and cache it."""
        addgp.AdditiveAbelianGroupElement.__init__(self, parent, vector, check)
        if element is not None:
            element = self.parent().universe()(element)
        self._element = element

    def element(self):
        """Return the wrapped element, computing it lazily on first use."""
        if self._element is None:
            # presumably discrete_exp maps the Hermite-lifted vector back
            # into the ambient group -- TODO confirm against the parent API
            self._element = self.parent().discrete_exp(self._hermite_lift())
        return self._element

    def _repr_(self):
        """Represent this element via the wrapped ambient element."""
        return repr(self.element())
def filepath_sent2_chunk(tmpdir):
    """Write a chunked 'sent2' fixture file under ``tmpdir`` and return its
    filesystem path."""
    content = '[ Bulbs A and C ] [ are ] [ still ] [ in closed paths ]\n[ Terminal 1 and the positive terminal ] [ are separated ] [ by the gap ]\n[ Terminal 2 and the positive terminal ] [ are separated ] [ by the gap ]\n[ The terminals ] [ are ] [ in the same state. ]\n[ The switch and the bulb ] [ have to be ] [ in the same path. ]'
    out_file = tmpdir.join('STSint.testinput.answers-students.sent2.chunk.txt')
    out_file.write(content)
    return out_file.strpath
def normalization_variations(string):
    """Return the set of normalization variations of ``string`` -- here a
    single normalized form produced by snips_nlu_utils."""
    from snips_nlu_utils import normalize
    normalized = normalize(string)
    return {normalized}
def match_similar_filenames(file1, file2):
    """Return True when the two filenames are considered equivalent.

    Exact equality is tried first; otherwise the names are compared
    part-wise under both camel-case and underscore splitting.  The
    original code returned immediately after the camel-case comparison,
    leaving the underscore comparison unreachable -- both modes are now
    consulted.
    """
    if file1 == file2:
        return True
    return (match_by_parts(file1, file2, 'camel-case')
            or match_by_parts(file1, file2, 'underscore'))
def get_device(gpu_id=None):
    """Resolve a ``torch.device``, preferring MPS (on newer torch builds),
    then CUDA, then CPU.

    ``gpu_id`` may be ``None`` (no index suffix) or an ``int`` device
    index; anything else raises ``TypeError``.
    """
    if gpu_id is None:
        suffix = ''
    elif isinstance(gpu_id, int):
        suffix = f':{gpu_id}'
    else:
        raise TypeError('Input should be int value.')
    # NOTE(review): IS_HIGH_VERSION is defined elsewhere in this module;
    # presumably it gates torch versions that expose the MPS backend.
    if IS_HIGH_VERSION and torch.backends.mps.is_available():
        return torch.device('mps' + suffix)
    use_cuda = torch.cuda.is_available() and torch.backends.cudnn.is_available()
    return torch.device(('cuda' + suffix) if use_cuda else 'cpu')
def load_datasets(name: str) -> Tuple[(CVDataset, CVDataset, CVDataset)]:
    """Load the (train, valid, test) splits of a named few-shot CV dataset.

    Construction is now deferred behind zero-argument factories, so an
    invalid ``name`` fails fast without instantiating (and potentially
    downloading) any dataset -- previously every split was built eagerly
    just to populate the lookup table.

    Raises:
        ValueError: if ``name`` is not a known dataset.
    """
    # Map each dataset name to a factory producing its three splits.
    factories = {
        'omniglot': lambda: (
            paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28)),
            paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28)),
            paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28)),
        ),
    }
    if name not in factories:
        names = ','.join(list(factories.keys()))
        raise ValueError(f'{name} is not a valid dataset name, which should be in {names}')
    return factories[name]()
def extract(bagfile, pose_topic, msg_type, out_filename):
    """Dump poses from a rosbag topic to a whitespace-separated text file.

    Each output line is ``timestamp tx ty tz qx qy qz qw``.  Supported
    message types: ``PoseWithCovarianceStamped`` and ``PoseStamped``.

    Fixes over the original: the output file is closed via a context
    manager (it previously leaked), and an unknown ``msg_type`` raises
    ``ValueError`` up front instead of ``assert False`` per message
    (asserts are stripped under ``python -O``).
    """
    # Validate before touching the filesystem so bad input fails fast.
    if msg_type not in ('PoseWithCovarianceStamped', 'PoseStamped'):
        raise ValueError('Unknown message type: %s' % msg_type)
    line_fmt = '%.12f %.12f %.12f %.12f %.12f %.12f %.12f %.12f\n'
    n = 0
    with open(out_filename, 'w') as f, rosbag.Bag(bagfile, 'r') as bag:
        f.write('# timestamp tx ty tz qx qy qz qw\n')
        for (topic, msg, ts) in bag.read_messages(topics=str(pose_topic)):
            # PoseWithCovarianceStamped nests the pose one level deeper.
            pose = msg.pose.pose if msg_type == 'PoseWithCovarianceStamped' else msg.pose
            f.write(line_fmt % (msg.header.stamp.to_sec(),
                                pose.position.x, pose.position.y, pose.position.z,
                                pose.orientation.x, pose.orientation.y,
                                pose.orientation.z, pose.orientation.w))
            n += 1
    print('wrote ' + str(n) + ' imu messages to the file: ' + out_filename)
def likelihood_fun(params, n_obs=50):
    """Draw ``n_obs`` Gaussian samples per parameter (unit scale), centred
    at ``params``; returns an (n_obs, len(params)) array."""
    sample_shape = (n_obs, params.shape[0])
    return RNG.normal(loc=params, size=sample_shape)
def oid_challenge_classes():
    """Return the list of Open Images Detection Challenge class display
    names (order as hard-coded; presumably matches the label-id mapping
    used elsewhere -- TODO confirm against the dataset config)."""
    return ['Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', 'Vehicle registration plate', 'Microphone', 'Musical keyboard', 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter', 
'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', 'Tablet computer', 'Pillow', 'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream', 
'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon']
def download_model(name: str) -> str:
    """Download the model registered under ``name`` and return its local
    path; returns an empty string for unimplemented model types."""
    (model_name, model_type, model_url) = ModelInfo.get_model_info(name)
    target_dir = _create_dirs(model_name)
    if model_type == 'single':
        return _download_file(model_url, target_dir)
    if model_type == 'zip':
        return _download_zip_model(model_url, target_dir)
    # Unknown packaging: warn and signal failure with an empty path.
    print(f'model type {model_type} not yet implemented')
    return ''
def fit_uniform_dist(xs):
    """Fit per-column uniform-distribution bounds to the samples ``xs``.

    Each column's observed [min, max] span is symmetrically widened by a
    factor of (n + 2) / n (correcting for the bias of sample extremes),
    and the (lower, upper) bound arrays are returned.
    """
    n = xs.shape[0]
    col_min = np.min(xs, axis=0)
    col_max = np.max(xs, axis=0)
    widened = (col_max - col_min) * (n + 2) / n
    return (col_max - widened, col_min + widened)
_operation
def exp_imag(a: torch.Tensor):
    """Return exp(i*a) as a real tensor with a trailing (cos, sin) axis."""
    angles = a.unsqueeze(-1)
    real_part = torch.cos(angles)
    imag_part = torch.sin(angles)
    return torch.cat((real_part, imag_part), -1)
class POWER():
    """Container for the POWER dataset: train/valid/test splits wrapped as
    float32 arrays."""

    class Data():
        """Wraps a raw array as float32 and records the sample count."""

        def __init__(self, data):
            self.x = data.astype(np.float32)  # (N, n_dims) samples
            self.N = self.x.shape[0]          # number of samples

    def __init__(self):
        trn, val, tst = load_data_normalised()
        self.trn = self.Data(trn)
        self.val = self.Data(val)
        self.tst = self.Data(tst)
        self.n_dims = self.trn.x.shape[1]  # feature dimensionality
class PoolFormerEmbeddings(nn.Module):
    """Patch embedding for PoolFormer: a strided convolution followed by an
    optional normalisation layer (identity when none is supplied)."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()

        def _as_pair(value):
            # Accept either a scalar or an iterable (h, w) specification.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        self.projection = nn.Conv2d(num_channels, hidden_size,
                                    kernel_size=_as_pair(patch_size),
                                    stride=_as_pair(stride),
                                    padding=_as_pair(padding))
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        """Project (B, C, H, W) pixels to (B, hidden, H', W') embeddings."""
        return self.norm(self.projection(pixel_values))
class Relabel(pymia_fltr.Filter):
    """Filter that rewrites label values in a label image.

    ``label_changes`` maps each new label to the old label (or tuple of
    old labels) it replaces.
    """

    def __init__(self, label_changes: typing.Dict[(int, typing.Union[(int, tuple)])]) -> None:
        """Args:
            label_changes: mapping of new label -> old label(s) to replace.
        """
        super().__init__()
        self.label_changes = label_changes

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Apply the relabelling and return a new image with copied metadata.

        Masks are computed against the original array, so successive
        mappings see the original labels and do not cascade.
        """
        np_img = sitk.GetArrayFromImage(image)
        new_np_img = np_img.copy()
        for (new_label, old_labels) in self.label_changes.items():
            # np.isin replaces the deprecated np.in1d + ravel/reshape dance.
            mask = np.isin(np_img, old_labels)
            new_np_img[mask] = new_label
        new_img = sitk.GetImageFromArray(new_np_img)
        new_img.CopyInformation(image)
        return new_img

    def __str__(self):
        """Human-readable summary of the configured label changes."""
        changes = '; '.join('{}->{}'.format(k, v) for (k, v) in self.label_changes.items())
        # Dropped the unused `self=self` keyword from the original format call.
        return 'Relabel:\n label_changes: {label_changes}\n'.format(label_changes=changes)
class CamembertForMultipleChoice():
    """Placeholder for CamembertForMultipleChoice used when PyTorch is not
    installed; every entry point raises via ``requires_pytorch``."""
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): kept as an instance method to mirror the original;
        # it only needs to raise the missing-dependency error.
        requires_pytorch(self)
def process_and_save(filename, output_dir):
    """Process ``filename`` into a music object, save it as JSON under
    ``output_dir`` (same stem as the input), and return the object."""
    music = process(filename)
    target = output_dir / Path(filename).with_suffix('.json').name
    music.save(target)
    return music
class AdditionRNNModel(object):
    """Seq2seq LSTM that learns to add two integers given as text.

    Questions look like ``"123+45   "`` (padded to ``maxlen`` and
    optionally reversed); answers are the decimal sum padded to
    ``max_digits + 1`` characters.
    """

    def __init__(self, max_digits=15, hidden_size=128, batch_size=4096, invert=True, optimizer_lr=0.001, clipnorm=None, logdir=None):
        self.max_digits = max_digits
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.invert = invert  # reversing the input is a common seq2seq trick
        self.optimizer_lr = optimizer_lr
        self.clipnorm = clipnorm
        self.logdir = logdir
        # "a+b" with both operands up to max_digits characters long.
        self.maxlen = ((max_digits + 1) + max_digits)
        # Vocabulary: digits, '+', and the space used for padding.
        # (was '+ ' without digits, which made the CharacterTable unable
        # to encode operands)
        self.chars = '0123456789+ '
        self.num_chars = len(self.chars)
        self.ctable = CharacterTable(self.chars, self.maxlen)
        self.epochs = 0
        self.make_model()
        if logdir:
            self.callbacks = [TensorBoard(log_dir=self.logdir)]
        else:
            self.callbacks = []

    def make_model(self):
        """Build and compile the encoder/decoder LSTM (sets ``self.model``)."""
        input = Input(shape=(self.maxlen, self.num_chars))
        x = recurrent.LSTM(self.hidden_size)(input)
        # Repeat the sentence encoding once per output character.
        x = RepeatVector((self.max_digits + 1))(x)
        x = recurrent.LSTM(self.hidden_size, return_sequences=True)(x)
        x = TimeDistributed(Dense(self.num_chars, activation='softmax'))(x)

        def full_number_accuracy(y_true, y_pred):
            # Fraction of samples whose ENTIRE output string is correct.
            y_true_argmax = K.argmax(y_true)
            y_pred_argmax = K.argmax(y_pred)
            tfd = K.equal(y_true_argmax, y_pred_argmax)
            tfn = K.all(tfd, axis=1)
            tfc = K.cast(tfn, dtype='float32')
            tfm = K.mean(tfc)
            return tfm
        self.model = Model(inputs=input, outputs=x)
        self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=self.optimizer_lr, clipnorm=self.clipnorm), metrics=['accuracy', full_number_accuracy])

    def generate_data(self, dist, size):
        """Sample ``size`` addition problems with operand length drawn from
        the categorical distribution ``dist``.

        Returns:
            (X, y, lengths): one-hot question/answer encodings plus the
            digit count of each sample.
        """
        questions = []
        expected = []
        lengths = []
        while (len(questions) < size):
            gen_digits = (1 + np.random.choice(len(dist), p=dist))
            # Random operand with exactly gen_digits digits (leading zeros
            # allowed).  The original sampled from list('') -- an empty
            # alphabet that made np.random.choice raise.
            f = (lambda : int(''.join((np.random.choice(list('0123456789')) for i in range(gen_digits)))))
            (a, b) = (f(), f())
            q = '{}+{}'.format(a, b)
            query = (q + (' ' * (self.maxlen - len(q))))
            ans = str((a + b))
            ans += (' ' * ((self.max_digits + 1) - len(ans)))
            if self.invert:
                query = query[::(- 1)]
            questions.append(query)
            expected.append(ans)
            lengths.append(gen_digits)
        # np.bool was removed from NumPy 1.24+; plain bool is equivalent.
        X = np.zeros((len(questions), self.maxlen, self.num_chars), dtype=bool)
        y = np.zeros((len(questions), (self.max_digits + 1), self.num_chars), dtype=bool)
        for (i, sentence) in enumerate(questions):
            X[i] = self.ctable.encode(sentence, maxlen=self.maxlen)
        for (i, sentence) in enumerate(expected):
            y[i] = self.ctable.encode(sentence, maxlen=(self.max_digits + 1))
        return (X, y, np.array(lengths))

    def accuracy_per_length(self, X, y, lens):
        """Exact-match accuracy bucketed by operand digit count; returns an
        array of length ``max_digits``."""
        p = self.model.predict(X, batch_size=self.batch_size)
        y = np.argmax(y, axis=(- 1))
        p = np.argmax(p, axis=(- 1))
        accs = []
        for i in range(self.max_digits):
            yl = y[(lens == (i + 1))]
            pl = p[(lens == (i + 1))]
            tf = np.all((yl == pl), axis=1)
            accs.append(np.mean(tf))
        return np.array(accs)

    def train_epoch(self, train_data, val_data=None):
        """Run one epoch of training and return the Keras history dict."""
        (train_X, train_y, train_lens) = train_data
        if (val_data is not None):
            (val_X, val_y, val_lens) = val_data
        history = self.model.fit(train_X, train_y, batch_size=self.batch_size, epochs=(self.epochs + 1), validation_data=((val_X, val_y) if val_data else None), initial_epoch=self.epochs, callbacks=self.callbacks)
        self.epochs += 1
        return history.history
((not torch.cuda.is_available()), 'test requires a GPU')
class TestGradientScaling(unittest.TestCase):
    """Exercises fairseq's FP16 loss-scaling optimizers on a 1-parameter
    linear model with hand-computed expected values.

    Requires CUDA: every tensor is created in half precision on the GPU.
    NOTE(review): the line preceding this class in the file looks like a
    stripped ``@unittest.skipIf`` decorator gating on GPU availability.
    """
    def setUp(self):
        self.x = torch.tensor([2.0]).cuda().half()
        weight = 3.0
        bias = 5.0
        self.error = 1.0
        # Target is offset by exactly `error`, so the L1 loss is 1.0.
        self.target = torch.tensor([(((self.x * weight) + bias) + self.error)]).cuda().half()
        self.loss_fn = torch.nn.L1Loss()
        self.model = torch.nn.Linear(1, 1)
        self.model.weight.data = torch.tensor([[weight]])
        self.model.bias.data = torch.tensor([bias])
        self.model.cuda().half()
        self.params = list(self.model.parameters())
        # Scale window/tolerance of 1 forces the scaler to adjust each step.
        self.cfg_dls = OmegaConf.create({'optimization': {'lr': [0.1]}, 'optimizer': {'_name': 'adam', 'lr': [0.1], 'adam_betas': '(0.9, 0.999)', 'adam_eps': 1e-08, 'weight_decay': 0.0}, 'common': {'fp16_init_scale': 1, 'fp16_scale_window': 1, 'fp16_scale_tolerance': 1, 'threshold_loss_scale': 1, 'min_loss_scale': 0.0001, 'tpu': False}})
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    def run_iter(self, model, params, optimizer):
        # One forward/backward/step cycle with exact expected values.
        optimizer.zero_grad()
        y = model(self.x)
        loss = self.loss_fn(y, self.target)
        optimizer.backward(loss)
        self.assertEqual(loss, torch.tensor(1.0, device='cuda:0', dtype=torch.float16))
        # clip_grad_norm(0) only measures the norm: sqrt(2^2 + 1^2).
        grad_norm = optimizer.clip_grad_norm(0)
        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
        optimizer.step()
        self.assertEqual(model.weight, torch.tensor([[3.0996]], device='cuda:0', dtype=torch.float16, requires_grad=True))
        self.assertEqual(model.bias, torch.tensor([5.1016], device='cuda:0', dtype=torch.float16, requires_grad=True))
        self.assertEqual(optimizer.scaler.loss_scale, 2.0)
    def test_mixed_precision(self):
        # FP16 params with a separate FP32 master copy.
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
        self.run_iter(model, params, optimizer)
        self.assertTrue(all((torch.all(fp32_params.eq(torch.tensor([3.1, 5.1], device='cuda:0', requires_grad=True))) for fp32_params in optimizer.fp32_params.values())))
    def test_memory_efficient(self):
        # Memory-efficient variant: no persistent FP32 copy of the params.
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
        self.run_iter(model, params, optimizer)
def iterate_training(args, trainer, train_trees, train_sequences, transitions, dev_trees, silver_trees, silver_sequences, foundation_cache, model_save_each_filename, evaluator):
    """Main training loop for the transition-based constituency parser.

    Trains for the configured number of epochs, scoring on ``dev_trees``
    after each epoch and saving the best model.  Supports gold + silver
    training data, three loss functions, optional multistage training
    (rebuilding the model with more pattn/lattn layers mid-run), LR
    scheduling on dev F1, and wandb logging.  Returns the (possibly
    rebuilt) trainer.
    """
    model = trainer.model
    # --- loss selection -------------------------------------------------
    if (args['loss'] == 'cross'):
        logger.info('Building CrossEntropyLoss(sum)')
        process_outputs = (lambda x: x)
        model_loss_function = nn.CrossEntropyLoss(reduction='sum')
    elif (args['loss'] == 'focal'):
        try:
            from focal_loss.focal_loss import FocalLoss
        except ImportError:
            raise ImportError('focal_loss not installed. Must `pip install focal_loss_torch` to use the --loss=focal feature')
        logger.info('Building FocalLoss, gamma=%f', args['loss_focal_gamma'])
        # FocalLoss consumes probabilities, hence the softmax here.
        process_outputs = (lambda x: torch.softmax(x, dim=1))
        model_loss_function = FocalLoss(reduction='sum', gamma=args['loss_focal_gamma'])
    elif (args['loss'] == 'large_margin'):
        logger.info('Building LargeMarginInSoftmaxLoss(sum)')
        process_outputs = (lambda x: x)
        model_loss_function = LargeMarginInSoftmaxLoss(reduction='sum')
    else:
        raise ValueError(('Unexpected loss term: %s' % args['loss']))
    device = next(model.parameters()).device
    model_loss_function.to(device)
    # Pre-built 1-element tensors, one per transition, for fast batching.
    transition_tensors = {x: torch.tensor(y, requires_grad=False, device=device).unsqueeze(0) for (y, x) in enumerate(model.transitions)}
    model.train()
    train_data = compose_train_data(train_trees, train_sequences)
    silver_data = compose_train_data(silver_trees, silver_sequences)
    if (not args['epoch_size']):
        args['epoch_size'] = len(train_data)
    if (silver_data and (not args['silver_epoch_size'])):
        args['silver_epoch_size'] = args['epoch_size']
    # --- multistage schedule: epochs at which to grow the model ---------
    if args['multistage']:
        multistage_splits = {}
        multistage_splits[(args['epochs'] // 2)] = (args['pattn_num_layers'], False)
        if LSTMModel.uses_lattn(args):
            multistage_splits[((args['epochs'] * 3) // 4)] = (args['pattn_num_layers'], True)
    oracle = None
    if (args['transition_scheme'] is TransitionScheme.IN_ORDER):
        oracle = InOrderOracle(model.root_labels, args['oracle_level'])
    leftover_training_data = []
    leftover_silver_data = []
    if (trainer.best_epoch > 0):
        logger.info('Restarting trainer with a model trained for %d epochs. Best epoch %d, f1 %f', trainer.epochs_trained, trainer.best_epoch, trainer.best_f1)
    if (model_save_each_filename and (trainer.epochs_trained == 0)):
        trainer.save((model_save_each_filename % trainer.epochs_trained), save_optimizer=True)
    for trainer.epochs_trained in range((trainer.epochs_trained + 1), (args['epochs'] + 1)):
        model.train()
        logger.info('Starting epoch %d', trainer.epochs_trained)
        update_bert_learning_rate(args, trainer.optimizer, trainer.epochs_trained)
        if args['log_norms']:
            model.log_norms()
        # Draw this epoch's gold + silver samples, carrying leftovers over.
        (leftover_training_data, epoch_data) = next_epoch_data(leftover_training_data, train_data, args['epoch_size'])
        (leftover_silver_data, epoch_silver_data) = next_epoch_data(leftover_silver_data, silver_data, args['silver_epoch_size'])
        epoch_data = (epoch_data + epoch_silver_data)
        # Sort by sequence length so batches contain similar lengths.
        epoch_data.sort(key=(lambda x: len(x[1])))
        epoch_stats = train_model_one_epoch(trainer.epochs_trained, trainer, transition_tensors, process_outputs, model_loss_function, epoch_data, oracle, args)
        (f1, _) = run_dev_set(model, dev_trees, dev_trees, args, evaluator)
        # Save whenever dev F1 improves (or on the very first evaluation).
        if ((f1 > trainer.best_f1) or ((trainer.best_epoch == 0) and (trainer.best_f1 == 0.0))):
            logger.info('New best dev score: %.5f > %.5f', f1, trainer.best_f1)
            trainer.best_f1 = f1
            trainer.best_epoch = trainer.epochs_trained
            trainer.save(args['save_name'], save_optimizer=False)
        if (epoch_stats.nans > 0):
            logger.warning('Had to ignore %d batches with NaN', epoch_stats.nans)
        logger.info('Epoch %d finished\n Transitions correct: %s\n Transitions incorrect: %s\n Total loss for epoch: %.5f\n Dev score (%5d): %8f\n Best dev score (%5d): %8f', trainer.epochs_trained, epoch_stats.transitions_correct, epoch_stats.transitions_incorrect, epoch_stats.epoch_loss, trainer.epochs_trained, f1, trainer.best_epoch, trainer.best_f1)
        # LR scheduler steps on dev F1; log any resulting change.
        old_lr = trainer.optimizer.param_groups[0]['lr']
        trainer.scheduler.step(f1)
        new_lr = trainer.optimizer.param_groups[0]['lr']
        if (old_lr != new_lr):
            logger.info('Updating learning rate from %f to %f', old_lr, new_lr)
        if args['wandb']:
            wandb.log({'epoch_loss': epoch_stats.epoch_loss, 'dev_score': f1}, step=trainer.epochs_trained)
        if args['wandb_norm_regex']:
            watch_regex = re.compile(args['wandb_norm_regex'])
            for (n, p) in model.named_parameters():
                if watch_regex.search(n):
                    wandb.log({n: torch.linalg.norm(p)})
        # --- multistage transition: reload best model, rebuild larger ---
        if (args['multistage'] and (trainer.epochs_trained in multistage_splits)):
            epochs_trained = trainer.epochs_trained
            batches_trained = trainer.batches_trained
            (stage_pattn_layers, stage_uses_lattn) = multistage_splits[epochs_trained]
            # Reload the best checkpoint so the next stage starts from it;
            # drop the args that will differ in the next stage.
            temp_args = copy.deepcopy(model.args)
            temp_args.pop('pattn_num_layers', None)
            temp_args.pop('lattn_d_proj', None)
            trainer = Trainer.load(args['save_name'], temp_args, load_optimizer=False, foundation_cache=foundation_cache)
            model = trainer.model
            logger.info('Finished stage at epoch %d. Restarting optimizer', epochs_trained)
            logger.info('Previous best model was at epoch %d', trainer.epochs_trained)
            temp_args = dict(args)
            logger.info('Switching to a model with %d pattn layers and %slattn', stage_pattn_layers, ('' if stage_uses_lattn else 'NO '))
            temp_args['pattn_num_layers'] = stage_pattn_layers
            if (not stage_uses_lattn):
                temp_args['lattn_d_proj'] = 0
            # Build a fresh model with the new layer counts, copying over
            # the old weights, then restart optimizer and scheduler.
            pt = foundation_cache.load_pretrain(args['wordvec_pretrain_file'])
            forward_charlm = foundation_cache.load_charlm(args['charlm_forward_file'])
            backward_charlm = foundation_cache.load_charlm(args['charlm_backward_file'])
            (bert_model, bert_tokenizer) = foundation_cache.load_bert(args['bert_model'])
            new_model = LSTMModel(pt, forward_charlm, backward_charlm, bert_model, bert_tokenizer, model.force_bert_saved, model.transitions, model.constituents, model.tags, model.delta_words, model.rare_words, model.root_labels, model.constituent_opens, model.unary_limit(), temp_args)
            new_model.to(device)
            new_model.copy_with_new_structure(model)
            optimizer = build_optimizer(temp_args, new_model, False)
            scheduler = build_scheduler(temp_args, optimizer)
            trainer = Trainer(new_model, optimizer, scheduler, epochs_trained, batches_trained, trainer.best_f1, trainer.best_epoch)
            add_grad_clipping(trainer, args['grad_clipping'])
            model = new_model
        if (args['checkpoint'] and args['checkpoint_save_name']):
            trainer.save(args['checkpoint_save_name'], save_optimizer=True)
        if model_save_each_filename:
            trainer.save((model_save_each_filename % trainer.epochs_trained), save_optimizer=True)
    return trainer
_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
def test_mixed_inner_loops():
    """Autodiff through a taichi kernel mixing an ndarray-range loop with a
    nested static loop.

    NOTE(review): ``mixed_inner_loops`` is presumably meant to carry a
    ``@ti.kernel`` decorator (the ``.grad`` call requires one, and the
    preceding ``_utils.test(...)`` line looks like a stripped decorator)
    -- confirm against upstream.
    """
    x = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
    arr = ti.ndarray(dtype=ti.f32, shape=5)
    loss = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
    def mixed_inner_loops(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.types.ndarray()):
        # Per element of arr: one sin(x) plus two (sin(x) + 1) terms.
        for i in arr:
            loss[0] += ti.sin(x[0])
            for j in range(2):
                loss[0] += (ti.sin(x[0]) + 1.0)
    loss.grad[0] = 1.0
    x[0] = 0.0
    mixed_inner_loops(x, arr, loss)
    mixed_inner_loops.grad(x, arr, loss)
    # With x = 0: loss = 15*sin(0) + 10 = 10; dloss/dx = 15*cos(0) = 15.
    assert (loss[0] == 10.0)
    assert (x.grad[0] == 15.0)
def _get_generic_omop_transformations() -> Sequence[Callable[([RawPatient], Optional[RawPatient])]]:
    """Return the generic OMOP patient transformations, applied in order:
    drop None events, then delta-encode."""
    pipeline: Sequence[Callable[([RawPatient], Optional[RawPatient])]] = [remove_nones, delta_encode]
    return pipeline
def local_density(self, p, m):
    """Return the local density of this quadratic form at the prime ``p``
    representing the value ``m``.

    The form is put into local normal form at ``p``, the minimal
    ``p``-valuation of its leading entries is factored out, and the
    computation is delegated to ``local_density_congruence`` on the
    primitive rescaled form.
    """
    n = self.dim()
    if (n == 0):
        raise TypeError("we do not currently handle 0-dim'l forms")
    Q_local = self.local_normal_form(p)
    if (n == 1):
        p_valuation = valuation(Q_local[(0, 0)], p)
    else:
        # For n >= 2 the minimum is taken over Q[0,0] and Q[0,1] --
        # presumably covering the leading Jordan block at p; confirm
        # against local_normal_form's output convention.
        p_valuation = min(valuation(Q_local[(0, 0)], p), valuation(Q_local[(0, 1)], p))
    # m cannot be represented when its valuation is below the form's scale.
    if ((m != 0) and (valuation(m, p) < p_valuation)):
        return QQ(0)
    # Rescale the form and m so the form becomes primitive at p.
    p_adjustment = (QQ(1) / (p ** p_valuation))
    m_prim = (QQ(m) / (p ** p_valuation))
    Q_prim = Q_local.scale_by_factor(p_adjustment)
    return Q_prim.local_density_congruence(p, m_prim)
def test_in_place_wrapper_broadcasting():
    """Assigning None to a new field broadcasts a missing value to every record."""
    array = ak.Array({'x': np.arange(3)})
    array['unknown field'] = None
    # Every record receives None for the new field, and the field is listed.
    assert (array['unknown field'].to_list() == [None, None, None])
    assert (ak.operations.fields(array) == ['x', 'unknown field'])
class AutoModelWithLMHead():
    """Placeholder for AutoModelWithLMHead used when PyTorch is not
    installed; every entry point raises via ``requires_pytorch``."""
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): kept as an instance method to mirror the original;
        # it only needs to raise the missing-dependency error.
        requires_pytorch(self)
def test_totalvi_online_update(save_path):
    """TOTALVI online update: train a reference model, then fine-tune on
    query data loaded from both the saved directory and the live model."""
    n_latent = 5
    adata1 = synthetic_iid()
    TOTALVI.setup_anndata(adata1, batch_key='batch', protein_expression_obsm_key='protein_expression', protein_names_uns_key='protein_names')
    model = TOTALVI(adata1, n_latent=n_latent, use_batch_norm='decoder')
    model.train(1, check_val_every_n_epoch=1)
    dir_path = os.path.join(save_path, 'saved_model/')
    model.save(dir_path, overwrite=True)
    # Query data with batch categories unseen during reference training,
    # loaded from the saved model directory.
    adata2 = synthetic_iid()
    adata2.obs['batch'] = adata2.obs.batch.cat.rename_categories(['batch_2', 'batch_3'])
    model2 = TOTALVI.load_query_data(adata2, dir_path)
    assert (model2.module.background_pro_alpha.requires_grad is True)
    model2.train(max_epochs=1)
    model2.get_latent_representation()
    # Same, but loading from the in-memory model, with all protein counts
    # zeroed for one batch (missing-protein scenario).
    adata2 = synthetic_iid()
    adata2.obs['batch'] = adata2.obs.batch.cat.rename_categories(['batch_2', 'batch_3'])
    adata2.obsm['protein_expression'][(adata2.obs.batch == 'batch_3')] = 0
    model3 = TOTALVI.load_query_data(adata2, model)
    model3.train(max_epochs=1)
    model3.get_latent_representation()
def destroy_window(window):
    """Destroy a GLFW window and drop its registered callbacks.

    Wraps ``glfwDestroyWindow`` and then removes the window's entry from
    every known callback repository so stale callbacks are not retained.
    """
    _glfw.glfwDestroyWindow(window)
    # Recover the integer address that keys the callback repositories.
    # NOTE(review): casting to POINTER(c_ulong) assumes unsigned long is
    # pointer-sized (true on LP64, not on 64-bit Windows) -- verify.
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_ulong)).contents.value
    for callback_repository in _callback_repositories:
        del callback_repository[window_addr]
def add_snippets_to_query(snippets, ignored_entities, query, prob_align=1.0):
    """Replace occurrences of snippet token sequences in ``query`` with the
    snippet's name, longest snippets first.

    Snippets whose sequence contains any ignored entity as a subsequence
    are skipped; each candidate occurrence is replaced with probability
    ``prob_align``.  Returns a new token list; ``query`` is not mutated.
    """
    result = copy.copy(query)
    # Longest sequences first so larger snippets win overlapping spans.
    for snippet in sorted(snippets, key=(lambda s: len(s.sequence)))[::-1]:
        seq = snippet.sequence
        skip = False
        for entity in ignored_entities:
            skip = skip or util.subsequence(entity, seq)
        if skip:
            continue
        length = len(seq)
        for start in range(len(result) - length + 1):
            if result[start:start + length] != seq:
                continue
            if random.random() < prob_align:
                prev_len = len(result)
                # Collapse the matched span into the single snippet name.
                result[start] = snippet.name
                result = result[:start + 1] + result[start + length:]
                assert len(result) == prev_len - (length - 1)
    return result
def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
    """Compute IoU/IoF overlaps between two sets of boxes via the compiled
    extension module.

    Args:
        bboxes1, bboxes2: tensors of shape (n, 4) / (m, 4); each box is a
            4-vector (presumably x1, y1, x2, y2 -- see extension docs).
        mode: 'iou' (intersection over union) or 'iof' (over foreground).
        aligned: if True, requires n == m and compares boxes pairwise,
            returning a length-n tensor; otherwise the full (n, m) matrix.
        offset: 0 or 1, forwarded to the extension kernel.

    Returns:
        Tensor of overlaps (empty when either input is empty).
    """
    mode_dict = {'iou': 0, 'iof': 1}
    assert (mode in mode_dict.keys())
    mode_flag = mode_dict[mode]
    # Empty inputs are allowed; otherwise the last dim must be 4.
    assert ((bboxes1.size((- 1)) == 4) or (bboxes1.size(0) == 0))
    assert ((bboxes2.size((- 1)) == 4) or (bboxes2.size(0) == 0))
    assert ((offset == 1) or (offset == 0))
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        assert (rows == cols)
    if ((rows * cols) == 0):
        # Degenerate case: return an empty tensor of the right shape.
        return (bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols))
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros((rows, cols))
    # The extension fills `ious` in place.
    ext_module.bbox_overlaps(bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset)
    return ious
class TextMetric(object):
    """Running ratio metric k/n displayed under a text label."""

    def __init__(self, text):
        self.text = text  # display label
        self.k = 0        # numerator
        self.n = 0        # denominator

    def reset(self):
        # Intentionally a no-op in this implementation.
        pass

    def value(self):
        """Return k/n as a float.

        Note: ``n`` is clamped in place to at least 1 (matching the
        original behaviour) so division by zero cannot occur.
        """
        self.n = max(1, self.n)
        return (1.0 * self.k) / self.n

    def show(self):
        """Format the current value with two decimal places."""
        return '%.2f' % (1.0 * self.value())
class Fixed(Masker):
    """Trivial masker with no maskable features: inputs pass through unchanged."""

    def __init__(self):
        self.shape = (None, 0)              # zero maskable columns
        self.clustering = np.zeros((0, 4))  # empty clustering tree

    def __call__(self, mask, x):
        # The mask is ignored; the sample is returned as-is.
        return ([x],)

    def mask_shapes(self, x):
        """Shapes of the masks this masker accepts (a single empty mask)."""
        return [(0,)]
def sparsity(cl_data_file):
    """Average, over classes, of the mean count of non-zero entries per
    feature vector in ``cl_data_file`` (a class -> list-of-arrays map)."""
    per_class = [
        np.mean([np.sum(vec != 0) for vec in vectors])
        for vectors in cl_data_file.values()
    ]
    return np.mean(per_class)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.