code stringlengths 101 5.91M |
|---|
class LayeredModel(ModelBase, metaclass=AutodocABCMeta):
    """Model wrapper that delegates training and state handling to an
    underlying model stored on its config (``self.config.model``).

    NOTE(review): this dump appears to have lost decorator lines —
    ``model``, ``base_model``, ``train_data``, ``_pandas_train``,
    ``require_even_sampling``, ``require_univariate`` and
    ``_default_train_config`` read like ``@property`` definitions
    (``model`` is defined twice, getter then setter), ``_resolve_args``
    takes ``cls`` like a ``@classmethod``, and the bare ``_data.setter``
    line looks like a truncated ``@<prop>.setter`` decorator. Confirm
    against the original source; as written the class body is not valid.
    """
    # Config class used by _resolve_args when building a config from kwargs.
    config_class = LayeredModelConfig
    def __new__(cls, config: LayeredModelConfig=None, model: ModelBase=None, **kwargs):
        # Dynamically create a subclass mixing in the layered base class that
        # matches the wrapped model's type, so the wrapper passes the same
        # isinstance() checks as the model it wraps. The pre-mixin class is
        # remembered in _original_cls for pickling (see __reduce__).
        original_cls = cls
        config = cls._resolve_args(config=config, model=model, **kwargs)
        if isinstance(config.model, ForecastingDetectorBase):
            cls = cls.__class__(cls.__name__, (cls, LayeredForecastingDetector), {})
            setattr(cls, '_original_cls', original_cls)
        elif isinstance(config.model, ForecasterBase):
            cls = cls.__class__(cls.__name__, (cls, LayeredForecaster), {})
            setattr(cls, '_original_cls', original_cls)
        elif isinstance(config.model, DetectorBase):
            cls = cls.__class__(cls.__name__, (cls, LayeredDetector), {})
            setattr(cls, '_original_cls', original_cls)
        return super().__new__(cls)
    def __init__(self, config: LayeredModelConfig=None, model: ModelBase=None, **kwargs):
        # Args are resolved again here because the config resolved in __new__
        # is not carried over to __init__.
        super().__init__(config=self._resolve_args(config=config, model=model, **kwargs))
    def _resolve_args(cls, config: LayeredModelConfig, model: ModelBase, **kwargs):
        """Reconcile the ``config`` and ``model`` arguments into one config.

        Exactly one source for the underlying model is accepted: either
        ``config.model`` or the explicit ``model`` argument.

        :raises RuntimeError: if neither ``config`` nor ``model`` is given,
            or if both ``config.model`` and ``model`` are given.
        """
        if ((config is None) and (model is None)):
            raise RuntimeError(f'Expected at least one of `config` or `model` when creating {cls.__name__}. Received neither.')
        elif ((config is not None) and (model is not None)):
            if (config.model is None):
                # A dict model spec is instantiated via the model factory.
                if isinstance(model, dict):
                    model = ModelFactory.create(**model)
                # Shallow-copy so the caller's config object is not mutated.
                config = copy.copy(config)
                config.model = model
            else:
                raise RuntimeError(f'Expected at most one of `config.model` or `model` when creating {cls.__name__}. Received both.')
        elif (config is None):
            config = cls.config_class(model=model, **kwargs)
        return config
    def _pandas_train(self):
        # Delegates to the wrapped model (presumably a @property originally).
        return self.model._pandas_train
    def require_even_sampling(self) -> bool:
        # The wrapper itself imposes no sampling requirement.
        return False
    def require_univariate(self) -> bool:
        # The wrapper itself imposes no dimensionality requirement.
        return False
    def model(self):
        # Getter: the wrapped model lives on the config.
        return self.config.model
    def model(self, model):
        # Setter (shadows the getter above; decorator presumably lost).
        self.config.model = model
    def base_model(self):
        # Innermost (non-layered) model, as tracked by the config.
        return self.config.base_model
    def train_data(self):
        # Train data is owned by the wrapped model, if there is one.
        return (None if (self.model is None) else self.model.train_data)
    # NOTE(review): bare expression below looks like a truncated decorator,
    # e.g. @train_data.setter — confirm against the original source.
    _data.setter
    def train_data(self, train_data):
        # Forward stored train data to the wrapped model, if any.
        if (self.model is not None):
            self.model.train_data = train_data
    def _default_train_config(self):
        # Delegates to the wrapped model.
        return self.model._default_train_config
    def reset(self):
        """Reset the wrapped model (if any) and re-initialize this wrapper."""
        if (self.model is not None):
            self.model.reset()
        self.__init__(config=self.config)
    def __getstate__(self):
        # Serialize the wrapped model's state under the 'model' key.
        state = super().__getstate__()
        state['model'] = (None if (self.model is None) else self.model.__getstate__())
        return state
    def __setstate__(self, state):
        # Restore the wrapped model's state first, then our own.
        if ('model' in state):
            model_state = state.pop('model')
            if ((self.model is None) and (model_state is not None)):
                raise ValueError(f'{type(self).__name__}.model is None, but received a non-None model state.')
            elif ((self.model is None) or (model_state is None)):
                self.model = None
            else:
                self.model.__setstate__(model_state)
        super().__setstate__(state)
    def __reduce__(self):
        # Pickle via the original (pre-mixin) class created in __new__, so
        # unpickling re-runs the dynamic subclassing.
        state_dict = self.__getstate__()
        config = state_dict.pop('config')
        return (getattr(self.__class__, '_original_cls', self.__class__), (config,), state_dict)
    def _save_state(self, state_dict: Dict[(str, Any)], filename: str=None, **save_config) -> Dict[(str, Any)]:
        # The config is saved separately; drop it from the state dict, and let
        # the wrapped model prepare its own nested state (in memory only).
        state_dict.pop('config', None)
        if (self.model is not None):
            state_dict['model'] = self.model._save_state(state_dict['model'], filename=None, **save_config)
        return super()._save_state(state_dict, filename, **save_config)
    def __getattr__(self, item):
        # Fall back to callable attributes of the base model, so the wrapper
        # transparently exposes the wrapped model's methods.
        base_model = self.base_model
        attr = getattr(base_model, item, None)
        if callable(attr):
            return attr
        return self.__getattribute__(item)
    def _train(self, train_data: pd.DataFrame, train_config=None, **kwargs):
        # Forward training to the wrapped model, passing only kwargs it accepts.
        return call_with_accepted_kwargs(self.model._train, train_data=train_data, train_config=train_config, **kwargs)
    def train_pre_process(self, train_data: TimeSeries, **kwargs) -> TimeSeries:
        """Merge this wrapper's transform with the wrapped model's transform
        (keeping only the first TemporalResample so data is not resampled
        twice), then run the wrapped model's pre-processing."""
        has_resample = False
        transforms = []
        for t in TransformSequence([self.transform, self.model.transform]).transforms:
            if isinstance(t, TemporalResample):
                # Keep only the first resample encountered.
                if (not has_resample):
                    transforms.append(t)
                    has_resample = True
            else:
                transforms.append(t)
        self.transform = Identity()
        self.model.transform = TransformSequence(transforms)
        train_data = super().train_pre_process(train_data)
        return call_with_accepted_kwargs(self.model.train_pre_process, train_data=train_data, **kwargs)
    def train_post_process(self, train_result, **kwargs):
        # Delegate post-processing to the wrapped model.
        return call_with_accepted_kwargs(self.model.train_post_process, train_result=train_result, **kwargs) |
# NOTE(review): bare tuple below looks like a stripped route decorator,
# e.g. @app.route('/direct') — confirm against the original source.
('/direct')
def direct():
    # SECURITY: compiles a caller-supplied regex and runs it. A crafted
    # pattern enables catastrophic backtracking (ReDoS, CWE-1333); validate
    # or bound the pattern before compiling.
    pattern = request.args.get('pattern')
    regex = re.compile(pattern)
    # NOTE(review): `text` is not defined in this view — presumably a module
    # global; confirm.
    return regex.search(text) |
# NOTE(review): bare call below looks like a stripped decorator, e.g.
# @test_utils.test() — confirm against the original source.
_utils.test()
def test_break_in_static_for_in_non_static_if():
    """Taichi must reject `break`-ing a statically unrolled (ti.static) loop
    from a branch that is not itself static."""
    # NOTE(review): presumably this inner function carried @ti.kernel in the
    # original source — confirm.
    def test_static_loop():
        for i in ti.static(range(5)):
            x = 0.1
            if (x == 0.0):
                break
    # Compiling/running the kernel should raise the syntax error.
    with pytest.raises(ti.TaichiSyntaxError, match='You are trying to `break` a static `for` loop'):
        test_static_loop() |
def B2Q(uchar):
    """Convert a half-width (ASCII) character to its full-width form.

    Characters outside printable ASCII (32..126) are returned unchanged;
    the space maps to the ideographic space U+3000, and every other
    printable ASCII character is shifted up by 0xFEE0 (65248).
    """
    code = ord(uchar)
    if not (32 <= code <= 126):
        return uchar
    return chr(12288 if code == 32 else code + 65248)
def test_eval_old_in_new():
    """Legacy OldPower nodes must still evaluate and differentiate correctly
    under the new graph executor."""
    params = parametrization.DirectParam((3, 2))
    var = problem.Variable(2)
    objective = OldPower(var[0], 2) + var[0]
    assert graph_executor.eval_fun(objective, params) == 12
    np.testing.assert_array_equal(graph_executor.eval_grad(objective, params), [7, 0])
class ConformerBlock(nn.Module):
    """One Conformer block: half-step FF -> self-attention -> conv module ->
    half-step FF -> LayerNorm, each sub-module pre-normed and residually
    connected (macaron-style feed-forward halves)."""

    def __init__(self, *, dim, dim_head=64, heads=8, ff_mult=4, conv_expansion_factor=2, conv_kernel_size=31, attn_dropout=0.0, ff_dropout=0.0, conv_dropout=0.0):
        super().__init__()
        # Build the raw sub-modules first (construction order preserved for
        # reproducible parameter initialization).
        ff1 = FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)
        attn = Attention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout)
        conv = ConformerConvModule(dim=dim, causal=False, expansion_factor=conv_expansion_factor, kernel_size=conv_kernel_size, dropout=conv_dropout)
        ff2 = FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)
        # Wrap with pre-norm, and scale the macaron FF halves by 0.5.
        attn = PreNorm(dim, attn)
        ff1 = Scale(0.5, PreNorm(dim, ff1))
        ff2 = Scale(0.5, PreNorm(dim, ff2))
        self.ff1 = ff1
        self.attn = attn
        self.conv = conv
        self.ff2 = ff2
        self.post_norm = nn.LayerNorm(dim)

    def forward(self, x, mask=None):
        """Apply the block with residual connections around every sub-module."""
        x = self.ff1(x) + x
        x = self.attn(x, mask=mask) + x
        x = self.conv(x) + x
        x = self.ff2(x) + x
        return self.post_norm(x)
def shuffle_choices(x):
    """Randomly permute the answer texts among the ``choice*`` keys of *x*
    (in place), updating ``x['labels']`` so it still indexes the correct
    answer within the sorted choice keys. Returns the mutated mapping."""
    keys = sorted(k for k in x if 'choice' in k)
    texts = [x[k] for k in keys]
    answer = texts[x['labels']]
    random.shuffle(texts)
    for key, new_text in zip(keys, texts):
        x[key] = new_text
    x['labels'] = texts.index(answer)
    return x
def _nntxt_file_loader(ctx, file_loaders, nnp, filename, ext):
    """Parse an .nntxt protobuf-text file into ``ctx.proto``, then hand any
    embedded parameters to the parameter loader (unless excluded)."""
    if (not ctx.parameter_only):
        with get_file_handle_load(nnp, filename, ext) as f:
            try:
                text_format.Merge(f.read(), ctx.proto)
            except:
                # Log context for the common non-ASCII-path failure mode,
                # then re-raise the original error.
                logger.critical('Failed to read {}.'.format(filename))
                logger.critical('2 byte characters may be used for file name or folder name.')
                raise
    # Parameters embedded in the nntxt are loaded via the dedicated loader.
    if (len(ctx.proto.parameter) > 0):
        if (not ctx.exclude_parameter):
            _nntxt_parameter_file_loader(ctx, file_loaders, nnp, filename, ext) |
def convert_example_to_features(example, max_seq_length, tokenizer, mlm_loss):
    """Convert one example into padded InputFeatures.

    Places per-column labels on the SEP tokens, optionally applies
    random-word masking for an MLM objective, converts tokens to ids, and
    pads every sequence out to ``max_seq_length``.

    NOTE(review): padding appends token id 1 — id 1 is the pad id for some
    vocabularies (e.g. RoBERTa) but not others; confirm it matches
    ``tokenizer``'s actual pad token.
    """
    tokens_a = example.tokens_a[:max_seq_length]
    raw_label = example.raw_label
    # Column boundaries are the SEP tokens; the final SEP is dropped so there
    # is exactly one boundary per column label.
    col_ids = [i for (i, x) in enumerate(tokens_a) if (x == SEP_TOKEN)][:(- 1)]
    if (len(col_ids) != len(raw_label)):
        print('tokens_a: ', tokens_a)
        print('raw_label: ', raw_label)
    assert (len(col_ids) == len(raw_label))
    # -1 marks positions that carry no column label (ignored by the loss).
    col_label_ids = ([(- 1)] * len(tokens_a))
    for (cid, clb) in zip(col_ids, raw_label):
        col_label_ids[cid] = clb
    # random_word() yields (masked tokens, per-token LM labels).
    (tokens_b, lm_label_ids) = random_word(tokens_a, tokenizer)
    if mlm_loss:
        # Use the masked sequence only when training with the MLM objective.
        tokens_a = tokens_b
    input_ids = tokenizer.convert_tokens_to_ids(tokens_a)
    input_mask = ([1] * len(input_ids))
    # Pad all parallel sequences to the fixed length.
    while (len(input_ids) < max_seq_length):
        input_ids.append(1)
        input_mask.append(0)
        col_label_ids.append((- 1))
        lm_label_ids.append((- 1))
    assert (len(input_ids) == max_seq_length)
    assert (len(input_mask) == max_seq_length)
    assert (len(lm_label_ids) == max_seq_length)
    assert (len(col_label_ids) == max_seq_length)
    # Log the first few examples for manual inspection.
    if (example.guid < 5):
        print('If using MLM loss: ', mlm_loss)
        print('*** Example ***')
        print(('guid: %s' % example.guid))
        print(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
        print(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
        print(('col_label_ids: %s' % ' '.join([str(x) for x in col_label_ids])))
        print(('lm_label_ids: %s' % ' '.join([str(x) for x in lm_label_ids])))
    features = InputFeatures(input_ids=input_ids, input_mask=input_mask, col_label_ids=col_label_ids, lm_label_ids=lm_label_ids)
    return features |
def node_to_text(test, f):
    """Recursively render a parsed test-suite XML node as plain text into *f*.

    Writes one summary line per suite, a details section per
    ``FailureDetails`` child, then recurses into nested ``Test`` nodes.
    """
    result, name, reason, time_real = read_test(test)
    if reason:
        reason = ' (%s)' % reason
    f.write('%s: Test Suite "%s" (%s)%s\n' % (result, name, time_real, reason))
    for details in test.findall('FailureDetails'):
        f.write(' Details:\n')
        for field in ('Message', 'Condition', 'Actual', 'Limit', 'File', 'Line'):
            f.write('  %s: %s\n' % (field, details.find(field).text))
    for child in test.findall('Test'):
        node_to_text(child, f)
def test_get_max_value_key():
    """get_max_value_key must return the key whose value is largest."""
    mapping = {1: 10, 2: (- 10), 3: 1000, 4: 100, 5: 1}
    assert get_max_value_key(mapping) == 3
def parse_code_example(code_lines):
    """Split a documentation code example into (code_samples, outputs).

    If the first line starts with a doctest prompt, lines alternate between
    prompt-prefixed code and un-prefixed expected output; otherwise the
    whole example is a single code sample. The 3-char prompt prefix plus one
    following character (presumably a space — confirm against
    DOCTEST_PROMPTS) is stripped from code lines.

    :return: (list of code samples, list of expected outputs), each entry
        stripped of surrounding whitespace.
    """
    has_doctest = (code_lines[0][:3] in DOCTEST_PROMPTS)
    code_samples = []
    outputs = []
    # State machine: in_code toggles between collecting code and output.
    in_code = True
    current_bit = []
    for line in code_lines:
        # code -> output: a non-empty, non-prompt line ends the current code
        # sample (only meaningful for doctest-style examples).
        if (in_code and has_doctest and (not is_empty_line(line)) and (line[:3] not in DOCTEST_PROMPTS)):
            code_sample = '\n'.join(current_bit)
            code_samples.append(code_sample.strip())
            in_code = False
            current_bit = []
        # output -> code: a prompt line ends the current output block.
        elif ((not in_code) and (line[:3] in DOCTEST_PROMPTS)):
            output = '\n'.join(current_bit)
            outputs.append(output.strip())
            in_code = True
            current_bit = []
        if (line[:3] in DOCTEST_PROMPTS):
            # Drop the 3-char prompt and the character after it.
            line = line[4:]
        current_bit.append(line)
    # Flush whatever is still buffered at end of input.
    if in_code:
        code_sample = '\n'.join(current_bit)
        code_samples.append(code_sample.strip())
    else:
        output = '\n'.join(current_bit)
        outputs.append(output.strip())
    return (code_samples, outputs) |
def unstack_state_dict(state_dict: StateDict, prefix: Optional[str]=None) -> StateDict:
    """Expand stacked entries under *prefix* into per-index entries.

    Each non-None value whose key starts with the (normalized) prefix is
    assumed iterable; element *i* is re-keyed as
    ``{prefix}{i}.{rest-of-key}``. All other entries are copied unchanged.
    """
    result: StateDict = {}
    prefix = apply_prefix(prefix, '')
    assert (prefix is not None)
    plen = len(prefix)
    for key, value in state_dict.items():
        if value is not None and key.startswith(prefix):
            for idx, item in enumerate(value):
                result[f'{prefix}{idx}.{key[plen:]}'] = item
        else:
            result[key] = value
    return result
class AdditiveBlockFunction2(torch.autograd.Function):
    """Memory-saving additive coupling block:
    ``y1 = x1 + Fm(x2)``, ``y2 = x2 + Gm(y1)``.

    The forward pass frees the input halves as soon as possible and the
    backward pass reconstructs them from the output, trading recomputation
    for activation memory.

    NOTE(review): ``forward``/``backward`` take ``ctx`` first but carry no
    ``@staticmethod`` decorator here — presumably stripped in this dump.
    """
    def forward(ctx, xin, Fm, Gm, *weights):
        """Compute the coupling output.

        :param xin: input; channel dim (dim 1) must be even to split in two.
        :param Fm: module applied to the second input half.
        :param Gm: module applied to the first output half.
        :param weights: Fm/Gm parameters, passed only so autograd tracks them.
        """
        assert ((xin.shape[1] % 2) == 0)
        # Stash the coupling modules for use in backward.
        ctx.Fm = Fm
        ctx.Gm = Gm
        with torch.no_grad():
            x = xin.detach()
            (x1, x2) = torch.chunk(x, 2, dim=1)
            (x1, x2) = (x1.contiguous(), x2.contiguous())
            fmr = Fm.forward(x2)
            y1 = (x1 + fmr)
            # Release x1's storage immediately to cut peak memory.
            x1.set_()
            del x1
            gmr = Gm.forward(y1)
            y2 = (x2 + gmr)
            x2.set_()
            del x2
            output = torch.cat([y1, y2], dim=1).detach_()
        # Backward reconstructs x1/x2 from `output` rather than reading x.
        ctx.save_for_backward(x, output)
        return output
    def backward(ctx, grad_output):
        """Invert the coupling to recover the inputs, then compute grads for
        (xin, Fm params, Gm params); the Fm/Gm argument slots get None."""
        (Fm, Gm) = (ctx.Fm, ctx.Gm)
        (x, output) = ctx.saved_tensors
        with torch.no_grad():
            (y1, y2) = torch.chunk(output, 2, dim=1)
            (y1, y2) = (y1.contiguous(), y2.contiguous())
        assert ((grad_output.shape[1] % 2) == 0)
        (y1_grad, y2_grad) = torch.chunk(grad_output, 2, dim=1)
        (y1_grad, y2_grad) = (y1_grad.contiguous(), y2_grad.contiguous())
        with set_grad_enabled(True):
            # Invert: x2 = y2 - Gm(y1), x1 = y1 - Fm(x2), using detached
            # *_stop leaves so gradients can be taken w.r.t. them.
            z1_stop = y1.detach()
            z1_stop.requires_grad = True
            G_z1 = Gm.forward(z1_stop)
            x2 = (y2 - G_z1)
            x2_stop = x2.detach()
            x2_stop.requires_grad = True
            F_x2 = Fm.forward(x2_stop)
            x1 = (y1 - F_x2)
            x1_stop = x1.detach()
            x1_stop.requires_grad = True
            # Re-express the outputs in terms of the stop-grad leaves.
            y1 = (x1_stop + F_x2)
            y2 = (x2_stop + G_z1)
            # Backprop through Gm first; z1 accumulates both branches' grads.
            dd = torch.autograd.grad(y2, ((z1_stop,) + tuple(Gm.parameters())), y2_grad, retain_graph=False)
            z1_grad = (dd[0] + y1_grad)
            GWgrads = dd[1:]
            # Then through Fm, feeding the accumulated z1 gradient.
            dd = torch.autograd.grad(y1, ((x1_stop, x2_stop) + tuple(Fm.parameters())), z1_grad, retain_graph=False)
            FWgrads = dd[2:]
            x2_grad = (dd[1] + y2_grad)
            x1_grad = dd[0]
            grad_input = torch.cat([x1_grad, x2_grad], dim=1)
        return (((grad_input, None, None) + FWgrads) + GWgrads) |
def dump_current_scores_of_devtest(args, m, xp):
    """Score the dev and test splits with model *m*, print the thresholded
    accuracy for each, and optionally append the raw scores to
    ``args.margin_file``.

    NOTE(review): relies on module-level ``dev_data``/``test_data``/
    ``glinks``/``gold_relations``/``gedges``/``chunked``/``trace`` —
    confirm their definitions in the full file.
    """
    for mode in ['dev', 'test']:
        if (mode == 'dev'):
            current_data = dev_data
        if (mode == 'test'):
            current_data = test_data
        (scores, accuracy) = (list(), list())
        for batch in chunked(current_data, args.test_batch_size):
            # Inference only: no train-mode behavior, no gradient graph.
            with chainer.using_config('train', False), chainer.no_backprop_mode():
                current_score = m.get_scores(batch, glinks, gold_relations, gedges, xp, mode)
                for (v, (h, r, t, l)) in zip(current_score.data, batch):
                    # CSV record: head, relation, tail, label, score.
                    values = (h, r, t, l, v)
                    values = map(str, values)
                    values = ','.join(values)
                    scores.append(values)
                    # A score below the threshold counts as a positive
                    # prediction; accuracy is agreement with gold label l == 1.
                    if (v < args.threshold):
                        if (l == 1):
                            accuracy.append(1.0)
                        else:
                            accuracy.append(0.0)
                    elif (l == 1):
                        accuracy.append(0.0)
                    else:
                        accuracy.append(1.0)
                del current_score
        trace('\t ', mode, (sum(accuracy) / len(accuracy)))
        if (args.margin_file != ''):
            with open(args.margin_file, 'a') as fp:
                fp.write((((mode + ':') + ' '.join(scores)) + '\n')) |
def create_feature_columns() -> Tuple[(list, list, list)]:
    """Build the dense, categorical-embedding, and label feature columns for
    the read-comment model.

    :return: (dense_feature_columns, category_feature_columns,
        label_feature_columns)
    """
    # All dense features are numeric with a 0.0 default.
    dense_names = [
        'videoplayseconds',
        'u_read_comment_7d_sum', 'u_like_7d_sum', 'u_click_avatar_7d_sum',
        'u_forward_7d_sum', 'u_comment_7d_sum', 'u_follow_7d_sum',
        'u_favorite_7d_sum',
        'i_read_comment_7d_sum', 'i_like_7d_sum', 'i_click_avatar_7d_sum',
        'i_forward_7d_sum', 'i_comment_7d_sum', 'i_follow_7d_sum',
        'i_favorite_7d_sum',
        'c_user_author_read_comment_7d_sum',
    ]
    dense_feature_columns = [fc.numeric_column(name, default_value=0.0) for name in dense_names]

    def vocab_column(key, vocab_file):
        # Categorical column backed by a vocabulary file under FLAGS.vocabulary_dir.
        return fc.categorical_column_with_vocabulary_file(key, os.path.join(FLAGS.vocabulary_dir, vocab_file))

    userid = vocab_column('userid', 'userid.txt')
    feedid = vocab_column('feedid', 'feedid.txt')
    device = vocab_column('device', 'device.txt')
    authorid = vocab_column('authorid', 'authorid.txt')
    bgm_song_id = vocab_column('bgm_song_id', 'bgm_song_id.txt')
    bgm_singer_id = vocab_column('bgm_singer_id', 'bgm_singer_id.txt')
    manual_tag_list = vocab_column('manual_tag_list', 'manual_tag_id.txt')
    # The 7-day read-comment history is a sequence of feed ids.
    his_read_comment_7d_seq = vocab_column('his_read_comment_7d_seq', 'feedid.txt')

    category_feature_columns = [
        fc.embedding_column(userid, FLAGS.embedding_dim),
        fc.embedding_column(device, FLAGS.embedding_dim),
        fc.embedding_column(authorid, FLAGS.embedding_dim),
        fc.embedding_column(bgm_song_id, FLAGS.embedding_dim),
        fc.embedding_column(bgm_singer_id, FLAGS.embedding_dim),
        fc.embedding_column(manual_tag_list, FLAGS.embedding_dim, combiner='mean'),
    ]
    # feedid and the user's read-comment history share one embedding table.
    category_feature_columns += fc.shared_embedding_columns([feedid, his_read_comment_7d_seq], FLAGS.embedding_dim, combiner='mean')

    label_feature_columns = [fc.numeric_column('read_comment', default_value=0.0)]
    return (dense_feature_columns, category_feature_columns, label_feature_columns)
def compute_on_dataset(model, data_loader, device, timer=None):
    """Run *model* in eval mode over every batch of *data_loader* and return
    its outputs (moved to CPU), keyed by image id.

    When bbox-augmented testing is enabled in the config, the corresponding
    augmented-inference path is used instead of a plain forward pass.
    """
    model.eval()
    cpu_device = torch.device('cpu')
    results_dict = {}
    for batch in tqdm(data_loader):
        images, targets, image_ids = batch
        with torch.no_grad():
            if timer:
                timer.tic()
            if cfg.TEST.BBOX_AUG.ENABLED:
                detect = im_detect_bbox_aug_vote if cfg.TEST.BBOX_AUG.VOTE else im_detect_bbox_aug
                output = detect(model, images, device)
            else:
                output = model(images.to(device))
            if timer:
                # Wait for pending GPU work so the timing is meaningful.
                torch.cuda.synchronize()
                timer.toc()
            output = [o.to(cpu_device) for o in output]
        for img_id, result in zip(image_ids, output):
            results_dict[img_id] = result
    return results_dict
class FCOSFPN(nn.Module):
    """FPN head for FCOS: laterally projects C3-C5 to a common channel width,
    builds the top-down pathway with nearest-neighbor upsampling, and adds
    stride-2 P6/P7 levels on top.

    Fix: the ``in_channels`` default was a mutable list; it is now a tuple
    (same values, same indexing behavior, no shared-mutable-default risk).
    """

    def __init__(self, in_channels=(512, 1024, 2048), out_channels=256):
        """
        :param in_channels: channel counts of the (C3, C4, C5) inputs.
        :param out_channels: common channel width of every output level.
        """
        super(FCOSFPN, self).__init__()
        # 1x1 lateral projections onto the common width.
        self.prj_3 = nn.Conv2d(in_channels[0], out_channels, kernel_size=1)
        self.prj_4 = nn.Conv2d(in_channels[1], out_channels, kernel_size=1)
        self.prj_5 = nn.Conv2d(in_channels[2], out_channels, kernel_size=1)
        # 3x3 smoothing convs applied after the top-down merge.
        self.conv_5 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        # Extra coarse levels P6/P7, each halving the spatial size.
        self.conv_out6 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
        self.conv_out7 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
        self.init_weights()

    def upsamplelike(self, inputs):
        """Resize inputs[0] to the spatial size of inputs[1] (nearest)."""
        src, target = inputs
        return F.interpolate(src, size=(target.shape[2], target.shape[3]), mode='nearest')

    def init_weights(self):
        """Kaiming-uniform init (a=1) for every conv; zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        :param x: sequence (C3, C4, C5) of backbone feature maps.
        :return: [P3, P4, P5, P6, P7] pyramid levels, coarsest last.
        """
        C3, C4, C5 = x
        P3 = self.prj_3(C3)
        P4 = self.prj_4(C4)
        P5 = self.prj_5(C5)
        # Top-down pathway: upsample the coarser level and add.
        P4 = P4 + self.upsamplelike([P5, C4])
        P3 = P3 + self.upsamplelike([P4, C3])
        P3 = self.conv_3(P3)
        P4 = self.conv_4(P4)
        P5 = self.conv_5(P5)
        P6 = self.conv_out6(P5)
        P7 = self.conv_out7(F.relu(P6))
        return [P3, P4, P5, P6, P7]
def add_cb_config(parser):
    """Register convergence-basin (CB) visualization options on *parser*.

    Fix: ``--reset_cb`` carried a copy-pasted help string ('Save
    visualizations.'), identical to ``--save_img``; it now describes the
    flag's actual purpose.
    """
    parser.add_argument('--cb_dimension', default='2D', type=str, choices=('1D', '2D', '6D'), help='Select which dimension to visualize for convergence basin.\n')
    parser.add_argument('--save_img', action='store_true', help='Save visualizations.\n')
    parser.add_argument('--reset_cb', action='store_true', help='Recompute the convergence basin from scratch instead of reusing cached results.\n')
    parser.add_argument('--pert_samples', default=31, type=int, help='perturbation samples in each pose dimension')
class TransformStack(InvertibleTransformBase):
    """Applies several transforms to the same input "in parallel" and stacks
    their outputs into one multivariate time series.

    NOTE(review): ``proper_inversion`` and ``requires_inversion_state`` read
    like @property definitions whose decorators were lost in this dump —
    ``__call__`` accesses ``self.proper_inversion`` without calling it.
    """
    def __init__(self, transforms, *, check_aligned=True):
        """
        :param transforms: list of TransformBase instances (or dicts
            understood by TransformFactory.create).
        :param check_aligned: whether to verify the stacked outputs are
            time-aligned.
        """
        super().__init__()
        self.transforms = []
        for t in transforms:
            assert isinstance(t, (TransformBase, dict)), f'Expected all transforms to be instances of TransformBase, or dict, but got {transforms}'
            if isinstance(t, dict):
                t = TransformFactory.create(**t)
            self.transforms.append(t)
        self.check_aligned = check_aligned
    def proper_inversion(self):
        # The stack is properly invertible iff any member transform is.
        return any((f.proper_inversion for f in self.transforms))
    def requires_inversion_state(self):
        # invert() needs to know which member's output slice to invert.
        return True
    def train(self, time_series: TimeSeries):
        # Each member is trained on the same (untransformed) input.
        for f in self.transforms:
            f.train(time_series)
    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        """Apply every member transform and stack the results.

        Records the inversion state: which member to invert with (the first
        properly invertible one, else member 0), that member's column range
        in the stacked output, and the original variable names.
        """
        ts_list = [f(time_series) for f in self.transforms]
        if self.proper_inversion:
            idx = min((i for (i, f) in enumerate(self.transforms) if f.proper_inversion))
            # [d0, df) is the column slice this member occupies in the stack.
            d0 = sum((ts.dim for ts in ts_list[:idx]))
            df = (d0 + ts_list[idx].dim)
            self.inversion_state = (idx, d0, df, time_series.names)
        else:
            self.inversion_state = (0, 0, ts_list[0].dim, time_series.names)
        return TimeSeries.from_ts_list(ts_list, check_aligned=self.check_aligned)
    def invert(self, time_series: TimeSeries, retain_inversion_state=False) -> TimeSeries:
        """Invert using the member transform chosen by the last __call__.

        :raises RuntimeError: if no inversion state has been recorded.
        """
        if (self.inversion_state is None):
            raise RuntimeError('Inversion state not set. Please call this transform on an input time series before calling invert(). If you are trying to call invert() a second time, please supply the option `retain_inversion_state=True` to the first call.')
        (idx, d0, df, names) = self.inversion_state
        # Slice out the chosen member's columns and invert only those.
        ts = TimeSeries(OrderedDict(((n, time_series.univariates[n]) for n in time_series.names[d0:df])))
        inverted = self.transforms[idx].invert(ts, retain_inversion_state)
        assert (inverted.dim == len(names))
        # Restore the original variable names on the inverted series.
        inverted = TimeSeries(OrderedDict(((name, var) for (name, var) in zip(names, inverted.univariates))))
        if (not retain_inversion_state):
            self.inversion_state = None
        return inverted
    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        # invert() above bypasses the base-class _invert path; reaching this
        # is a programming error, so warn and apply the identity.
        logger.warning(f'_invert() should not be called by a transform of type {type(self).__name__}. Applying the identity.', stack_info=True)
        return time_series
    def __repr__(self):
        return (('TransformStack(\n ' + ',\n '.join([repr(f) for f in self.transforms])) + '\n)') |
def resolve_includes(source):
    """Return the lines of *source* with include directives (matched by the
    module-level ``include_src_re``) recursively replaced by the included
    file's lines.

    Relative include paths are resolved against *source*'s directory; a
    directive whose target does not exist is kept verbatim.
    """
    base_dir = os.path.dirname(source)
    lines = []
    with open(source) as fid:
        for line in fid:
            match = include_src_re.match(line)
            if not match:
                lines.append(line)
                continue
            path = match.group('name')
            if not os.path.isabs(path):
                path = os.path.join(base_dir, path)
            if os.path.isfile(path):
                print('Including file', path)
                lines.extend(resolve_includes(path))
            else:
                # Target missing: keep the directive line unchanged.
                lines.append(line)
    return lines
def test_partial_fstring():
    """Regression test: an f-string referencing both a closure constant (N)
    and a tasklet-local variable (i) must work inside a DaCe tasklet.

    NOTE(review): ``fprog_partial`` presumably carried a @dace.program
    decorator lost in this dump — confirm against the original source.
    """
    N = 5
    def fprog_partial():
        with dace.tasklet:
            i = 2
            printf(f'''hi {N} {i}
''')
    fprog_partial() |
def get_activations(image_iterator, images, model, verbose=True):
    """Run *model* over batches from *image_iterator* and return an
    (images, FEATURE_DIM) array of extracted features.

    *images* is the expected total sample count (asserted at the end).
    Progress is printed only when stdout is a TTY and *verbose* is True.
    """
    model.eval()
    if not sys.stdout.isatty():
        verbose = False
    features = np.empty((images, FEATURE_DIM))
    cursor = 0
    started = time.time()
    for batch in image_iterator:
        if not isinstance(batch, torch.Tensor):
            # (tensor, label, ...) style batches: keep only the tensor.
            batch = batch[0]
        count = batch.shape[0]
        with torch.no_grad():
            batch = batch.to(device)
            pred = model(batch)[0]
        features[cursor:cursor + count] = pred.cpu().numpy().reshape(count, (- 1))
        cursor += count
        if verbose:
            print('\rProcessed: {} time: {:.2f}'.format(cursor, (time.time() - started)), end='', flush=True)
    assert cursor == images
    if verbose:
        print(' done')
    return features
class Partition1(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1]
self.lookup = {'l_0': 'bert.encoder.5.intermediate.dense', 'l_1': 'bert.encoder.5.output.dense', 'l_2': 'bert.encoder.5.output.dropout', 'l_3': 'bert.encoder.5.output.LayerNorm', 'l_4': 'bert.encoder.6.attention.self.query', 'l_5': 'bert.encoder.6.attention.self.key', 'l_6': 'bert.encoder.6.attention.self.value', 'l_7': 'bert.encoder.6.attention.self.softmax', 'l_8': 'bert.encoder.6.attention.self.dropout', 'l_9': 'bert.encoder.6.attention.output.dense', 'l_10': 'bert.encoder.6.attention.output.dropout', 'l_11': 'bert.encoder.6.attention.output.LayerNorm', 'l_12': 'bert.encoder.6.intermediate.dense', 'l_13': 'bert.encoder.6.output.dense', 'l_14': 'bert.encoder.6.output.dropout', 'l_15': 'bert.encoder.6.output.LayerNorm', 'l_16': 'bert.encoder.7.attention.self.query', 'l_17': 'bert.encoder.7.attention.self.key', 'l_18': 'bert.encoder.7.attention.self.value', 'l_19': 'bert.encoder.7.attention.self.softmax', 'l_20': 'bert.encoder.7.attention.self.dropout', 'l_21': 'bert.encoder.7.attention.output.dense', 'l_22': 'bert.encoder.7.attention.output.dropout', 'l_23': 'bert.encoder.7.attention.output.LayerNorm', 'l_24': 'bert.encoder.7.intermediate.dense', 'l_25': 'bert.encoder.7.output.dense', 'l_26': 'bert.encoder.7.output.dropout', 'l_27': 'bert.encoder.7.output.LayerNorm', 'l_28': 'bert.encoder.8.attention.self.query', 'l_29': 'bert.encoder.8.attention.self.key', 'l_30': 'bert.encoder.8.attention.self.value', 'l_31': 'bert.encoder.8.attention.self.softmax', 'l_32': 'bert.encoder.8.attention.self.dropout', 'l_33': 'bert.encoder.8.attention.output.dense', 'l_34': 'bert.encoder.8.attention.output.dropout', 'l_35': 'bert.encoder.8.attention.output.LayerNorm', 'l_36': 'bert.encoder.8.intermediate.dense', 'l_37': 'bert.encoder.8.output.dense', 'l_38': 'bert.encoder.8.output.dropout', 'l_39': 'bert.encoder.8.output.LayerNorm', 'l_40': 'bert.encoder.9.attention.self.query', 'l_41': 'bert.encoder.9.attention.self.key', 'l_42': 'bert.encoder.9.attention.self.value', 'l_43': 
'bert.encoder.9.attention.self.softmax', 'l_44': 'bert.encoder.9.attention.self.dropout', 'l_45': 'bert.encoder.9.attention.output.dense', 'l_46': 'bert.encoder.9.attention.output.dropout', 'l_47': 'bert.encoder.9.attention.output.LayerNorm', 'l_48': 'bert.encoder.9.intermediate.dense', 'l_49': 'bert.encoder.9.output.dense', 'l_50': 'bert.encoder.9.output.dropout', 'l_51': 'bert.encoder.9.output.LayerNorm', 'l_52': 'bert.encoder.10.attention.self.query', 'l_53': 'bert.encoder.10.attention.self.key', 'l_54': 'bert.encoder.10.attention.self.value', 'l_55': 'bert.encoder.10.attention.self.softmax', 'l_56': 'bert.encoder.10.attention.self.dropout', 'l_57': 'bert.encoder.10.attention.output.dense', 'l_58': 'bert.encoder.10.attention.output.dropout', 'l_59': 'bert.encoder.10.attention.output.LayerNorm', 'l_60': 'bert.encoder.10.intermediate.dense', 'l_61': 'bert.encoder.10.output.dense', 'l_62': 'bert.encoder.10.output.dropout', 'l_63': 'bert.encoder.10.output.LayerNorm', 'l_64': 'bert.encoder.11.attention.self.query', 'l_65': 'bert.encoder.11.attention.self.key', 'l_66': 'bert.encoder.11.attention.self.value'}
self.to(self.device)
def forward(self, *args):
    """Run this pipeline-parallel partition of a BERT encoder.

    This method is machine-generated (see the ``self.lookup`` table mapping
    ``l_*`` submodules to ``bert.encoder.{5..11}.*`` layers) and must not be
    hand-reordered: the ``t_*`` temporaries are aggressively reused.

    The partition covers: the feed-forward tail of encoder layer 5, full
    encoder layers 6-10, and the first half of layer 11's self-attention
    (q/k/v projections and the raw ``q @ k^T`` scores; scaling, mask and
    softmax for layer 11 happen in the next partition).

    Args:
        *args: flattened inputs; ``unflatten`` recovers ``(attention_mask, x0)``
            where ``x0`` is the hidden-state tensor entering layer 5's FFN.

    Returns:
        list: flattened ``(t_0, t_2, t_4)`` — layer-10 output hidden states,
        layer-11 value tensor split into heads, and layer-11 raw attention
        scores.
    """
    (attention_mask, x0) = unflatten(args, self.input_structure)
    # Tail of encoder layer 5: intermediate -> gelu -> output -> dropout,
    # then residual add and LayerNorm.
    t_0 = self.l_0(x0)
    t_0 = torch.nn.functional.gelu(t_0)
    t_0 = self.l_1(t_0)
    t_0 = self.l_2(t_0)
    t_0 = (t_0 + x0)
    t_0 = self.l_3(t_0)
    # Encoder layer 6 self-attention: q (l_4), k (l_5), v (l_6) projections.
    t_1 = self.l_4(t_0)
    t_2 = self.l_5(t_0)
    t_3 = self.l_6(t_0)
    t_4 = t_1.size()
    t_5 = t_2.size()
    t_6 = t_3.size()
    # Split heads: (..., 1024) -> (..., 16, 64), then move heads before seq.
    t_4 = t_4[slice(None, (- 1), None)]
    t_4 = (t_4 + (16, 64))
    t_7 = t_4[0]
    t_8 = t_4[1]
    t_9 = t_4[2]
    t_4 = t_4[3]
    t_4 = t_1.view(t_7, t_8, t_9, t_4)
    t_4 = t_4.permute(0, 2, 1, 3)
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (16, 64))
    t_9 = t_5[0]
    t_8 = t_5[1]
    t_7 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_2.view(t_9, t_8, t_7, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_6 = t_6[slice(None, (- 1), None)]
    t_6 = (t_6 + (16, 64))
    t_7 = t_6[0]
    t_8 = t_6[1]
    t_9 = t_6[2]
    t_6 = t_6[3]
    t_6 = t_3.view(t_7, t_8, t_9, t_6)
    t_6 = t_6.permute(0, 2, 1, 3)
    # Scaled dot-product attention: scores / sqrt(64) + mask, softmax (l_7),
    # attention dropout (l_8), then weighted sum of values.
    t_5 = t_5.transpose((- 1), (- 2))
    t_5 = torch.matmul(t_4, t_5)
    t_4 = math.sqrt(64)
    t_4 = (t_5 / t_4)
    t_4 = (t_4 + attention_mask)
    t_4 = self.l_7(t_4)
    t_4 = self.l_8(t_4)
    t_6 = torch.matmul(t_4, t_6)
    # Merge heads back to (..., 1024); output projection, dropout, residual,
    # LayerNorm.
    t_6 = t_6.permute(0, 2, 1, 3)
    t_6 = t_6.contiguous()
    t_4 = t_6.size()
    t_4 = t_4[slice(None, (- 2), None)]
    t_4 = (t_4 + (1024,))
    t_5 = t_4[0]
    t_9 = t_4[1]
    t_4 = t_4[2]
    t_4 = t_6.view(t_5, t_9, t_4)
    t_4 = self.l_9(t_4)
    t_4 = self.l_10(t_4)
    t_0 = (t_4 + t_0)
    t_0 = self.l_11(t_0)
    # Encoder layer 6 feed-forward block.
    t_4 = self.l_12(t_0)
    t_4 = torch.nn.functional.gelu(t_4)
    t_4 = self.l_13(t_4)
    t_4 = self.l_14(t_4)
    t_0 = (t_4 + t_0)
    t_0 = self.l_15(t_0)
    # Encoder layer 7 self-attention (identical pattern to layer 6).
    t_4 = self.l_16(t_0)
    t_9 = self.l_17(t_0)
    t_5 = self.l_18(t_0)
    t_6 = t_4.size()
    t_8 = t_9.size()
    t_7 = t_5.size()
    t_6 = t_6[slice(None, (- 1), None)]
    t_6 = (t_6 + (16, 64))
    t_3 = t_6[0]
    t_2 = t_6[1]
    t_1 = t_6[2]
    t_6 = t_6[3]
    t_6 = t_4.view(t_3, t_2, t_1, t_6)
    t_6 = t_6.permute(0, 2, 1, 3)
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (16, 64))
    t_1 = t_8[0]
    t_2 = t_8[1]
    t_3 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_9.view(t_1, t_2, t_3, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_7 = t_7[slice(None, (- 1), None)]
    t_7 = (t_7 + (16, 64))
    t_3 = t_7[0]
    t_2 = t_7[1]
    t_1 = t_7[2]
    t_7 = t_7[3]
    t_7 = t_5.view(t_3, t_2, t_1, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_8 = t_8.transpose((- 1), (- 2))
    t_8 = torch.matmul(t_6, t_8)
    t_6 = math.sqrt(64)
    t_6 = (t_8 / t_6)
    t_6 = (t_6 + attention_mask)
    t_6 = self.l_19(t_6)
    t_6 = self.l_20(t_6)
    t_7 = torch.matmul(t_6, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_7 = t_7.contiguous()
    t_6 = t_7.size()
    t_6 = t_6[slice(None, (- 2), None)]
    t_6 = (t_6 + (1024,))
    t_8 = t_6[0]
    t_1 = t_6[1]
    t_6 = t_6[2]
    t_6 = t_7.view(t_8, t_1, t_6)
    t_6 = self.l_21(t_6)
    t_6 = self.l_22(t_6)
    t_0 = (t_6 + t_0)
    t_0 = self.l_23(t_0)
    # Encoder layer 7 feed-forward block.
    t_6 = self.l_24(t_0)
    t_6 = torch.nn.functional.gelu(t_6)
    t_6 = self.l_25(t_6)
    t_6 = self.l_26(t_6)
    t_0 = (t_6 + t_0)
    t_0 = self.l_27(t_0)
    # Encoder layer 8 self-attention.
    t_6 = self.l_28(t_0)
    t_1 = self.l_29(t_0)
    t_8 = self.l_30(t_0)
    t_7 = t_6.size()
    t_2 = t_1.size()
    t_3 = t_8.size()
    t_7 = t_7[slice(None, (- 1), None)]
    t_7 = (t_7 + (16, 64))
    t_5 = t_7[0]
    t_9 = t_7[1]
    t_4 = t_7[2]
    t_7 = t_7[3]
    t_7 = t_6.view(t_5, t_9, t_4, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_2 = t_2[slice(None, (- 1), None)]
    t_2 = (t_2 + (16, 64))
    t_4 = t_2[0]
    t_9 = t_2[1]
    t_5 = t_2[2]
    t_2 = t_2[3]
    t_2 = t_1.view(t_4, t_9, t_5, t_2)
    t_2 = t_2.permute(0, 2, 1, 3)
    t_3 = t_3[slice(None, (- 1), None)]
    t_3 = (t_3 + (16, 64))
    t_5 = t_3[0]
    t_9 = t_3[1]
    t_4 = t_3[2]
    t_3 = t_3[3]
    t_3 = t_8.view(t_5, t_9, t_4, t_3)
    t_3 = t_3.permute(0, 2, 1, 3)
    t_2 = t_2.transpose((- 1), (- 2))
    t_2 = torch.matmul(t_7, t_2)
    t_7 = math.sqrt(64)
    t_7 = (t_2 / t_7)
    t_7 = (t_7 + attention_mask)
    t_7 = self.l_31(t_7)
    t_7 = self.l_32(t_7)
    t_3 = torch.matmul(t_7, t_3)
    t_3 = t_3.permute(0, 2, 1, 3)
    t_3 = t_3.contiguous()
    t_7 = t_3.size()
    t_7 = t_7[slice(None, (- 2), None)]
    t_7 = (t_7 + (1024,))
    t_2 = t_7[0]
    t_4 = t_7[1]
    t_7 = t_7[2]
    t_7 = t_3.view(t_2, t_4, t_7)
    t_7 = self.l_33(t_7)
    t_7 = self.l_34(t_7)
    t_0 = (t_7 + t_0)
    t_0 = self.l_35(t_0)
    # Encoder layer 8 feed-forward block.
    t_7 = self.l_36(t_0)
    t_7 = torch.nn.functional.gelu(t_7)
    t_7 = self.l_37(t_7)
    t_7 = self.l_38(t_7)
    t_0 = (t_7 + t_0)
    t_0 = self.l_39(t_0)
    # Encoder layer 9 self-attention.
    t_7 = self.l_40(t_0)
    t_4 = self.l_41(t_0)
    t_2 = self.l_42(t_0)
    t_3 = t_7.size()
    t_9 = t_4.size()
    t_5 = t_2.size()
    t_3 = t_3[slice(None, (- 1), None)]
    t_3 = (t_3 + (16, 64))
    t_8 = t_3[0]
    t_1 = t_3[1]
    t_6 = t_3[2]
    t_3 = t_3[3]
    t_3 = t_7.view(t_8, t_1, t_6, t_3)
    t_3 = t_3.permute(0, 2, 1, 3)
    t_9 = t_9[slice(None, (- 1), None)]
    t_9 = (t_9 + (16, 64))
    t_6 = t_9[0]
    t_1 = t_9[1]
    t_8 = t_9[2]
    t_9 = t_9[3]
    t_9 = t_4.view(t_6, t_1, t_8, t_9)
    t_9 = t_9.permute(0, 2, 1, 3)
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (16, 64))
    t_8 = t_5[0]
    t_1 = t_5[1]
    t_6 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_2.view(t_8, t_1, t_6, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_9 = t_9.transpose((- 1), (- 2))
    t_9 = torch.matmul(t_3, t_9)
    t_3 = math.sqrt(64)
    t_3 = (t_9 / t_3)
    t_3 = (t_3 + attention_mask)
    t_3 = self.l_43(t_3)
    t_3 = self.l_44(t_3)
    t_5 = torch.matmul(t_3, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_5 = t_5.contiguous()
    t_3 = t_5.size()
    t_3 = t_3[slice(None, (- 2), None)]
    t_3 = (t_3 + (1024,))
    t_9 = t_3[0]
    t_6 = t_3[1]
    t_3 = t_3[2]
    t_3 = t_5.view(t_9, t_6, t_3)
    t_3 = self.l_45(t_3)
    t_3 = self.l_46(t_3)
    t_0 = (t_3 + t_0)
    t_0 = self.l_47(t_0)
    # Encoder layer 9 feed-forward block.
    t_3 = self.l_48(t_0)
    t_3 = torch.nn.functional.gelu(t_3)
    t_3 = self.l_49(t_3)
    t_3 = self.l_50(t_3)
    t_0 = (t_3 + t_0)
    t_0 = self.l_51(t_0)
    # Encoder layer 10 self-attention.
    t_3 = self.l_52(t_0)
    t_6 = self.l_53(t_0)
    t_9 = self.l_54(t_0)
    t_5 = t_3.size()
    t_1 = t_6.size()
    t_8 = t_9.size()
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (16, 64))
    t_2 = t_5[0]
    t_4 = t_5[1]
    t_7 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_3.view(t_2, t_4, t_7, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_1 = t_1[slice(None, (- 1), None)]
    t_1 = (t_1 + (16, 64))
    t_7 = t_1[0]
    t_4 = t_1[1]
    t_2 = t_1[2]
    t_1 = t_1[3]
    t_1 = t_6.view(t_7, t_4, t_2, t_1)
    t_1 = t_1.permute(0, 2, 1, 3)
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (16, 64))
    t_2 = t_8[0]
    t_4 = t_8[1]
    t_7 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_9.view(t_2, t_4, t_7, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_1 = t_1.transpose((- 1), (- 2))
    t_1 = torch.matmul(t_5, t_1)
    t_5 = math.sqrt(64)
    t_5 = (t_1 / t_5)
    t_5 = (t_5 + attention_mask)
    t_5 = self.l_55(t_5)
    t_5 = self.l_56(t_5)
    t_8 = torch.matmul(t_5, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_8 = t_8.contiguous()
    t_5 = t_8.size()
    t_5 = t_5[slice(None, (- 2), None)]
    t_5 = (t_5 + (1024,))
    t_1 = t_5[0]
    t_7 = t_5[1]
    t_5 = t_5[2]
    t_5 = t_8.view(t_1, t_7, t_5)
    t_5 = self.l_57(t_5)
    t_5 = self.l_58(t_5)
    t_0 = (t_5 + t_0)
    t_0 = self.l_59(t_0)
    # Encoder layer 10 feed-forward block.
    t_5 = self.l_60(t_0)
    t_5 = torch.nn.functional.gelu(t_5)
    t_5 = self.l_61(t_5)
    t_5 = self.l_62(t_5)
    t_0 = (t_5 + t_0)
    t_0 = self.l_63(t_0)
    # Encoder layer 11 self-attention, first half only: q (l_64), k (l_65),
    # v (l_66) projections, head split, and raw q @ k^T scores. Scaling,
    # masking and softmax are completed by the next pipeline partition.
    t_5 = self.l_64(t_0)
    t_7 = self.l_65(t_0)
    t_1 = self.l_66(t_0)
    t_8 = t_5.size()
    t_4 = t_7.size()
    t_2 = t_1.size()
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (16, 64))
    t_9 = t_8[0]
    t_6 = t_8[1]
    t_3 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_5.view(t_9, t_6, t_3, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_4 = t_4[slice(None, (- 1), None)]
    t_4 = (t_4 + (16, 64))
    t_3 = t_4[0]
    t_6 = t_4[1]
    t_9 = t_4[2]
    t_4 = t_4[3]
    t_4 = t_7.view(t_3, t_6, t_9, t_4)
    t_4 = t_4.permute(0, 2, 1, 3)
    t_2 = t_2[slice(None, (- 1), None)]
    t_2 = (t_2 + (16, 64))
    t_9 = t_2[0]
    t_6 = t_2[1]
    t_3 = t_2[2]
    t_2 = t_2[3]
    t_2 = t_1.view(t_9, t_6, t_3, t_2)
    t_2 = t_2.permute(0, 2, 1, 3)
    t_4 = t_4.transpose((- 1), (- 2))
    t_4 = torch.matmul(t_8, t_4)
    # Outputs: layer-10 hidden states, layer-11 values, raw attention scores.
    return list(flatten((t_0, t_2, t_4)))
def state_dict(self, *args, **kwargs):
    """Delegate to the module-level ``state_dict`` helper shared by all
    generated partitions (the name resolves to the global function here,
    not to this method)."""
    helper = state_dict
    return helper(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
    """Delegate to the module-level ``load_state_dict`` helper shared by all
    generated partitions."""
    helper = load_state_dict
    return helper(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
    """Delegate to the module-level ``named_parameters`` helper shared by
    all generated partitions."""
    helper = named_parameters
    return helper(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
    """Delegate to the module-level ``named_buffers`` helper shared by all
    generated partitions."""
    helper = named_buffers
    return helper(self, *args, **kwargs)
def cpu(self):
    """Move this partition to CPU via the module-level ``cpu`` helper."""
    moved = cpu(self)
    return moved
def cuda(self, device=None):
    """Move this partition to the given CUDA device via the module-level
    ``cuda`` helper."""
    moved = cuda(self, device=device)
    return moved
def to(self, *args, **kwargs):
    """Forward device/dtype moves to the module-level ``to`` helper."""
    moved = to(self, *args, **kwargs)
    return moved
def load_id_mapping(filename_list, i):
    """Build a mapping from "new" IDs to "old" IDs out of tab-separated files.

    Each line is split on tabs; column 2 holds the old ID and column
    ``i + 2`` holds the new ID. Later lines/files overwrite earlier entries
    when the same new ID appears twice.

    Args:
        filename_list: iterable of paths to TSV files.
        i: column offset — the new ID is read from column ``i + 2``.

    Returns:
        dict mapping ``new_id -> old_id``.
    """
    id_map_dict = {}
    for filename in filename_list:
        # Context manager guarantees the handle is closed even on a parse
        # error (the original leaked the open file in that case).
        with open(filename, 'r') as f:
            for line in f:
                # Strip the trailing newline before splitting so that no
                # column keeps a stray '\n' (the original left it on the
                # old-ID column whenever i == 0 made it the last column).
                slots = line.rstrip('\n').split('\t')
                old_id = slots[2]
                new_id = slots[i + 2]
                id_map_dict[new_id] = old_id
    return id_map_dict
def get_device_for_rank(args, rank, local_rank):
    """Map a global process rank to its torch device.

    When ``args.stage_to_device_map`` exists, it yields a *global* GPU id
    for ``rank``; on multi-node runs that id is reduced to a node-local
    index by subtracting each earlier node's GPU count. Otherwise
    ``local_rank`` is used directly. Returns a CPU device when ``args.cpu``
    is set.
    """
    nnodes = args.nnodes
    # assumes get_ngpus_per_node returns a per-node sequence of GPU counts
    # when nnodes > 1 — TODO confirm against its definition.
    ngpus_per_node = get_ngpus_per_node(args)
    if hasattr(args, 'stage_to_device_map'):
        stage_to_device_map = args.stage_to_device_map
        cuda_device_id = stage_to_device_map[rank]
        if (nnodes > 1):
            # Peel off each node's GPU count until the id falls within a
            # node; the for-else fires when the loop never breaks, i.e. the
            # global id exceeds the total number of GPUs.
            for (node_idx, x) in enumerate(ngpus_per_node):
                if (cuda_device_id >= x):
                    cuda_device_id -= x
                else:
                    break
            else:
                raise ValueError(f"Can't determine device index. rank={rank}, stage_to_device_map={stage_to_device_map}, global_device_id={cuda_device_id}, nnodes={nnodes}, ngpus_per_node={ngpus_per_node}")
        local_device_id = cuda_device_id
    else:
        # No explicit map: trust the launcher-provided local rank.
        local_device_id = local_rank
    device = torch.device(('cpu' if args.cpu else f'cuda:{local_device_id}'))
    return device
def create_capsule_markers(marker_ref, oMg, d, l):
    """Build RViz markers visualizing a capsule of diameter ``d``, length ``l``.

    Returns ``[cylinder, end_sphere_1, end_sphere_2]``. Note: ``marker_ref``
    itself is reused (mutated in place) as the cylinder marker; only the two
    end spheres are deep copies.
    """
    from copy import deepcopy
    from visualization_msgs.msg import Marker
    from geometry_msgs.msg import Point
    # End-cap poses: +/- l/2 along the capsule's local z axis.
    offset = pin.SE3.Identity()
    offset.translation[2] = l / 2.0
    oMsphere_1 = oMg * offset
    offset.translation[2] = -l / 2.0
    oMsphere_2 = oMg * offset
    # Central cylinder (aliases marker_ref on purpose).
    marker_cylinder = marker_ref
    marker_cylinder.type = Marker.CYLINDER
    marker_cylinder.scale = Point(d, d, l)
    marker_cylinder.pose = SE3ToROSPose(oMg)
    # Two spherical end caps with distinct id offsets.
    spheres = []
    for pose, id_offset in ((oMsphere_1, 10000), (oMsphere_2, 20000)):
        sphere = deepcopy(marker_ref)
        sphere.id += id_offset
        sphere.type = Marker.SPHERE
        sphere.scale = Point(d, d, d)
        sphere.pose = SE3ToROSPose(pose)
        spheres.append(sphere)
    return [marker_cylinder] + spheres
def resize_and_convert(img, size, resample, quality=100):
    """Resize ``img`` to ``size``, center-crop it to a square of the same
    size, and return the JPEG-encoded bytes at the given ``quality``."""
    resized = trans_fn.resize(img, size, resample)
    cropped = trans_fn.center_crop(resized, size)
    buffer = BytesIO()
    cropped.save(buffer, format='jpeg', quality=quality)
    return buffer.getvalue()
class Attribute():
    """Record of an attribute assertion about a subject (id'd annotation)."""

    def __init__(self, id, subject, attribute, synset):
        """Store the raw fields; ``id`` is formatted with %d, so it should
        be numeric."""
        self.id = id
        self.subject = subject
        self.attribute = attribute
        self.synset = synset

    def __str__(self):
        """Human-readable form, e.g. ``3: sky is blue``."""
        return '%d: %s is %s' % (self.id, self.subject, self.attribute)

    def __repr__(self):
        """repr mirrors str for readable container dumps."""
        return str(self)
class Agent(object):
    """Base interface for a dialogue agent; every hook below is a no-op
    stub meant to be overridden by concrete subclasses."""
    def feed_context(self, context):
        """Receive the context before the dialogue starts."""
        pass
    def read(self, inpt):
        """Consume an input utterance from the conversation partner."""
        pass
    def write(self):
        """Produce the agent's next utterance."""
        pass
    def choose(self):
        """Make the agent's final selection at the end of the dialogue."""
        pass
    def update(self, agree, reward):
        """Learn from the outcome (agreement flag and reward signal)."""
        pass
class Integral(Struct):
    """Description of a numerical quadrature rule.

    In ``'builtin'`` mode quadrature points are looked up by geometry and
    order from ``QuadraturePoints.from_table``; in ``'custom'`` mode the
    caller supplies coordinates/weights directly. Constructed
    ``QuadraturePoints`` are cached per geometry in ``self.qps``.
    """

    def __init__(self, name, order=1, coors=None, weights=None, bounds=None, tp_fix=1.0, weight_fix=1.0, symmetric=False):
        self.name = name
        self.qps = {}  # cache: geometry -> QuadraturePoints
        if coors is None:
            self.mode = 'builtin'
        else:
            # Custom rule: stash everything QuadraturePoints needs later.
            self.mode = 'custom'
            self.coors = coors
            self.weights = weights
            self.bounds = bounds
            self.tp_fix = tp_fix
            self.weight_fix = weight_fix
            self.symmetric = symmetric
        # 'auto'/'custom' (or their one-letter aliases) mean "no fixed
        # order" and are encoded as -1. (The original's `self.order = 0`
        # default was dead code — both branches always assign.)
        if order in ('auto', 'custom', 'a', 'c'):
            self.order = -1
        else:
            self.order = int(order)

    def get_qp(self, geometry):
        """Return cached ``(coors, weights)`` for ``geometry``, building and
        caching the QuadraturePoints object on first use."""
        if geometry in self.qps:
            qp = self.qps[geometry]
        else:
            if self.mode == 'builtin':
                qp = QuadraturePoints.from_table(geometry, self.order)
            else:
                qp = QuadraturePoints(None, coors=self.coors, weights=self.weights, bounds=self.bounds, tp_fix=self.tp_fix, weight_fix=self.weight_fix, symmetric=self.symmetric)
            self.qps[geometry] = qp
        return (qp.coors, qp.weights)

    def integrate(self, function, order=1, geometry='1_2'):
        """Integrate ``function(coors)`` using a table rule of the given
        order over the reference ``geometry`` (default '1_2' — presumably a
        1D two-node segment; verify against the table definitions)."""
        qp = QuadraturePoints.from_table(geometry, order)
        fvals = function(qp.coors)
        val = nm.sum(fvals * qp.weights)
        return val
class Writer(object):
    """File-like writer that prints at a fixed terminal location.

    ``t`` is expected to expose a ``location(x, y)`` context manager (a
    blessings/blessed-style terminal object).
    """

    def __init__(self, t, location):
        """Remember the terminal object and the target coordinates."""
        self.t = t
        self.location = location

    def write(self, string):
        """Jump to the stored location, clear the line, and print."""
        with self.t.location(*self.location):
            sys.stdout.write('\x1b[K')  # ANSI: erase to end of line
            print(string)

    def flush(self):
        """No-op; present only to satisfy the file-like interface."""
        return
def _indent_to_level(text: str, level: int) -> str:
return textwrap.indent(text, ((' ' * 4) * level)).lstrip() |
def _get_token_label(utt_char_range, start_char_pos, exclusive_end_char_pos):
end_char_pos = (exclusive_end_char_pos - 1)
slot_at_boundary = True
for (idx, (start, end)) in enumerate(utt_char_range):
if (start <= start_char_pos <= end):
if (start != start_char_pos):
slot_at_boundary = False
start_tok_pos = idx
if (start <= end_char_pos <= end):
if (end != end_char_pos):
slot_at_boundary = False
end_tok_pos = idx
assert (start_tok_pos <= end_tok_pos)
return (start_tok_pos, end_tok_pos, slot_at_boundary) |
class ParseRc(Action):
    """argparse action that turns a ``"key: value, ..."`` string into a dict
    by evaluating ``'{' + values + '}'`` and stores it on the namespace."""
    def __call__(self, parser, namespace, values, option_string=None):
        # SECURITY: eval() on a command-line string executes arbitrary
        # Python. If only literal values are expected, ast.literal_eval
        # would be the safe replacement — left unchanged here to preserve
        # behavior for non-literal expressions.
        pars = eval((('{' + values) + '}'))
        setattr(namespace, self.dest, pars)
def validate_point_inside_bounds(x, y, imWidth, imHeight):
    """Raise when (x, y) lies outside the rectangle [0, imWidth] x [0, imHeight].

    Both bounds are inclusive; returns None for a valid point.

    Raises:
        Exception: naming the offending coordinate and the image size.
    """
    # BUGFIX: the original messages referenced undefined names (xmin/ymin)
    # and the Y message had five %s placeholders with only three arguments,
    # so any out-of-bounds point raised NameError/TypeError instead of the
    # intended message.
    if x < 0 or x > imWidth:
        raise Exception('X value (%s) not valid. Image dimensions: (%s,%s)' % (x, imWidth, imHeight))
    if y < 0 or y > imHeight:
        raise Exception('Y value (%s) not valid. Image dimensions: (%s,%s)' % (y, imWidth, imHeight))
def get_spline_knot_values(order):
    """Look up the knot coefficient list for a spline of the given order.

    Supported orders are 0 through 5; any other value raises KeyError.
    A fresh list is returned on every call, so callers may mutate it.
    """
    table = {
        0: [1],
        1: [1],
        2: [6, 1],
        3: [4, 1],
        4: [230, 76, 1],
        5: [66, 26, 1],
    }
    return table[order]
def pwdist_gauss(M1, S1, M2, S2, symmetric=False, return_dmeans=False, nworkers=1, commute=False):
    """Pairwise squared Wasserstein distances between two sets of Gaussians.

    Args:
        M1, M2: sequences of means; S1, S2: matching covariance terms.
        symmetric: compute only the upper triangle (assumes the two sets are
            the same) and mirror it; the diagonal stays zero.
        return_dmeans: additionally return Euclidean distances between means.
        nworkers: > 1 parallelizes over pairs with joblib threads.
        commute: forwarded to ``wasserstein_gauss_distance``.

    Returns:
        D, or (D, D_means) when ``return_dmeans`` is set.
    """
    (n1, n2) = (len(M1), len(M2))
    if symmetric:
        pairs = list(itertools.combinations(range(n1), 2))
    else:
        pairs = list(itertools.product(range(n1), range(n2)))
    D = torch.zeros((n1, n2)).to(device)
    if nworkers > 1:
        # BUGFIX: forward `commute` here too — the parallel path used to
        # silently drop it, diverging from the serial path below.
        results = Parallel(n_jobs=nworkers, verbose=1, backend='threading')((delayed(wasserstein_gauss_distance)(M1[i], M2[j], S1[i], S2[j], squared=True, commute=commute) for (i, j) in pairs))
        for ((i, j), d) in zip(pairs, results):
            D[(i, j)] = d
            if symmetric:
                D[(j, i)] = D[(i, j)]
    else:
        for (i, j) in tqdm(pairs, leave=False):
            D[(i, j)] = wasserstein_gauss_distance(M1[i], M2[j], S1[i], S2[j], squared=True, commute=commute)
            if symmetric:
                D[(j, i)] = D[(i, j)]
    if return_dmeans:
        D_means = torch.cdist(M1, M2)
        return (D, D_means)
    else:
        return D
def main():
    """Train an English MLDoc classifier on a multilingual encoder, then
    evaluate zero-shot transfer to the remaining MLDoc languages.

    Results are printed and logged to mlflow (per-language accuracy plus
    the non-English average).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='bert-base-multilingual-cased')
    parser.add_argument('--pooler', type=str, choices=['cls', 'cls_before_pooler', 'avg', 'avg_top2', 'avg_first_last'], default='avg', help='Which pooler to use')
    parser.add_argument('--experiment_name', type=str, default='mldoc', help='mlflow experiment name')
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--weight_decay', type=float, default=0.0)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--dev', action='store_true')
    parser.add_argument('--do_finetune', action='store_true')
    args = parser.parse_args()
    set_seeds(args.seed)
    mlflow_writer = get_mlflow_writer(args.experiment_name, 'mlruns', OmegaConf.create({'eval_args': vars(args)}))
    dataset_path = 'downstreams/cross-lingual-transfer/data'
    # Load data for every evaluation language up front.
    eval_data = dict()
    eval_langs = ['en', 'fr', 'de', 'ja', 'zh', 'it', 'ru', 'es']
    for lang in eval_langs:
        print(lang)
        eval_data[lang] = load_mldoc_data(dataset_path, lang)
    # Train/validate on English only; other languages are zero-shot tests.
    (train_texts, train_labels) = eval_data['en']['train']
    (val_texts, val_labels) = eval_data['en']['dev']
    # XLM-R checkpoints need the sentencepiece tokenizer explicitly.
    if ('xlm' in args.model_name_or_path):
        tokenizer = XLMRobertaTokenizer.from_pretrained(args.model_name_or_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    train_encodings = tokenizer(train_texts, truncation=True, padding=True)
    val_encodings = tokenizer(val_texts, truncation=True, padding=True)
    train_dataset = MLDocDataset(train_encodings, train_labels)
    val_dataset = MLDocDataset(val_encodings, val_labels)
    config = AutoConfig.from_pretrained(args.model_name_or_path)
    config.pooler_type = args.pooler
    config.num_labels = 4  # 4-way classification
    config.do_finetune = args.do_finetune
    config.problem_type = None
    if ('xlm' in args.model_name_or_path):
        model = RobertaForSequenceClassificationWithPooler.from_pretrained(args.model_name_or_path, config=config)
    elif any([(name in args.model_name_or_path) for name in ['bert', 'LaBSE']]):
        model = BertForSequenceClassificationWithPooler.from_pretrained(args.model_name_or_path, config=config)
    else:
        raise NotImplementedError
    if (not config.do_finetune):
        # Linear-probe mode: freeze everything except the classifier head.
        for (name, param) in model.named_parameters():
            if ('classifier' not in name):
                param.requires_grad = False
            else:
                print(name)
    device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
    model.to(device)
    model.train()
    training_args = TrainingArguments(output_dir='./results', num_train_epochs=5, per_device_train_batch_size=32, gradient_accumulation_steps=args.gradient_accumulation_steps, per_device_eval_batch_size=128, learning_rate=args.learning_rate, weight_decay=args.weight_decay, logging_dir='./logs', logging_steps=10, eval_steps=10, metric_for_best_model='accuracy', load_best_model_at_end=True, evaluation_strategy='steps', seed=args.seed)
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, compute_metrics=compute_metrics)
    trainer.train()
    res = trainer.evaluate()
    print(res)
    mlflow_writer.log_metric('en-dev', res['eval_accuracy'])
    if (not args.dev):
        # Zero-shot evaluation on every non-English language.
        lang_results = []
        for lang in eval_langs[1:]:
            print(lang)
            (test_texts, test_labels) = eval_data[lang]['test']
            test_encodings = tokenizer(test_texts, truncation=True, padding=True)
            test_dataset = MLDocDataset(test_encodings, test_labels)
            res = trainer.evaluate(test_dataset)
            # NOTE: this evaluates a second time just to print the result.
            print(trainer.evaluate(test_dataset))
            lang_results.append(res['eval_accuracy'])
            mlflow_writer.log_metric(lang, res['eval_accuracy'])
        mlflow_writer.log_metric('Avg.', np.mean(lang_results))
def eval_step(ood=False, n_evals=10):
    """Evaluate the (module-global) model on ``n_evals`` freshly sampled
    batches, without gradients.

    Relies on module-level ``model``, ``criterion``, ``data_call``, ``args``
    and ``device``. Returns ``(mean_loss, mean_accuracy_percent)``.
    """
    model.eval()
    loss_sum = 0.0
    acc_sum = 0.0
    with torch.no_grad():
        for _ in range(n_evals):
            data, label = data_call(args.batch_size, args.gt_rules, args.data_seed, ood)
            data = torch.Tensor(data).to(device)
            label = torch.Tensor(label).to(device)
            out, score = model(data)
            loss_sum += criterion(out, label).item()
            # Binary prediction by thresholding logits at zero.
            acc_sum += torch.eq(out >= 0.0, label).double().mean().item()
    denom = float(n_evals)
    return (loss_sum / denom, acc_sum * 100.0 / denom)
def run_metrics(metricObject, args):
    """Invoke ``compute_metrics_per_sequence`` with ``args`` expanded as
    keyword arguments, then hand the (mutated) metric object back so the
    call composes with map-style pipelines."""
    metricObject.compute_metrics_per_sequence(**args)
    return metricObject
def _save(f, verts, faces, verts_uv=None, map_file=None, rgb=None, idx=None, double_sided=True, decimal_places: Optional[int]=None):
    """Write a mesh to the open file handle ``f`` in Wavefront OBJ format.

    Emits ``v`` lines for ``verts``, optional ``vt`` lines for ``verts_uv``,
    a ``usemtl`` statement when a texture map or color index is given, then
    ``f`` (face) lines with 1-based indices (``v/vt`` pairs when UVs exist).
    With ``double_sided`` each face is also written with reversed winding.
    ``decimal_places`` controls float precision ('%f' when None).
    """
    if (decimal_places is None):
        float_str = '%f'
    else:
        float_str = ('%' + ('.%df' % decimal_places))
    lines = ''
    (V, D) = verts.shape
    for i in range(V):
        vert = [(float_str % verts[(i, j)]) for j in range(D)]
        lines += ('v %s\n' % ' '.join(vert))
    if (verts_uv is not None):
        (V, D) = verts_uv.shape
        for i in range(V):
            vert_uv = [(float_str % verts_uv[(i, j)]) for j in range(D)]
            lines += ('vt %s\n' % ' '.join(vert_uv))
    # Material selection: texture-map name wins over per-part color.
    if (map_file is not None):
        lines += f'''usemtl {os.path.basename(map_file).split('.')[0]}
'''
    elif (rgb is not None):
        lines += f'''usemtl color_{idx}
'''
    # NOTE(review): `faces != []` is a list-style emptiness check; with a
    # numpy array/tensor the comparison is elementwise — confirm callers
    # always pass [] or a shaped array, never an empty array.
    if (faces != []):
        (F, P) = faces.shape
        for i in range(F):
            # OBJ indices are 1-based, hence the +1.
            if (verts_uv is not None):
                face = [('%d/%d' % ((faces[(i, j)] + 1), (faces[(i, j)] + 1))) for j in range(P)]
            else:
                face = [('%d' % (faces[(i, j)] + 1)) for j in range(P)]
            lines += ('f %s\n' % ' '.join(face))
            if double_sided:
                # Reversed vertex order flips the face normal.
                if (verts_uv is not None):
                    face = [('%d/%d' % ((faces[(i, j)] + 1), (faces[(i, j)] + 1))) for j in reversed(range(P))]
                else:
                    face = [('%d' % (faces[(i, j)] + 1)) for j in reversed(range(P))]
                lines += ('f %s\n' % ' '.join(face))
    else:
        tqdm.write(f'face = []')
    f.write(lines)
# NOTE(review): this bare `_module()` call looks like residue of a stripped
# decorator or registry hook (e.g. `@LOSSES.register_module()`) — verify
# against the upstream source before relying on it.
_module()
class CustomGaussianFocalLoss(nn.Module):
    """Gaussian focal loss wrapper around ``custom_gaussian_focal_loss``,
    scaled by ``loss_weight`` with a configurable reduction."""
    def __init__(self, alpha: float=(- 1), beta: float=4, gamma: float=2, sigmoid_clamp: float=0.0001, ignore_high_fp: float=(- 1.0), reduction='mean', loss_weight=1.0):
        super(CustomGaussianFocalLoss, self).__init__()
        self.alpha = alpha
        # NOTE(review): `beta` is stored but never forwarded by forward().
        self.beta = beta
        self.gamma = gamma
        # NOTE(review): attribute name is misspelled ('clmap') and unused by
        # forward(); renaming could break external readers, so left as-is.
        self.sigmoid_clmap = sigmoid_clamp
        self.ignore_high_fp = ignore_high_fp
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self, pred, target, pos_inds, weight=None, avg_factor=None, reduction_override=None):
        """Compute the weighted loss; ``reduction_override`` (if truthy)
        takes precedence over the configured reduction."""
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        loss_reg = (self.loss_weight * custom_gaussian_focal_loss(pred, target, weight, pos_inds=pos_inds, alpha=self.alpha, gamma=self.gamma, ignore_high_fp=self.ignore_high_fp, reduction=reduction, avg_factor=avg_factor))
        return loss_reg
def create_attack(attack: str, *args, **kwargs) -> Attack:
    """Instantiate an attack by (case-insensitive) class name.

    Args:
        attack: name of one of the classes admitted by ``ATTACK_TYPE``.
        *args, **kwargs: forwarded to the attack class constructor.

    Raises:
        ValueError: when ``attack`` names no known attack class.
    """
    # BUGFIX: resolve the matched class directly. The original validated
    # case-insensitively but then did `globals()[attack]` with the caller's
    # exact spelling, so an accepted casing could still raise KeyError.
    matched = next((cls for cls in ATTACK_TYPE.__args__ if attack.lower() == cls.__name__.lower()), None)
    if matched is None:
        raise ValueError(f'The attack {attack} is not in {ATTACK_TYPE.__args__}')
    return matched(*args, **kwargs)
def create_dirs(instructions, session_nums, object_names, re_extract):
    """Create extraction directories for each (instruction, session, object).

    All arguments arrive in raw CLI form:
      instructions: comma-separated string, or None for ['handoff', 'use'].
      session_nums: comma-separated string, a 'start-end' range, or None
          for every known session.
      object_names: comma-separated filter, or None for all objects not in
          ``ignored_objects``.
      re_extract: forwarded to ``make_dir`` to force re-extraction.
    """
    if (instructions is None):
        instructions = ['handoff', 'use']
    else:
        instructions = instructions.split(',')
    if (session_nums is None):
        # Default: all sessions (count taken from dataset_utils).
        n_sessions = len(dataset_utils.use_data_dirs)
        session_nums = ['{:d}'.format(s) for s in range(1, (n_sessions + 1))]
    elif ('-' in session_nums):
        # Inclusive 'start-end' range.
        (start, end) = session_nums.split('-')
        session_nums = ['{:d}'.format(s) for s in range(int(start), (int(end) + 1))]
    else:
        session_nums = session_nums.split(',')
    if (object_names is not None):
        object_names = object_names.split(',')
    for instruction in instructions:
        # e.g. dataset_utils.use_data_dirs / dataset_utils.handoff_data_dirs.
        data_dirs = getattr(dataset_utils, '{:s}_data_dirs'.format(instruction))
        for session_num in session_nums:
            session_name = 'full{:s}_{:s}'.format(session_num, instruction)
            data_dir = data_dirs[(int(session_num) - 1)]
            base_dir = osp.join(data_dir, session_name)
            bags_dir = osp.join(data_dir, 'rosbags', session_name)
            # Group .bag files by object; bag names look like
            # '<object>_<suffix>.bag' (hand-pose bags are skipped).
            bags = {}
            for bag_filename in os.listdir(bags_dir):
                if (bag_filename[(- 4):] != '.bag'):
                    continue
                if ('hand-pose' in bag_filename):
                    continue
                object_name = '_'.join(bag_filename.split('_')[:(- 1)])
                if (object_names is not None):
                    if (object_name not in object_names):
                        continue
                elif (object_name in ignored_objects):
                    continue
                if (object_name in bags):
                    bags[object_name].append(bag_filename)
                else:
                    bags[object_name] = [bag_filename]
            for (object_name, bag_filenames) in bags.items():
                # First bag of an object gets count=0, the rest 1, 2, ...
                make_dir(base_dir, object_name, bag_filenames[0], count=0, re_extract=re_extract)
                for (count, bag_filename) in enumerate(bag_filenames[1:]):
                    make_dir(base_dir, object_name, bag_filename, count=(count + 1), re_extract=re_extract)
class GaussianPrior(Prior):
    """Gaussian prior N(mean, var) expressed in natural parameters.

    Natural parameters are ``a = 1/var`` (precision) and ``b = mean/var``,
    so combining with incoming messages ``(ax, bx)`` is a plain addition;
    the resulting posterior has mean ``(b + bx)/(a + ax)`` and variance
    ``1/(a + ax)``.
    """

    def __init__(self, size, mean=0, var=1, isotropic=True):
        self.size = size
        self.mean = mean
        self.var = var
        self.isotropic = isotropic
        self.repr_init()
        self.sigma = np.sqrt(var)
        # Natural parameters.
        self.a = 1 / var
        self.b = mean / var

    def sample(self):
        """Draw a sample of shape ``self.size`` from N(mean, var)."""
        X = self.mean + self.sigma * np.random.standard_normal(self.size)
        return X

    def math(self):
        """LaTeX symbol used in printed factor graphs."""
        return '$\\mathcal{N}$'

    def second_moment(self):
        """E[x^2] = mean^2 + var."""
        return self.mean ** 2 + self.var

    def forward_second_moment_FG(self, tx_hat):
        a = tx_hat + self.a
        return normal.tau(a, self.b)

    def scalar_forward_mean(self, ax, bx):
        """Posterior mean for scalar messages."""
        a = ax + self.a
        b = bx + self.b
        return b / a

    def scalar_forward_variance(self, ax, bx):
        """Posterior variance for scalar messages (bx unused by design)."""
        a = ax + self.a
        return 1 / a

    def scalar_log_partition(self, ax, bx):
        a = ax + self.a
        b = bx + self.b
        A = normal.A(a, b) - normal.A(self.a, self.b)
        return A

    def compute_forward_posterior(self, ax, bx):
        """Return posterior (mean, variance) given incoming (ax, bx)."""
        a = ax + self.a
        b = bx + self.b
        rx = b / a
        vx = 1 / a
        return (rx, vx)

    def compute_log_partition(self, ax, bx):
        a = ax + self.a
        b = bx + self.b
        A = normal.A(a, b) - normal.A(self.a, self.b)
        return A.mean()

    def compute_forward_error(self, ax):
        a = ax + self.a
        vx = 1 / a
        return vx

    def compute_forward_v_BO(self, ax, tx0_hat):
        # tx0_hat is unused for a Gaussian prior; kept for interface parity.
        a = ax + self.a
        vx = 1 / a
        return vx

    def compute_forward_message(self, ax, bx):
        """The prior's outgoing message is constant in its own parameters."""
        ax_new = self.a * np.ones_like(ax)
        bx_new = self.b * np.ones_like(bx)
        return (ax_new, bx_new)

    def compute_forward_state_evolution(self, ax):
        ax_new = self.a
        return ax_new

    def compute_forward_state_evolution_BO(self, ax, tx0_hat):
        ax_new = self.a
        return ax_new

    def b_measure(self, mx_hat, qx_hat, tx0_hat, f):
        """Gaussian measure of f under the teacher-tilted belief."""
        a0 = self.a + tx0_hat
        b0 = self.b
        r0 = b0 / a0
        v0 = 1 / a0
        mu = gaussian_measure(mx_hat * r0, np.sqrt(qx_hat + (mx_hat ** 2) * v0), f)
        return mu

    def bx_measure(self, mx_hat, qx_hat, tx0_hat, f):
        """Measure of r(bx) * f(bx) with r the tilted posterior mean."""
        a0 = self.a + tx0_hat
        b0 = self.b
        r0 = b0 / a0
        v0 = 1 / a0
        ax_star = (mx_hat / qx_hat) * mx_hat

        def r_times_f(bx):
            bx_star = (mx_hat / qx_hat) * bx
            r = (b0 + bx_star) / (a0 + ax_star)
            return r * f(bx)

        mu = gaussian_measure(mx_hat * r0, np.sqrt(qx_hat + (mx_hat ** 2) * v0), r_times_f)
        return mu

    def beliefs_measure(self, ax, f):
        # BUGFIX: the original signature omitted `self`, so `ax` captured the
        # instance and every call raised (self.r0 was unresolvable).
        # NOTE(review): self.r0 / self.v0 are not set in __init__ — they
        # presumably come from the Prior base class or a setup step; verify.
        mu = gaussian_measure(ax * self.r0, np.sqrt(ax + (ax ** 2) * self.v0), f)
        return mu

    def measure(self, f):
        return gaussian_measure(self.mean, self.sigma, f)

    def compute_mutual_information(self, ax):
        """I(x; noisy obs) = 0.5 * log((ax + a) * var)."""
        a = ax + self.a
        I = 0.5 * np.log(a * self.var)
        return I

    def compute_free_energy(self, ax):
        tau_x = self.second_moment()
        I = self.compute_mutual_information(ax)
        A = (0.5 * ax) * tau_x - I
        return A
def is_invertible_module(module_in, test_input_shape, test_input_dtype=torch.float32, atol=1e-06, random_seed=42):
    """Empirically check whether ``module_in`` is invertible.

    Verifies ``inverse(module(x)) ~= x`` and ``module(inverse(y)) ~= y`` on
    random inputs of ``test_input_shape`` (one shape tuple or a tuple of
    shape tuples), warning about degenerate all-zero tensors and about
    tensors shared between inputs and outputs. Returns True/False; raises
    ValueError on a malformed shape argument.
    """
    # Unwrap so the raw invertible function is tested.
    if isinstance(module_in, InvertibleModuleWrapper):
        module_in = module_in._fn
    if (not hasattr(module_in, 'inverse')):
        return False
    def _type_check_input_shape(test_input_shape):
        # Accept Tuple[int, ...] or Tuple[Tuple[int, ...], ...] (lists ok).
        if isinstance(test_input_shape, (tuple, list)):
            if all([isinstance(e, int) for e in test_input_shape]):
                return True
            elif all([isinstance(e, (tuple, list)) for e in test_input_shape]):
                return all([isinstance(ee, int) for e in test_input_shape for ee in e])
            else:
                return False
        else:
            return False
    if (not _type_check_input_shape(test_input_shape)):
        raise ValueError('test_input_shape should be of type Tuple[int, ...] or Tuple[Tuple[int, ...], ...], but {} found'.format(type(test_input_shape)))
    # Normalize a single shape to a one-element tuple of shapes.
    if (not isinstance(test_input_shape[0], (tuple, list))):
        test_input_shape = (test_input_shape,)
    def _check_inputs_allclose(inputs, reference, atol):
        for (inp, ref) in zip(inputs, reference):
            if (not torch.allclose(inp, ref, atol=atol)):
                return False
        return True
    def _pack_if_no_tuple(x):
        if (not isinstance(x, tuple)):
            return (x,)
        return x
    with torch.no_grad():
        torch.manual_seed(random_seed)
        test_inputs = tuple([torch.rand(shape, dtype=test_input_dtype) for shape in test_input_shape])
        if any([torch.equal(torch.zeros_like(e), e) for e in test_inputs]):
            warnings.warn('Some inputs were detected to be all zeros, you might want to set a different random_seed.')
        # Round trip x -> y -> x.
        if (not _check_inputs_allclose(_pack_if_no_tuple(module_in.inverse(*_pack_if_no_tuple(module_in(*test_inputs)))), test_inputs, atol=atol)):
            return False
        test_outputs = _pack_if_no_tuple(module_in(*test_inputs))
        if any([torch.equal(torch.zeros_like(e), e) for e in test_outputs]):
            warnings.warn('Some outputs were detected to be all zeros, you might want to set a different random_seed.')
        # Round trip y -> x -> y.
        if (not _check_inputs_allclose(_pack_if_no_tuple(module_in(*_pack_if_no_tuple(module_in.inverse(*test_outputs)))), test_outputs, atol=atol)):
            return False
        test_reconstructed_inputs = _pack_if_no_tuple(module_in.inverse(*test_outputs))
        # Warn when forward/inverse alias tensors (hurts memory-saving use).
        def _test_shared(inputs, outputs, msg):
            shared = set(inputs)
            shared_outputs = set(outputs)
            if (len(inputs) != len(shared)):
                warnings.warn('Some inputs (*x) share the same tensor, are you sure this is what you want? ({})'.format(msg))
            if (len(outputs) != len(shared_outputs)):
                warnings.warn('Some outputs (*y) share the same tensor, are you sure this is what you want? ({})'.format(msg))
            if any([(inp in shared) for inp in shared_outputs]):
                warnings.warn('Some inputs (*x) and outputs (*y) share the same tensor, this is typically not a good function to use with memcnn.InvertibleModuleWrapper as it might increase memory usage. E.g. an identity function. ({})'.format(msg))
        _test_shared(test_inputs, test_outputs, msg='forward')
        _test_shared(test_reconstructed_inputs, test_outputs, msg='inverse')
    return True
def get_option_reward(purchased_options, goal_options):
    """Score how many goal options are fuzzily matched by purchased options.

    Options are color-normalized first; a goal counts as matched when any
    purchased option reaches a token-set ratio above 85. Returns
    ``(reward, match_count)`` where reward is the matched fraction, or None
    when there are no goal options.
    """
    normalized_purchased = [normalize_color(option) for option in purchased_options]
    normalized_goals = [normalize_color(option) for option in goal_options]
    num_option_matches = sum(
        1
        for goal in normalized_goals
        if any(fuzz.token_set_ratio(purchased, goal) > 85 for purchased in normalized_purchased)
    )
    r_option = num_option_matches / len(normalized_goals) if normalized_goals else None
    return (r_option, num_option_matches)
def get_cifar10(data_root, num_labeled, labeled_aug='weak', unlabeled_aug='strong', sample_mode='label_dist', whiten=True, incl_labeled_in_unlabeled=True):
    """Build CIFAR-10 splits for semi-supervised training.

    Returns a dict with keys 'base', 'labeled', 'unlabeled' and 'test'.
    ``num_labeled=None`` treats the whole training set as labeled.
    """
    base_dataset = datasets.CIFAR10(data_root, train=True, download=True)
    if num_labeled is None:
        num_labeled = len(base_dataset)
    # Normalization statistics: dataset-specific when whitening is on.
    mean, std = (cifar10_mean, cifar10_std) if whiten else (default_mean, default_std)
    transform_labeled = get_transform(mean, std, mode=labeled_aug)
    transform_unlabeled = get_transform(mean, std, mode=unlabeled_aug)
    transform_val = get_transform(mean, std, mode='none')
    labeled_idxs, unlabeled_idxs = labeled_unlabeled_split(base_dataset.targets, num_labeled, sample_mode=sample_mode, incl_labeled_in_unlabeled=incl_labeled_in_unlabeled)
    labeled_dataset = CIFAR10(data_root, labeled_idxs, train=True, transform=transform_labeled)
    unlabeled_dataset = CIFAR10(data_root, unlabeled_idxs, train=True, transform=transform_unlabeled, target_idx=True)
    test_dataset = CIFAR10(data_root, train=False, transform=transform_val, download=True)
    logger.info('dataset: CIFAR10')
    logger.info(f'labeled examples: {len(labeled_idxs)}')
    logger.info(f'unlabeled examples: {len(unlabeled_idxs)}')
    return dict(base=base_dataset, labeled=labeled_dataset, unlabeled=unlabeled_dataset, test=test_dataset)
class _open_zipfile_reader(_opener):
    """Opener wrapping a ``torch._C.PyTorchFileReader`` over a path/buffer."""

    def __init__(self, name_or_buffer) -> None:
        reader = torch._C.PyTorchFileReader(name_or_buffer)
        super().__init__(reader)
class _unsafe_first_element_pointer(object):
def __init__(self, arr):
self.base = arr
def __array_interface__(self):
i = dict(shape=(), typestr='|V0', data=(self.base.__array_interface__['data'][0], False), strides=(), version=3)
return i |
def pytest_collection_modifyitems(config, items):
    """Skip tests marked 'remote' unless the --remote option was given."""
    if config.getoption('--remote'):
        # --remote passed: run everything exactly as collected.
        return
    skip_remote = pytest.mark.skip(reason='need --remote option to run')
    for test_item in items:
        if 'remote' in test_item.keywords:
            test_item.add_marker(skip_remote)
# NOTE(review): the line below looks like a truncated decorator, e.g.
# `@register.tag(name='form_field')` — confirm against the upstream source.
(name='form_field')
def do_form_field(parser, token):
    """Compile the ``{% form_field <name> [key="value" ...] %}`` template tag.

    The first token is the form-field name; any remainder is parsed into
    alternating key/value pairs that become HTML attributes (values are
    resolved as template Variables at render time).

    Raises:
        template.TemplateSyntaxError: when the field name is missing or the
            key/value tokens do not pair up evenly.
    """
    parts = token.contents.split(' ', 2)
    if (len(parts) < 2):
        error_text = '%r tag must have the form field name as the first value, followed by optional key="value" attributes.'
        raise template.TemplateSyntaxError((error_text % parts[0]))
    html_attrs = {}
    if (len(parts) == 3):
        raw_args = list(args_split(parts[2]))
        # Attributes must come in key/value pairs.
        if ((len(raw_args) % 2) != 0):
            raise template.TemplateSyntaxError(('%r tag received the incorrect number of key=value arguments.' % parts[0]))
        for x in range(0, len(raw_args), 2):
            html_attrs[str(raw_args[x])] = Variable(raw_args[(x + 1)])
    return FormFieldNode(parts[1], html_attrs)
def prepare_data(args, train, return_full_dataset=False):
    """Dispatch dataset preparation according to ``args.shift_type``.

    Fills in ``args.root_dir`` from the dataset defaults when unset.
    Returns None for unrecognized shift types (mirroring the original
    fall-through behavior).
    """
    if args.root_dir is None:
        args.root_dir = dataset_attributes[args.dataset]['root_dir']
    if args.shift_type == 'confounder':
        return prepare_confounder_data(args, train, return_full_dataset)
    if args.shift_type.startswith('label_shift'):
        # Full-dataset mode is not supported for label-shift preparation.
        assert not return_full_dataset
        return prepare_label_shift_data(args, train)
def write_original_conll(fn, conll_original):
    """Write sentences to ``fn`` in 10-column CoNLL format.

    The first element of each sentence (the artificial root entry) is
    skipped; sentences are separated by a blank line.
    """
    with open(fn, 'w') as fh:
        for sentence in conll_original:
            # Entry 0 is the root placeholder; emit real tokens only.
            for entry in sentence[1:]:
                columns = [str(entry.id), entry.form, '_', entry.cpos, entry.pos, '_', str(entry.parent_id), entry.relation, '_', '_']
                fh.write('\t'.join(columns))
                fh.write('\n')
            fh.write('\n')
class RandomActiveLearningNodeNBA(LearningNodeNBA, RandomActiveLeafClass):
    """Leaf node combining the NBA learning-node behavior with random
    feature selection (behavior supplied by the two base classes)."""
    def __init__(self, initial_stats=None, max_features=2, random_state=None):
        """
        Parameters
        ----------
        initial_stats : optional
            Initial observation statistics forwarded to the base leaf.
        max_features : int
            Number of features to consider (default 2) — presumably used by
            RandomActiveLeafClass when picking split candidates; verify.
        random_state : int, RandomState instance or None
            Seed or RNG controlling the random feature choice.
        """
        super().__init__(initial_stats)
        self.max_features = max_features
        # Selected feature subset; empty until populated by the leaf logic.
        self.feature_indices = np.array([])
        self.random_state = random_state
        # Validated RNG object derived from `random_state`.
        self._random_state = check_random_state(self.random_state)
class TestEMAGPU(unittest.TestCase):
    """Unit tests for the EMA (exponential moving average) model wrapper."""

    def assertTorchAllClose(self, x, y, atol=1e-08, rtol=1e-05, msg=None):
        """Assert ||x - y|| <= atol + rtol * ||y|| using tensor norms."""
        diff = (x.float() - y.float())
        diff_norm = torch.norm(diff)
        other_norm = torch.norm(y.float())
        if (msg is None):
            msg = '|input - other| > {} + {} * |other|'.format(atol, rtol)
        self.assertLessEqual(diff_norm, (atol + (rtol * other_norm)), msg=msg)

    def test_ema(self):
        """One optimizer + EMA step must blend old/new params by ema_decay."""
        model = DummyModule()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        # Snapshot of the pre-update parameters; used as the "old" value in
        # the expected EMA formula below.
        state = deepcopy(model.state_dict())
        config = EMAConfig()
        ema = EMA(model, config)
        # Basic decay accessors round-trip.
        ema._set_decay(config.ema_decay)
        self.assertEqual(ema.get_decay(), config.ema_decay)
        self.assertEqual(ema.get_model(), ema.model)
        # Default config keeps no fp32 shadow copies.
        self.assertEqual(len(ema.fp32_params), 0)
        x = torch.randn(32)
        y = model(x)
        loss = y.sum()
        loss.backward()
        optimizer.step()
        ema.step(model)
        ema_state_dict = ema.get_model().state_dict()
        for (key, param) in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema_state_dict[key]
            if ('version' in key):
                continue
            # EMA update rule: decay * old + (1 - decay) * new.
            self.assertTorchAllClose(ema_param, ((config.ema_decay * prev_param) + ((1 - config.ema_decay) * param)))
        self.assertEqual(len(ema.fp32_params), 0)
        # reverse() must copy the EMA weights into a fresh module.
        model2 = DummyModule()
        ema.reverse(model2)
        for (key, param) in model2.state_dict().items():
            ema_param = ema_state_dict[key]
            self.assertTrue(torch.allclose(ema_param, param))

    def test_ema_fp32(self):
        """With ema_fp32=True the EMA must accumulate in fp32 shadow params."""
        model = DummyModule().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        state = deepcopy(model.state_dict())
        config = EMAConfig(ema_fp32=True)
        ema = EMA(model, config)
        x = torch.randn(32)
        y = model(x.half())
        loss = y.sum()
        loss.backward()
        optimizer.step()
        ema.step(model)
        for (key, param) in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema.get_model().state_dict()[key]
            if ('version' in key):
                continue
            self.assertIn(key, ema.fp32_params)
            # The stored EMA value should be closer to the "accumulate in
            # fp32, then cast to half" reference than to the "accumulate in
            # fp16" reference — i.e. accumulation really happened in fp32.
            self.assertLessEqual(torch.norm((ema_param.float() - ((config.ema_decay * prev_param.float()) + ((1 - config.ema_decay) * param.float())).half().float())), torch.norm((ema_param.float() - ((config.ema_decay * prev_param) + ((1 - config.ema_decay) * param)).float())))
            self.assertTorchAllClose(ema_param, ((config.ema_decay * prev_param.float()) + ((1 - config.ema_decay) * param.float())).half())

    def test_ema_fp16(self):
        """With ema_fp32=False the EMA must accumulate directly in fp16."""
        model = DummyModule().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        state = deepcopy(model.state_dict())
        config = EMAConfig(ema_fp32=False)
        ema = EMA(model, config)
        # No fp32 shadow copies in pure-fp16 mode.
        self.assertEqual(len(ema.fp32_params), 0)
        x = torch.randn(32)
        y = model(x.half())
        loss = y.sum()
        loss.backward()
        optimizer.step()
        ema.step(model)
        for (key, param) in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema.get_model().state_dict()[key]
            if ('version' in key):
                continue
            # Mirror of the fp32 test: here the fp16-accumulated reference
            # must be the closer one.
            self.assertLessEqual(torch.norm((ema_param.float() - ((config.ema_decay * prev_param) + ((1 - config.ema_decay) * param)).float())), torch.norm((ema_param.float() - ((config.ema_decay * prev_param.float()) + ((1 - config.ema_decay) * param.float())).half().float())))
            self.assertTorchAllClose(ema_param, ((config.ema_decay * prev_param) + ((1 - config.ema_decay) * param)))
        self.assertEqual(len(ema.fp32_params), 0)
class Context(with_metaclass(ContextMeta)):
    """Template runtime context: variable resolution, blocks and exports.

    Lookups fall back from the mutable ``vars`` dict to the read-only
    ``parent`` mapping. Two resolution modes exist for backwards
    compatibility with subclasses overriding ``resolve`` or
    ``resolve_or_missing`` (the metaclass sets the mode flags).
    """
    _legacy_resolve_mode = False
    _fast_resolve_mode = False

    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        # Names explicitly exported to the importing/including template.
        self.exported_vars = set()
        self.name = name
        # Block name -> list of block functions (supports super() chains).
        self.blocks = dict(((k, [v]) for (k, v) in iteritems(blocks)))
        if self._fast_resolve_mode:
            # Bind the module-level resolver directly for speed.
            self.resolve_or_missing = MethodType(resolve_or_missing, self)

    def super(self, name, current):
        """Render the next block up the inheritance chain, or undefined."""
        try:
            blocks = self.blocks[name]
            index = (blocks.index(current) + 1)
            # Probe the index so a missing parent raises IndexError here.
            blocks[index]
        except LookupError:
            return self.environment.undefined(('there is no parent block called %r.' % name), name='super')
        return BlockReference(name, self, blocks, index)

    def get(self, key, default=None):
        """Like ``__getitem__`` but returning ``default`` instead of raising."""
        try:
            return self[key]
        except KeyError:
            return default

    def resolve(self, key):
        """Resolve a variable name, returning an Undefined object if missing."""
        if self._legacy_resolve_mode:
            rv = resolve_or_missing(self, key)
        else:
            rv = self.resolve_or_missing(key)
        if (rv is missing):
            return self.environment.undefined(name=key)
        return rv

    def resolve_or_missing(self, key):
        """Resolve a variable name, returning the ``missing`` sentinel if absent."""
        if self._legacy_resolve_mode:
            # Legacy subclasses override resolve(); map Undefined back to missing.
            rv = self.resolve(key)
            if isinstance(rv, Undefined):
                rv = missing
            return rv
        return resolve_or_missing(self, key)

    def get_exported(self):
        """Return a dict of just the explicitly exported variables."""
        return dict(((k, self.vars[k]) for k in self.exported_vars))

    def get_all(self):
        """Return a single mapping of parent and local variables (locals win)."""
        if (not self.vars):
            return self.parent
        if (not self.parent):
            return self.vars
        return dict(self.parent, **self.vars)

    def call(__self, __obj, *args, **kwargs):
        """Call ``__obj``, injecting context/eval-context/environment as
        requested by the callable's marker attributes.

        Double-underscore parameter names avoid clashing with template
        keyword arguments passed through **kwargs.
        """
        if __debug__:
            # Hide this frame from template tracebacks.
            __traceback_hide__ = True
        if hasattr(__obj, '__call__'):
            fn = __obj.__call__
            # Marker attributes may live on __call__ of a callable object.
            for fn_type in ('contextfunction', 'evalcontextfunction', 'environmentfunction'):
                if hasattr(fn, fn_type):
                    __obj = fn
                    break
        if callable(__obj):
            if (getattr(__obj, 'contextfunction', False) is True):
                args = ((__self,) + args)
            elif (getattr(__obj, 'evalcontextfunction', False) is True):
                args = ((__self.eval_ctx,) + args)
            elif (getattr(__obj, 'environmentfunction', False) is True):
                args = ((__self.environment,) + args)
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            return __self.environment.undefined('value was undefined because a callable raised a StopIteration exception')

    def derived(self, locals=None):
        """Create a derived context that shares eval context and blocks."""
        context = new_context(self.environment, self.name, {}, self.get_all(), True, None, locals)
        context.eval_ctx = self.eval_ctx
        context.blocks.update(((k, list(v)) for (k, v) in iteritems(self.blocks)))
        return context

    # Factory producing dict-proxy methods that delegate to get_all().
    def _all(meth):
        def proxy(self):
            return getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy
    keys = _all('keys')
    values = _all('values')
    items = _all('items')
    if PY2:
        # Python 2 dicts expose iterator variants as well.
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all

    def __contains__(self, name):
        return ((name in self.vars) or (name in self.parent))

    def __getitem__(self, key):
        # Raises KeyError (unlike resolve(), which returns Undefined).
        item = self.resolve_or_missing(key)
        if (item is missing):
            raise KeyError(key)
        return item

    def __repr__(self):
        return ('<%s %s of %r>' % (self.__class__.__name__, repr(self.get_all()), self.name))
class Identity(BaseFunction):
    """Identity map scaled by 1/norm, in TensorFlow, SciPy and NumPy flavors."""

    def tf(self, x):
        # TensorFlow variant: tf.identity keeps x a graph tensor.
        return tf.identity(x) / self.norm

    def sp(self, x):
        # SciPy/sparse variant: input passes through apart from the scaling.
        return x / self.norm

    def np(self, x):
        # NumPy variant: coerce to ndarray so plain sequences work too.
        return np.array(x) / self.norm
def flat_lstm_cell(input, hx, cx, w_ih, w_hh, b_ih, b_hh):
    """Single LSTM cell step using flattened (stacked) gate weights.

    Args:
        input: (batch, input_size) input at this timestep.
        hx, cx: previous hidden and cell states, (batch, hidden_size) each.
        w_ih: input-to-hidden weights, (4*hidden_size, input_size).
        w_hh: hidden-to-hidden weights, (4*hidden_size, hidden_size).
        b_ih, b_hh: stacked gate biases, (4*hidden_size,) each.

    Returns:
        Tuple (hy, cy) of next hidden and cell states.
    """
    # All four gate pre-activations come from one fused matmul, then split
    # in the conventional i, f, g, o order.
    pre_act = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
    i_gate, f_gate, g_gate, o_gate = pre_act.chunk(4, 1)

    i_gate = torch.sigmoid(i_gate)
    f_gate = torch.sigmoid(f_gate)
    g_gate = torch.tanh(g_gate)
    o_gate = torch.sigmoid(o_gate)

    # Standard LSTM recurrence.
    cy = f_gate * cx + i_gate * g_gate
    hy = o_gate * torch.tanh(cy)
    return hy, cy
class TFAutoModelForCausalLM(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error if TensorFlow is absent."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast (via requires_backends) unless the 'tf' backend exists.
        requires_backends(self, ['tf'])
def evaluate(model, data):
    """Compute NLL loss and accuracy over the train/val/test masks of `data`.

    `model(data)` is expected to return log-probabilities; `data` provides
    boolean masks under '<split>_mask' keys and labels under `data.y`.
    Returns a dict keyed '<split> loss' / '<split> acc'.
    """
    model.eval()
    with torch.no_grad():
        logits = model(data)
    results = {}
    for split in ('train', 'val', 'test'):
        mask = data['{}_mask'.format(split)]
        masked_logits = logits[mask]
        labels = data.y[mask]
        loss = F.nll_loss(masked_logits, labels).item()
        # Accuracy = fraction of argmax predictions matching the labels.
        pred = masked_logits.max(1)[1]
        acc = pred.eq(labels).sum().item() / mask.sum().item()
        results['{} loss'.format(split)] = loss
        results['{} acc'.format(split)] = acc
    return results
class RandomPolicy(SerializablePolicy):
    """Policy that ignores observations and samples actions uniformly."""

    def __init__(self, action_space):
        # Only the action space is needed; sampling delegates to it.
        self.action_space = action_space

    def get_action(self, *args, **kwargs):
        # Any observation/state arguments are ignored; no agent info returned.
        action = self.action_space.sample()
        return action, {}
class BoundArguments(object):
    """Result of a Signature.bind call: an ordered mapping of argument
    values to parameters, convertible back to (*args, **kwargs).

    NOTE(review): ``signature``, ``args`` and ``kwargs`` read like stripped
    ``@property`` methods (this mirrors the stdlib ``inspect`` backport) —
    confirm against the original source.
    """

    def __init__(self, signature, arguments):
        # `arguments` maps parameter name -> bound value, in signature order.
        self.arguments = arguments
        self._signature = signature

    def signature(self):
        return self._signature

    def args(self):
        """Values passable positionally, stopping at the first gap or
        keyword-only parameter."""
        args = []
        for (param_name, param) in self._signature.parameters.items():
            # Once a keyword-only / **kwargs / partial-kwarg parameter is
            # reached, nothing further can be passed positionally.
            if ((param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY)) or param._partial_kwarg):
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # Unbound parameter: later ones must go through kwargs.
                break
            else:
                if (param.kind == _VAR_POSITIONAL):
                    # *args: splice the captured tuple inline.
                    args.extend(arg)
                else:
                    args.append(arg)
        return tuple(args)

    def kwargs(self):
        """Values that must be passed by keyword (everything args() skipped)."""
        kwargs = {}
        kwargs_started = False
        for (param_name, param) in self._signature.parameters.items():
            if (not kwargs_started):
                if ((param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY)) or param._partial_kwarg):
                    kwargs_started = True
                elif (param_name not in self.arguments):
                    # First unbound positional parameter: every bound
                    # parameter after it must be passed by keyword.
                    kwargs_started = True
                    continue
            if (not kwargs_started):
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if (param.kind == _VAR_KEYWORD):
                    # **kwargs: merge the captured dict.
                    kwargs.update(arg)
                else:
                    kwargs[param_name] = arg
        return kwargs

    def __hash__(self):
        # Explicitly unhashable, mirroring mutable-mapping semantics.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and (self.signature == other.signature) and (self.arguments == other.arguments))

    def __ne__(self, other):
        return (not self.__eq__(other))
_assert  # NOTE(review): stray bare name — looks like a decorator whose '@...' prefix was stripped during extraction; confirm against the original source.
class Operation(Node):
    """One MLIR operation: result ids, op type, operand/result types, location.

    Instances are parsed from a single textual MLIR line by ``parse`` and
    re-serialized by ``dump``. NOTE(review): several methods here (``ns``,
    ``parent``, ``name`` getter/setter pair, ``type``, ``attrs``, ``opds``,
    ``outputs``) and the self-less ``parse`` look like stripped
    ``@property``/``@staticmethod`` decorators — confirm against upstream.
    """

    def __init__(self, opd_ids: List[str], op_type: OperationType, input_types: List[Type], output_types: List[Type], loc_label: LocLabel, attrs: Attributes=None, const: str=None) -> None:
        super().__init__()
        self.opd_ids = opd_ids              # result value ids, e.g. '%1' or '%1#0'
        self.op_type = op_type
        self.input_types = input_types
        self.output_types = output_types
        self._attrs = attrs
        self.loc_label = loc_label
        self.const = const                  # raw '<{...}>' payload (tosa ops)
        self._name = None                   # explicit name; else location table
        self._parent: GroupOp = None
        self.erased = False

    def ns_opd_ids(self):
        """Result ids qualified with the parent-group namespace prefix."""
        ns = self.ns
        return [(ns + i) for i in self.opd_ids]

    def ns(self) -> str:
        """Namespace prefix ('<parent>-'), or '' for a top-level op."""
        if (self._parent is None):
            return ''
        return (self._parent.name + '-')

    def erase(self):
        """Mark the op as removed without detaching the node."""
        self.erased = True

    def parent(self) -> 'GroupOp':
        return self._parent

    def name(self):
        # Fall back to the name recorded for this op's location id.
        if (self._name is None):
            return self.context.locid2opname[self.loc_label.loc_id_str]
        return self._name

    def name(self, value):
        # NOTE(review): shadows the getter above — almost certainly a
        # stripped @name.setter pair; confirm against the original source.
        self._name = value

    def type(self) -> str:
        return self.op_type.op_type_name

    def attrs(self):
        """Attribute dict, augmented with quantization info for single-result
        ops whose element type is (uniform or calibrated) quantized."""
        if (self._attrs is None):
            res = {}
        else:
            res = self._attrs.to_dict()
        # Quantization metadata is only attached for single-result ops.
        if (len(self.opd_ids) != 1):
            return res
        if (not isinstance(self.output_types[0], NoneType)):
            element_type = self.output_types[0].ir.element_type
            if quant.UniformQuantizedType.isinstance(element_type):
                quant_type = quant.UniformQuantizedType(element_type)
                res['quant_scale'] = str(quant_type.scale)
                res['quant_zero_point'] = str(quant_type.zero_point)
            if quant.CalibratedQuantizedType.isinstance(element_type):
                quant_type = quant.CalibratedQuantizedType(element_type)
                res['calibrate_min'] = str(quant_type.min)
                res['calibrate_max'] = str(quant_type.max)
        return res

    def opds(self) -> List[str]:
        """Operand op names ('%0' — the none value — is skipped)."""
        return [self.context.get_op_name_by_op_id(i) for i in self.op_type.opds if (i != '%0')]

    def outputs(self) -> List[str]:
        """Names of the ops producing each of this op's results."""
        return [self.context.get_op_name_by_op_id(i) for i in self.opd_ids]

    def parse(operation_line: str) -> 'Operation':
        """Parse one MLIR text line into an Operation (or CallFunc).

        NOTE(review): takes no self/cls — likely a stripped @staticmethod.
        """
        (op_id_str, op_define) = operation_line.strip().split('=', maxsplit=1)
        # '%p:N' denotes N results; expand to '%p#0' .. '%p#N-1'.
        if (op_id_str.find(':') > 0):
            opd_ids = []
            (op_id_prefix, count) = op_id_str.split(':')
            for i in range(int(count)):
                opd_ids.append(f'{op_id_prefix}#{i}')
        else:
            opd_ids = op_id_str.strip().split(', ')
        const = None
        # tosa ops may carry a '<{...}>' const payload; cut it out and keep it.
        if ('tosa' in op_define):
            count_start_idx = op_define.find('<{')
            if (count_start_idx != (- 1)):
                count_end_idx = (op_define.find('}>') + 2)
                const = op_define[count_start_idx:count_end_idx]
                op_define = (op_define[:count_start_idx] + op_define[count_end_idx:])
        attr_str = ''
        attr_start_idx = op_define.find('{')
        attr_end_idx = (- 1)
        attr = None
        if (attr_start_idx != (- 1)):
            attr_end_idx = (op_define.find('}', attr_start_idx) + 1)
            attr_str = op_define[attr_start_idx:attr_end_idx]
            # find('}') stops at the first closer; re-balance nested braces.
            attr_str += ('}' * (attr_str.count('{') - 1))
            attr = Attributes.parse(attr_str)
        if (attr_end_idx != (- 1)):
            inputs_start_idx = (attr_end_idx + 2)
            type_end_idx = (attr_start_idx - 1)
        else:
            inputs_start_idx = (op_define.find(': ') + 1)
            type_end_idx = (inputs_start_idx - 1)
        inputs_end_idx = op_define.find(' ->', inputs_start_idx)
        inputs_str = op_define[inputs_start_idx:inputs_end_idx]
        op_type_str = op_define[:type_end_idx].strip()
        outputs_start_idx = (inputs_end_idx + 3)
        outputs_str = op_define[outputs_start_idx:]
        input_types = Type.parse_inputs_tuple(inputs_str)
        (output_types, loc_label) = parse_outputs_str(outputs_str)
        op_type = OperationType.parse(op_type_str)
        if op_define.strip().startswith('call'):
            return CallFunc(opd_ids, op_type, input_types, output_types, loc_label)
        return Operation(opd_ids, op_type, input_types, output_types, loc_label, attr, const)

    def dump(self):
        """Serialize back to MLIR text (intended inverse of ``parse``)."""
        input_types_str = Type.dump_type_list(self.input_types, force_list=True)
        output_types_str = Type.dump_type_list(self.output_types)
        loc_label_str = self.loc_label.dump()
        if ('#' in self.opd_ids[0]):
            # Collapse expanded multi-result ids back to '%p:N'.
            number = len(self.opd_ids)
            prefix = self.opd_ids[0].split('#')[0]
            opd_str = f'{prefix}:{number}'
        else:
            opd_str = ', '.join(self.opd_ids)
        const_str = ''
        if (self.const is not None):
            # BUG FIX: was `const_str = f'{const_str} '`, which evaluates to a
            # single space and silently drops the parsed '<{...}>' payload on
            # every parse/dump round-trip.
            const_str = f'{self.const} '
        if (self._attrs is None):
            return f'{opd_str} = {self.op_type.dump()} {const_str}: {input_types_str} -> {output_types_str} {loc_label_str}'
        else:
            # NOTE(review): an op with both attrs and a const payload drops
            # the const in this branch — confirm that combination cannot occur.
            attrs_str = self._attrs.dump()
            return f'{opd_str} = {self.op_type.dump()} {attrs_str} : {input_types_str} -> {output_types_str} {loc_label_str}'
class PerceptualLoss(nn.Module):
    """Identity-preserving perceptual loss based on a face-recognition net.

    Both images are warped/cropped with matrix ``M``, embedded by
    ``recog_net``, and compared by cosine similarity; the loss is the mean
    cosine distance over the batch.
    """

    def __init__(self, recog_net, input_size=112):
        super(PerceptualLoss, self).__init__()
        self.recog_net = recog_net
        # Map images from [0, 1] to [-1, 1] as the recognition net expects.
        self.preprocess = (lambda x: ((2 * x) - 1))
        self.input_size = input_size

    def forward(self, imageA, imageB, M):
        # BUG FIX: `self` was missing from the signature, so `imageA` was
        # bound to the module instance and every call raised at runtime.
        imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size))
        imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size))
        # The recognition net is used as a frozen feature extractor.
        self.recog_net.eval()
        id_featureA = F.normalize(self.recog_net(imageA), dim=(- 1), p=2)
        id_featureB = F.normalize(self.recog_net(imageB), dim=(- 1), p=2)
        cosine_d = torch.sum((id_featureA * id_featureB), dim=(- 1))
        # Mean of (1 - cosine similarity) over the batch.
        return (torch.sum((1 - cosine_d)) / cosine_d.shape[0])
class FogPassFilter_conv1(nn.Module):
    """Three-layer MLP with LeakyReLU activations projecting fog features to 64-d."""

    def __init__(self, inputsize):
        super(FogPassFilter_conv1, self).__init__()
        # Halve, then quarter, the width before the fixed 64-d output layer.
        self.hidden = nn.Linear(inputsize, inputsize // 2)
        self.hidden2 = nn.Linear(inputsize // 2, inputsize // 4)
        self.output = nn.Linear(inputsize // 4, 64)
        self.leakyrelu = nn.LeakyReLU()

    def forward(self, x):
        # Two hidden layers with LeakyReLU, then a linear projection.
        for layer in (self.hidden, self.hidden2):
            x = self.leakyrelu(layer(x))
        return self.output(x)
class PolynomialCameraCal(CameraCal):
    """Polynomial (odd radial distortion) camera model.

    Projection scales the unit-depth image point by the radial weight
    1 + c0*r^2 + c1*r^4 + c2*r^6 and is only valid inside
    ``critical_undistorted_radius``, where that polynomial is monotonic.

    NOTE(review): ``from_distortion_coeffs``, ``storage_order``,
    ``storage_dim``, ``from_storage`` and ``symbolic`` take ``cls`` and look
    like stripped ``@classmethod``s — confirm against upstream.
    """
    NUM_DISTORTION_COEFFS = 3
    DEFAULT_MAX_FOV = math.radians(120)

    def __init__(self, focal_length: T.Sequence[T.Scalar], principal_point: T.Sequence[T.Scalar], distortion_coeffs: T.Sequence[T.Scalar]=(0.0, 0.0, 0.0), critical_undistorted_radius: T.Scalar=None, max_fov: T.Scalar=DEFAULT_MAX_FOV) -> None:
        super().__init__(focal_length, principal_point, distortion_coeffs)
        if (critical_undistorted_radius is not None):
            self.critical_undistorted_radius = critical_undistorted_radius
        elif any(((isinstance(c, sf.Expr) and (not isinstance(c, sf.Number))) for c in distortion_coeffs)):
            # The critical radius cannot be computed numerically from
            # symbolic coefficients, so it must be supplied explicitly.
            raise ValueError('critical_undistorted_radius must be provided if the distortion_coeffs are not all numerical')
        else:
            self.critical_undistorted_radius = self._compute_critical_undistorted_radius(max_fov)

    def from_distortion_coeffs(cls, focal_length: T.Sequence[T.Scalar], principal_point: T.Sequence[T.Scalar], distortion_coeffs: T.Sequence[T.Scalar]=tuple(), **kwargs: T.Scalar) -> PolynomialCameraCal:
        """Alternate constructor taking explicit distortion coefficients."""
        return cls(focal_length=focal_length, principal_point=principal_point, distortion_coeffs=distortion_coeffs, **kwargs)

    def storage_order(cls) -> T.Tuple[(T.Tuple[(str, int)], ...)]:
        """Field layout (name, size) used by to_storage/from_storage."""
        return (('focal_length', 2), ('principal_point', 2), ('critical_undistorted_radius', 1), ('distortion_coeffs', cls.NUM_DISTORTION_COEFFS))

    def _distortion_weight(self, undistorted_radius: T.Scalar) -> T.Scalar:
        """Evaluate 1 + c0*r^2 + c1*r^4 + c2*r^6 at the given radius."""
        total = 1.0
        radius_term = 1.0
        for coef in self.distortion_coeffs.to_flat_list():
            # Accumulate successive even powers r^2, r^4, r^6, ...
            radius_term *= (undistorted_radius ** 2)
            total += (radius_term * coef)
        return total

    def pixel_from_camera_point(self, point: geo.Matrix31, epsilon: T.Scalar=sf.epsilon()) -> T.Tuple[(geo.Matrix21, T.Scalar)]:
        """Project a 3D camera-frame point; validity requires the undistorted
        radius to stay below the critical radius."""
        (p_img, project_is_valid) = LinearCameraCal.project(point, epsilon)
        undistorted_radius = p_img.norm(epsilon)
        distortion_is_valid = sf.is_positive((self.critical_undistorted_radius - undistorted_radius))
        distorted_p_img = (p_img * self._distortion_weight(undistorted_radius))
        linear_camera_cal = LinearCameraCal(self.focal_length.to_flat_list(), self.principal_point.to_flat_list())
        uv = linear_camera_cal.pixel_from_unit_depth(distorted_p_img)
        is_valid = sf.logical_and(project_is_valid, distortion_is_valid, unsafe=True)
        return (uv, is_valid)

    def camera_ray_from_pixel(self, pixel: geo.Matrix21, epsilon: float=0) -> T.Tuple[(geo.Matrix31, T.Scalar)]:
        # No closed-form inverse of the distortion polynomial.
        raise NotImplementedError('Back projection involves computing the inverse of a polynomial function')

    def _compute_critical_undistorted_radius(self, max_fov: float) -> float:
        """Find where the distortion polynomial stops being monotonic,
        searching up to the radius implied by max_fov."""
        max_radius = math.tan((max_fov / 2))
        return compute_odd_polynomial_critical_point(self.distortion_coeffs.to_flat_list(), max_radius)

    def storage_dim(cls) -> int:
        # 2 focal + 2 principal + 1 critical radius + distortion coeffs.
        return ((4 + 1) + cls.NUM_DISTORTION_COEFFS)

    def to_storage(self) -> T.List[T.Scalar]:
        return (((self.focal_length.to_storage() + self.principal_point.to_storage()) + [self.critical_undistorted_radius]) + self.distortion_coeffs.to_storage())

    def from_storage(cls, vec: T.Sequence[T.Scalar]) -> PolynomialCameraCal:
        assert (len(vec) == cls.storage_dim())
        return cls(focal_length=vec[0:2], principal_point=vec[2:4], critical_undistorted_radius=vec[4], distortion_coeffs=vec[5:])

    def symbolic(cls, name: str, **kwargs: T.Any) -> PolynomialCameraCal:
        """Construct a fully symbolic calibration under the given name scope."""
        with sf.scope(name):
            return cls(focal_length=sf.symbols('f_x f_y'), principal_point=sf.symbols('c_x c_y'), critical_undistorted_radius=sf.Symbol('radius_crit'), distortion_coeffs=geo.Matrix(cls.NUM_DISTORTION_COEFFS, 1).symbolic('C', **kwargs).to_flat_list())

    def __repr__(self) -> str:
        return '<{}\n focal_length={},\n principal_point={},\n critical_undistorted_radius={},\n distortion_coeffs={}>'.format(self.__class__.__name__, self.focal_length.to_storage(), self.principal_point.to_storage(), self.critical_undistorted_radius, self.distortion_coeffs.to_storage())
class VGGLoss(tf.keras.Model):
    """Perceptual loss: weighted L1 distance between VGG-19 feature maps."""

    def __init__(self):
        super(VGGLoss, self).__init__(name='VGGLoss')
        self.vgg = Vgg19()
        # Deeper feature maps contribute progressively more to the loss.
        self.layer_weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]

    def call(self, x, y):
        # Map inputs from [-1, 1] to [0, 255] as preprocess_input expects.
        x = (x + 1) / 2 * 255.0
        y = (y + 1) / 2 * 255.0
        x_feats = self.vgg(preprocess_input(x))
        y_feats = self.vgg(preprocess_input(y))
        loss = 0
        for idx in range(len(x_feats)):
            # Gradients do not flow through the target features.
            target = tf.stop_gradient(y_feats[idx])
            loss += self.layer_weights[idx] * L1_loss(x_feats[idx], target)
        return loss
def save_model(model, output_dir, ep_num):
    """Serialize `model`'s state dict to `<output_dir>/model_<ep_num>.bin`.

    Unwraps DataParallel/DistributedDataParallel wrappers (which keep the
    real model under `.module`) before saving.
    """
    target = model.module if hasattr(model, 'module') else model
    filename = 'model_' + str(ep_num) + '.bin'
    torch.save(target.state_dict(), os.path.join(output_dir, filename))
class DistEvalHook(_DistEvalHook):
    """Distributed evaluation hook for mmseg with an efficient-test option."""
    # Metrics where a larger value means a better checkpoint.
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
        # efficient_test streams intermediate results to disk to save memory
        # (forwarded to mmseg's multi_gpu_test below).
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.efficient_test = efficient_test

    def _do_evaluate(self, runner):
        """Run multi-GPU evaluation; rank 0 aggregates, logs and checkpoints."""
        # Sync BatchNorm running stats from rank 0 so every worker evaluates
        # the same effective model.
        if self.broadcast_bn_buffer:
            model = runner.model
            for (name, module) in model.named_modules():
                if (isinstance(module, _BatchNorm) and module.track_running_stats):
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if (not self._should_evaluate(runner)):
            return
        # Scratch dir used by multi_gpu_test to gather per-rank results.
        tmpdir = self.tmpdir
        if (tmpdir is None):
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        from mmseg.apis import multi_gpu_test
        results = multi_gpu_test(runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect, efficient_test=self.efficient_test)
        # Only the master process scores results and saves the best ckpt.
        if (runner.rank == 0):
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            if self.save_best:
                self._save_ckpt(runner, key_score)
def make_stuff(model):
    """Bundle training/eval closures over a flax `model` into an attribute bag."""
    # A throwaway lambda is used purely as a namespace for the closures.
    ret = lambda: None

    def batch_loss(params, images, y_onehot):
        # Mean softmax cross-entropy over one batch.
        logits = model.apply({'params': params}, images)
        return jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=y_onehot))

    def batch_num_correct(params, images, y_onehot):
        # Number of argmax agreements between predictions and one-hot labels.
        logits = model.apply({'params': params}, images)
        return jnp.sum(jnp.argmax(logits, axis=-1) == jnp.argmax(y_onehot, axis=-1))

    def step(train_state, images, y_onehot):
        # One gradient step; returns the updated state and the batch loss.
        loss, grads = value_and_grad(batch_loss)(train_state.params, images, y_onehot)
        return train_state.apply_gradients(grads=grads), loss

    def dataset_loss(params, ds):
        # NOTE(review): terms are scaled by batch size but the mean is not
        # renormalized by the total example count — confirm this weighting.
        return jnp.mean(jnp.array([x.shape[0] * batch_loss(params, x, y) for x, y in ds]))

    def dataset_total_correct(params, ds):
        return jnp.sum(jnp.array([batch_num_correct(params, x, y) for x, y in ds]))

    ret.batch_loss = batch_loss
    ret.batch_num_correct = batch_num_correct
    ret.step = step
    ret.dataset_loss = dataset_loss
    ret.dataset_total_correct = dataset_total_correct
    return ret
.parametrize('backend_name', ['numpy', 'tensorflow', 'pytorch', 'PyTorch'])  # NOTE(review): the leading '@pytest.mark' appears stripped — as written this line is not valid Python; confirm against the original source
def test_backend_no_custom_attributes(backend_name):
    """Backends use __slots__, so setting ad-hoc attributes must fail."""
    pyhf.set_backend(backend_name)
    with pytest.raises(AttributeError):
        pyhf.tensorlib.nonslotted = True
class PathSemigroup(UniqueRepresentation, Parent):
    """Partial semigroup of directed paths of a quiver (a labelled digraph).

    Elements are :class:`QuiverPath` instances; the (partial) product is
    path concatenation. For a one-vertex quiver this is actually a monoid.
    """
    Element = QuiverPath

    def __classcall__(cls, Q):
        # Normalize to an immutable weighted copy so equal quivers produce
        # the same unique parent.
        Q = Q.copy(immutable=True, weighted=True)
        return super().__classcall__(cls, Q)

    def __init__(self, Q):
        """Validate the quiver's labels and vertices, then pick the category."""
        labels = Q.edge_labels()
        if (len(set(labels)) != len(labels)):
            raise ValueError('edge labels of the digraph must be unique')
        for x in labels:
            if ((not isinstance(x, str)) or (x == '')):
                raise ValueError('edge labels of the digraph must be nonempty strings')
            # 'e_' is reserved for idempotent generator names; '*' for printing.
            if (x[0:2] == 'e_'):
                raise ValueError("edge labels of the digraph must not begin with 'e_'")
            if (x.find('*') != (- 1)):
                raise ValueError("edge labels of the digraph must not contain '*'")
        for v in Q:
            if (not isinstance(v, (Integer, int))):
                raise ValueError('vertices of the digraph must be labelled by integers')
        # An acyclic quiver has finitely many paths; otherwise infinitely many.
        if Q.is_directed_acyclic():
            cat = FiniteEnumeratedSets()
        else:
            cat = InfiniteEnumeratedSets()
        # Edges sorted by label; paths are stored as indices into this tuple.
        self._sorted_edges = tuple(sorted(Q.edges(sort=True), key=(lambda x: x[2])))
        self._labels = tuple([x[2] for x in self._sorted_edges])
        self._label_index = {s[2]: i for (i, s) in enumerate(self._sorted_edges)}
        self._nb_arrows = max(len(self._sorted_edges), 1)
        # Generator names: one idempotent e_v per vertex plus the arrow labels.
        names = ['e_{0}'.format(v) for v in Q.vertex_iterator()]
        names += list(self._labels)
        self._quiver = Q
        if (Q.num_verts() == 1):
            cat = cat.join([cat, Monoids()])
        else:
            cat = cat.join([cat, Semigroups()])
        Parent.__init__(self, names=names, category=cat)

    def _repr_(self):
        if (self._quiver.num_verts() != 1):
            return 'Partial semigroup formed by the directed paths of {}'.format(self._quiver)
        return 'Monoid formed by the directed paths of {}'.format(self._quiver)

    def _coerce_map_from_(self, other):
        """Coerce from the path semigroup of any subquiver of our quiver."""
        if (not isinstance(other, PathSemigroup)):
            return
        sQ = self._quiver._backend
        oQ = other.quiver()._backend
        if (sQ.num_verts() < oQ.num_verts()):
            return False
        if any(((not sQ.has_vertex(v)) for v in oQ.iterator_verts(None))):
            return False
        return all((sQ.has_edge(*e) for e in oQ.iterator_out_edges(oQ.iterator_verts(None), True)))

    def _element_constructor_(self, data, check=True):
        """Build a QuiverPath from flexible input.

        Accepted forms: an existing QuiverPath, the integer 1 (trivial path
        in a one-vertex quiver), an edge label, a list of labels or of edge
        triples, or ``[(v, w)]`` for a trivial path at a vertex.
        """
        L = self._label_index
        E = self._sorted_edges
        if isinstance(data, QuiverPath):
            if (data.parent() is self):
                return data
            # Re-map a foreign path's edges onto our edge indexing.
            start = data.initial_vertex()
            end = data.terminal_vertex()
            edge_index = {e: i for (i, e) in enumerate(E)}
            path = [edge_index.get(e) for e in data]
        elif (not data):
            raise ValueError('no data given to define this path')
        elif (data == 1):
            # Multiplicative identity: only meaningful with a single vertex.
            start = end = next(self._quiver.vertex_iterator())
            path = []
        elif isinstance(data, str):
            # A single edge label.
            i = L.get(data, None)
            if (i is None):
                raise ValueError('data={!r} is not the label of an edge'.format(data))
            (start, end, _) = E[i]
            path = [i]
        elif (not isinstance(data, (tuple, list))):
            raise TypeError('data={} is not valid. A path must be initialized from either a tuple or a list'.format(data))
        elif isinstance(data[0], str):
            # A sequence of edge labels.
            start = L.get(data[0])
            if (start is None):
                raise ValueError('data[0]={!r} is not the label of an edge'.format(data[0]))
            start = E[start][0]
            end = L.get(data[(- 1)])
            if (end is None):
                raise ValueError('data[-1]={!r} is not the label of an edge'.format(data[(- 1)]))
            end = E[end][1]
            path = [L.get(e) for e in data]
        elif ((len(data) == 1) and (len(data[0]) == 2)):
            # [(v, w)]: a trivial (length-0) path at a vertex.
            start = data[0][0]
            end = data[0][1]
            path = []
        else:
            # A sequence of (start, end, label) edge triples.
            if any(((len(x) != 3) for x in data)):
                x = next((x for x in data if (len(x) != 3)))
                raise ValueError('each edge must be a triple, got {}'.format(x))
            start = data[0][0]
            end = data[(- 1)][1]
            edge_index = {e: i for (i, e) in enumerate(E)}
            path = [edge_index.get(e) for e in data]
        if check:
            # Validate endpoints and that consecutive edges compose.
            Q = self._quiver
            if ((start is None) or (start not in Q)):
                raise ValueError('startpoint {} should belong to {}'.format(start, Q.vertices(sort=False)))
            if ((end is None) or (end not in Q)):
                raise ValueError('endpoint {} should belong to {}'.format(end, Q.vertices(sort=False)))
            if (not path):
                if (start != end):
                    raise ValueError('start and endpoint of a path of length 0 must coincide')
            else:
                if any(((x is None) for x in path)):
                    i = next((i for (i, x) in enumerate(path) if (x is None)))
                    raise ValueError('{} is not an edge'.format(data[i]))
                for n in range(1, len(path)):
                    e0 = E[path[(n - 1)]][1]
                    e1 = E[path[n]][0]
                    if (e0 != e1):
                        raise ValueError('edge {} ends at {}, but edge {} starts at {}'.format(E[path[(n - 1)]][2], e0, E[path[n]][2], e1))
                if (E[path[0]][0] != start):
                    raise ValueError('first edge should start at vertex {}'.format(start))
                if (E[path[(- 1)]][1] != end):
                    raise ValueError('last edge should end at vertex {}'.format(end))
        return self.element_class(self, start, end, path)
    _method  # NOTE(review): stray bare name — likely a decorator (e.g. @cached_method) with its '@' stripped during extraction; same for the occurrences below
    def arrows(self):
        """One length-1 path per edge of the quiver."""
        return tuple((self.element_class(self, e[0], e[1], [i]) for (i, e) in enumerate(self._sorted_edges)))
    _method
    def idempotents(self):
        """One trivial (length-0) path per vertex of the quiver."""
        return tuple((self.element_class(self, v, v, []) for v in self._quiver.vertex_iterator()))

    def ngens(self):
        # Generators = idempotents (vertices) + arrows (edges).
        Q = self._quiver
        return (Q.num_verts() + Q.num_edges())
    _method
    def gen(self, i):
        return self.gens()[i]
    _method
    def gens(self):
        # Idempotents first, then arrows (matches the `names` order).
        return (self.idempotents() + self.arrows())

    def is_finite(self):
        return (self._quiver.is_directed_acyclic() and (not self._quiver.has_loops()))

    def __len__(self):
        return len(self.all_paths())

    def cardinality(self):
        from sage.rings.integer_ring import ZZ
        if (self._quiver.is_directed_acyclic() and (not self._quiver.has_loops())):
            return ZZ(len(self))
        from sage.rings.infinity import Infinity
        return Infinity

    def __iter__(self):
        """Iterate over all paths in order of increasing length."""
        d = 0
        length_d_available = True
        while length_d_available:
            length_d_available = False
            for v in self._quiver.vertex_iterator():
                for w in self.iter_paths_by_length_and_startpoint(d, v):
                    length_d_available = True
                    (yield w)
            d += 1

    def iter_paths_by_length_and_startpoint(self, d, v):
        """Yield all paths of length `d` starting at vertex `v`."""
        if (not (d >= 0)):
            raise ValueError('path length must be a non-negative integer')
        if (v not in self._quiver):
            raise ValueError('the starting point {} is not a vertex of the underlying quiver'.format(v))
        if (not d):
            (yield self.element_class(self, v, v, []))
        else:
            # Extend each (d-1)-path by every outgoing edge at its endpoint.
            for w in self.iter_paths_by_length_and_startpoint((d - 1), v):
                for a in self._quiver._backend.iterator_out_edges([w.terminal_vertex()], True):
                    (yield self((list(w) + [a]), check=False))

    def iter_paths_by_length_and_endpoint(self, d, v):
        """Yield all paths of length `d` ending at vertex `v`."""
        if (not (d >= 0)):
            raise ValueError('path length must be a non-negative integer')
        if (v not in self._quiver):
            raise ValueError('the starting point {} is not a vertex of the underlying quiver'.format(v))
        if (not d):
            (yield self.element_class(self, v, v, []))
        else:
            # Prepend each incoming edge at the start of every (d-1)-path.
            for w in self.iter_paths_by_length_and_endpoint((d - 1), v):
                for a in self._quiver._backend.iterator_in_edges([w.initial_vertex()], True):
                    (yield self(([a] + list(w)), check=False))

    def quiver(self):
        return self._quiver
    _method
    def reverse(self):
        """Path semigroup of the reverse (opposite) quiver."""
        return self._quiver.reverse().path_semigroup()

    def algebra(self, k, order='negdegrevlex'):
        """Path algebra of this semigroup over the ring `k`."""
        from sage.quivers.algebra import PathAlgebra
        return PathAlgebra(k, self, order)

    def representation(self, k, *args, **kwds):
        return QuiverRep(k, self, *args, **kwds)

    def S(self, k, vertex):
        """Simple module supported at `vertex`."""
        if (vertex not in self._quiver):
            raise ValueError('must specify a valid vertex of the quiver')
        return QuiverRep(k, self, {vertex: 1})
    simple = S

    def P(self, k, vertex):
        """Indecomposable projective module at `vertex`."""
        if (vertex not in self._quiver):
            raise ValueError('must specify a valid vertex of the quiver')
        return QuiverRep(k, self, [[(vertex, vertex)]], option='paths')
    projective = P

    def I(self, k, vertex):
        """Indecomposable injective module at `vertex`."""
        if (vertex not in self._quiver):
            raise ValueError('must specify a valid vertex of the quiver')
        return QuiverRep(k, self, [[(vertex, vertex)]], option='dual paths')
    injective = I

    def free_module(self, k):
        return QuiverRep(k, self, [[(v, v)] for v in self._quiver], option='paths')
    _attribute
    def _poincare_series(self):
        """Matrix generating function (1 - M t)^-1 of path counts, where M is
        the quiver's adjacency matrix."""
        P = ZZ['t']
        t = P.gen()
        M = self._quiver.adjacency_matrix()
        out = (~ (1 - (M * t)))
        out.set_immutable()
        return out

    def all_paths(self, start=None, end=None):
        """List all paths from `start` to `end` (either may be None = all).

        Only valid for acyclic quivers, where the path set is finite.
        """
        if ((start is not None) and (start not in self._quiver)):
            raise ValueError('the start vertex {} is not a vertex of the quiver'.format(start))
        if ((end is not None) and (end not in self._quiver)):
            raise ValueError('the end vertex {} is not a vertex of the quiver'.format(end))
        Q = self._quiver
        if (not Q.is_directed_acyclic()):
            raise ValueError('the underlying quiver has cycles, thus, there may be an infinity of directed paths')
        # Missing endpoints: union over all vertices.
        if (start is None):
            results = []
            for v in Q:
                results += self.all_paths(v, end)
            return results
        if (end is None):
            results = []
            for v in Q:
                results += self.all_paths(start, v)
            return results
        if (start == end):
            return [self.element_class(self, start, end, [])]

        # Convert a vertex sequence from Q.all_paths into edge paths,
        # accounting for parallel edges (label may be a list of labels).
        def _v_to_e(path):
            if (len(path) == 1):
                return [self.element_class(self, path[0], path[0], [])]
            paths = []
            l = Q.edge_label(path[0], path[1])
            if isinstance(l, str):
                for b in _v_to_e(path[1:]):
                    paths.append(self(([(path[0], path[1], l)] + list(b)), check=False))
            else:
                # Multiple parallel edges: branch over each label.
                for a in l:
                    for b in _v_to_e(path[1:]):
                        paths.append(self(([(path[0], path[1], a)] + list(b)), check=False))
            return paths
        result = []
        for path in Q.all_paths(start, end):
            result += _v_to_e(path)
        return result
_HEADS_REGISTRY.register()  # NOTE(review): likely originally the decorator `@_HEADS_REGISTRY.register()` for the class below — the '@' appears stripped during extraction; confirm against the original source.
class ClasHead(EmbeddingHead):
    """Classification head: pooled features -> bottleneck -> (margin) logits."""

    def forward(self, features, targets=None):
        # Pool spatial features, run the bottleneck, then flatten per sample.
        pooled = self.pool_layer(features)
        embedding = self.bottleneck(pooled)
        embedding = embedding.view(embedding.size(0), -1)

        # Plain Linear layers consume raw features; margin-based layers
        # expect normalized features against normalized class weights.
        if self.cls_layer.__class__.__name__ == 'Linear':
            logits = F.linear(embedding, self.weight)
        else:
            logits = F.linear(F.normalize(embedding), F.normalize(self.weight))

        # Inference path: just the scaled logits.
        if not self.training:
            return logits.mul_(self.cls_layer.s)

        # Training path: the margin layer gets its own copy so the raw
        # logits can still be reported (scaled) alongside it.
        cls_outputs = self.cls_layer(logits.clone(), targets)
        return {
            'cls_outputs': cls_outputs,
            'pred_class_logits': logits.mul_(self.cls_layer.s),
            'features': embedding,
        }
class SymmetricTensorDescription():
    """Value object describing a symmetric operand tensor.

    Captures element type, memory layout, triangular fill mode, alignment,
    complex transform, and which side the operand sits on.
    """

    def __init__(self, element, layout, fill_mode, alignment=1, complex_transform=ComplexTransform.none, side_mode=SideMode.Left):
        # Record every argument verbatim; no validation is performed here.
        self.element = element
        self.layout = layout
        self.fill_mode = fill_mode
        self.side_mode = side_mode
        self.complex_transform = complex_transform
        self.alignment = alignment
_cache()  # NOTE(review): likely a stripped decorator (e.g. `@functools.lru_cache()`) for the function below — the '@' appears lost during extraction; confirm against the original source.
def setup_logger_dist(output=None, distributed_rank=0, *, color=True, name='moco', abbrev_name=None):
    """Configure and return the `name` logger for (possibly distributed) runs.

    Rank 0 logs to stdout, optionally colorized; when `output` is given,
    every rank also logs to a file (non-zero ranks get a `.rank<k>` suffix).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = name

    plain_formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%m/%d %H:%M:%S')

    # Console handler: only the master process writes to stdout.
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(colored('[%(asctime)s %(name)s]: ', 'green') + '%(message)s', datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(abbrev_name))
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # File handler: every rank may write, each to its own file.
    if output is not None:
        if output.endswith(('.txt', '.log')):
            filename = output
        else:
            filename = os.path.join(output, 'log.txt')
        if distributed_rank > 0:
            filename = filename + f'.rank{distributed_rank}'
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    # NOTE(review): replaces the global root logger wholesale — confirm this
    # aggressive override is intentional.
    logging.root = logger
    return logger
class Warmup(torch.optim.lr_scheduler._LRScheduler):
    """Linear LR warmup that hands off to a wrapped scheduler afterwards.

    For the first ``num_epochs * iters_per_epoch`` steps the learning rate is
    linearly ramped from ``init_lr_ratio * base_lr`` toward ``base_lr``;
    subsequent ``step()`` calls are delegated entirely to ``scheduler``.
    """

    def __init__(self, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, init_lr_ratio: float=0.0, num_epochs: int=5, last_epoch: int=(- 1), iters_per_epoch: int=None):
        self.base_scheduler = scheduler
        # Guarantee at least one warmup iteration so get_lr never divides by 0.
        self.warmup_iters = max(num_epochs * iters_per_epoch, 1)
        # A degenerate single-iteration warmup starts directly at base_lr.
        self.init_lr_ratio = init_lr_ratio if self.warmup_iters > 1 else 1.0
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        # Only ever queried during the warmup window.
        assert self.last_epoch < self.warmup_iters
        progress = float(self.last_epoch) / self.warmup_iters
        scale = self.init_lr_ratio + (1 - self.init_lr_ratio) * progress
        return [base_lr * scale for base_lr in self.base_lrs]

    def step(self, *args, **kwargs):
        # During warmup advance this scheduler; afterwards delegate.
        if self.last_epoch < self.warmup_iters - 1:
            super().step(*args, **kwargs)
        else:
            self.base_scheduler.step(*args, **kwargs)
def test_cache_different_args():
    """The memoizing cache must keep one entry per distinct argument.

    NOTE(review): the decorator that attaches ``._cache`` to the inner
    ``test`` function appears to have been stripped from this file; as
    written, ``test._cache`` raises AttributeError. Confirm upstream.
    """
    def test(x):
        return (x * x)
    first = np.random.rand(2)
    second = np.random.rand(3)
    res_first = test(first)
    assert len(test._cache.cache) == 1
    res_second = test(second)
    assert len(test._cache.cache) == 2
    # Cached results must still be numerically correct.
    assert np.allclose(first * first, res_first)
    assert np.allclose(second * second, res_second)
def _sanity_check_tmap(T):
    """Validate that the transport map ``T`` sums to 1 (within tolerance).

    Args:
        T: array-like transport map (e.g. an optimal-transport coupling).

    Raises:
        Exception: if the entries of ``T`` do not sum to ~1.0 -- which, per
            the original message, typically indicates NaNs caused by very
            large ground-metric values.
    """
    # Compute the sum once instead of three times (original recomputed it).
    total = np.sum(T)
    if not math.isclose(total, 1.0, abs_tol=1e-07):
        print('Sum of transport map is ', total)
        raise Exception('NAN inside Transport MAP. Most likely due to large ground metric values')
def extract_data(inputs):
    """Unwrap a dict of ``DataContainer`` values into plain payloads.

    CPU-only containers contribute their first element as-is; otherwise the
    payload is moved to the current CUDA device if it is still on the CPU.

    NOTE(review): when the payload is already on a GPU (``get_device`` != -1)
    the wrapped ``value.data`` is stored rather than ``data[0]`` -- confirm
    this asymmetry is intentional.
    """
    assert isinstance(inputs, dict)
    new_inputs = {}
    for (key, value) in inputs.items():
        assert isinstance(value, DataContainer)
        data = value.data
        if value.cpu_only:
            new_inputs[key] = data[0]
        else:
            # get_device presumably returns -1 when the payload is on CPU.
            device = get_device(data)
            if (device == (- 1)):
                data = cpu_to_gpu(data[0], torch.cuda.current_device())
            new_inputs[key] = data
    return new_inputs
def test_Task12AXDataset_deepcopy():
    """A deep-copied Task12AXDataset must remain fully usable end-to-end."""
    from copy import deepcopy
    dataset = deepcopy(Task12AXDataset(num_seqs=10))
    dataset.init_seq_order(1)
    total = dataset.num_seqs
    # Walk every sequence to prove loading/reading works on the copy.
    for seq_idx in range(total):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        targets = dataset.get_data(seq_idx, 'classes')
        print(targets)
    assert not dataset.is_less_than_num_seqs(total)
def get_ngpus_per_node(args):
    """Return the per-node GPU counts as a list of length ``args.nnodes``.

    If ``args.ngpus_per_node`` is absent, the world size is split evenly
    across the nodes; an uneven split is not supported.
    """
    nnodes = args.nnodes
    if hasattr(args, 'ngpus_per_node'):
        ngpus_per_node = args.ngpus_per_node
    else:
        if (args.world_size % nnodes) != 0:
            # Only even splits are implemented.
            raise NotImplementedError()
        ngpus_per_node = [args.world_size // nnodes] * nnodes
    assert len(ngpus_per_node) == nnodes
    return ngpus_per_node
class TestIterationLimits():
    """Check that the ``maxiter``/``maxfev`` options bound the work done by
    the derivative-free minimizers (Nelder-Mead and Powell)."""

    def setup_method(self):
        self.funcalls = 0

    def slow_func(self, v):
        # Count evaluations so they can be cross-checked against res['nfev'].
        self.funcalls += 1
        r = np.sqrt((v[0] ** 2) + (v[1] ** 2))
        t = np.arctan2(v[0], v[1])
        return np.sin((r * 20) + t) + (r * 0.5)

    def test_neldermead_limit(self):
        self.check_limits('Nelder-Mead', 200)

    def test_powell_limit(self):
        self.check_limits('powell', 1000)

    def check_limits(self, method, default_iters):
        for start_v in ([0.1, 0.1], [1, 1], [2, 2]):
            # maxfev alone must bound the number of function evaluations.
            for mfev in (50, 500, 5000):
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v, method=method, options={'maxfev': mfev})
                assert self.funcalls == res['nfev']
                if res['success']:
                    assert res['nfev'] < mfev
                else:
                    assert res['nfev'] >= mfev
            # maxiter alone must bound the number of iterations.
            for mit in (50, 500, 5000):
                res = optimize.minimize(self.slow_func, start_v, method=method, options={'maxiter': mit})
                if res['success']:
                    assert res['nit'] <= mit
                else:
                    assert res['nit'] >= mit
            # Both limits together: whichever is hit first stops the solver.
            for mfev, mit in ([50, 50], [5000, 5000], [5000, np.inf]):
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v, method=method, options={'maxiter': mit, 'maxfev': mfev})
                assert self.funcalls == res['nfev']
                if res['success']:
                    assert (res['nfev'] < mfev) and (res['nit'] <= mit)
                else:
                    assert (res['nfev'] >= mfev) or (res['nit'] >= mit)
            # A None limit falls back to the method default (with 2x slack).
            for mfev, mit in ([np.inf, None], [None, np.inf]):
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v, method=method, options={'maxiter': mit, 'maxfev': mfev})
                assert self.funcalls == res['nfev']
                if res['success']:
                    if mfev is None:
                        assert res['nfev'] < (default_iters * 2)
                    else:
                        assert res['nit'] <= (default_iters * 2)
                else:
                    assert (res['nfev'] >= (default_iters * 2)) or (res['nit'] >= (default_iters * 2))
class Dim(_DimMixin):
    """A dimension tag: either static (fixed int size) or dynamic (sizes
    provided per entry by a tensor).

    Exactly one of ``size`` (static) or ``dyn_size_ext`` (dynamic) ends up
    set; ``capacity`` optionally bounds a dynamic dim, and for a static dim
    it defaults to the fixed size.
    """
    Types = DimTypes
    __slots__ = ('name', 'capacity', 'size', 'dyn_size_ext', '_dyn_size_max_value', '_extra')
    name: Optional[str]
    capacity: Optional[int]
    size: Optional[int]
    dyn_size_ext: Optional[_t.Tensor]
    _dyn_size_max_value: Optional[_t.Tensor]
    _extra: Optional[_DimExtra]
    def __init__(self, dimension: Optional[Union[(int, _t.Tensor)]], *, name: Optional[str]=None, capacity: Optional[int]=None, dyn_size_ext: Optional[_t.Tensor]=None, description: Optional[str]=None, **kwargs):
        """Create a dim from an int (static), a Tensor (dynamic sizes), or
        None (dynamic, size tensor optionally given via ``dyn_size_ext``)."""
        if (dimension is None):
            # Dynamic dim; the size tensor may be supplied now or later.
            self.capacity = capacity
            self.size = None
            # NOTE(review): truthiness (`if dyn_size_ext`) rather than an
            # `is not None` check -- relies on the Tensor type's __bool__;
            # confirm this is intended.
            self.dyn_size_ext = (dyn_size_ext.copy() if dyn_size_ext else None)
        elif isinstance(dimension, int):
            # Static dim: capacity defaults to the fixed size.
            self.capacity = (capacity or dimension)
            self.size = dimension
            self.dyn_size_ext = None
        elif isinstance(dimension, _t.Tensor):
            # Dynamic dim: per-entry sizes given by a tensor (copied).
            self.capacity = capacity
            self.size = None
            self.dyn_size_ext = dimension.copy()
        else:
            raise TypeError(f'unexpected dimension type: {type(dimension)}')
        if ((not name) and (not description) and self.dyn_size_ext):
            # Fall back to the size tensor's name when no name was given.
            name = self.dyn_size_ext.name
        self.name = (name or description)
        self._dyn_size_max_value = None
        self._extra = None
        if kwargs:
            # Rarely-used options are delegated to the lazily-created extra.
            self._handle_extra_kwargs(**kwargs)
    def __repr__(self):
        return ('Dim{%s}' % self.short_repr())
def add_additional_type_casts(func: LeanFunctionInfo, rw_casts: List[str], additional_types: List[CairoType]) -> List[str]:
    """Append (in place) the Lean type casts for ``additional_types`` that
    are not already in ``rw_casts``, and return the (mutated) list."""
    new_casts = func.struct_defs.get_lean_type_cast_rec(scope=func.func_scope, cairo_types=additional_types, open_namespaces=func.open_namespaces)
    # Lazy generator: each membership check sees the casts appended so far,
    # matching the original one-by-one append loop.
    rw_casts.extend(cast for cast in new_casts if cast not in rw_casts)
    return rw_casts
def remap_state_dict_hf_gpt_neox(state_dict, config):
    """Remap a HuggingFace GPT-NeoX checkpoint's parameter names (and a few
    layouts) to this repo's conventions.

    Performs: prefix rename, word-embedding padding to a multiple of
    ``pad_vocab_size_multiple``, layernorm/MLP renames, dropping the static
    causal-mask buffers, and permuting the fused QKV weight/bias from NeoX's
    per-head interleaving to a (three, nheads, headdim) grouping.
    """
    # 'gpt_neox.*' -> 'transformer.*'
    def key_mapping_layers(key):
        return re.sub('^gpt_neox.', 'transformer.', key)
    state_dict = OrderedDict(((key_mapping_layers(k), v) for (k, v) in state_dict.items()))
    # Word embeddings.
    def key_mapping_emb(key):
        return re.sub('^transformer.embed_in.', 'transformer.embeddings.word_embeddings.', key)
    state_dict = OrderedDict(((key_mapping_emb(k), v) for (k, v) in state_dict.items()))
    word_embeddings = state_dict.pop('transformer.embeddings.word_embeddings.weight')
    # Pad the vocabulary up to the next multiple of pad_vocab_size_multiple.
    pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
    vocab_size = (math.ceil((config.vocab_size / pad_vocab_size_multiple)) * pad_vocab_size_multiple)
    state_dict['transformer.embeddings.word_embeddings.weight'] = F.pad(word_embeddings, (0, 0, 0, (vocab_size - word_embeddings.shape[0])))
    if getattr(config, 'tie_word_embeddings'):
        # Tied output head shares the (padded) input embedding weight.
        state_dict['lm_head.weight'] = state_dict['transformer.embeddings.word_embeddings.weight']
    else:
        output_embeddings = state_dict.pop('embed_out.weight')
        # Pad the output head the same way so logits cover the padded vocab.
        state_dict['lm_head.weight'] = F.pad(output_embeddings, (0, 0, 0, (vocab_size - output_embeddings.shape[0])))
    # LayerNorm renames.
    def key_mapping_ln(key):
        key = re.sub('^transformer.final_layer_norm.', 'transformer.ln_f.', key)
        key = re.sub('^transformer.layers.(\\d+).input_layernorm.', 'transformer.layers.\\1.norm1.', key)
        key = re.sub('^transformer.layers.(\\d+).post_attention_layernorm.', 'transformer.layers.\\1.norm2.', key)
        return key
    state_dict = OrderedDict(((key_mapping_ln(k), v) for (k, v) in state_dict.items()))
    # MLP renames.
    def key_mapping_mlp(key):
        key = re.sub('^transformer.layers.(\\d+).mlp.dense_h_to_4h.', 'transformer.layers.\\1.mlp.fc1.', key)
        key = re.sub('^transformer.layers.(\\d+).mlp.dense_4h_to_h.', 'transformer.layers.\\1.mlp.fc2.', key)
        return key
    state_dict = OrderedDict(((key_mapping_mlp(k), v) for (k, v) in state_dict.items()))
    # Attention: drop the causal-mask buffers and re-interleave fused QKV.
    for l in range(config.n_layer):
        state_dict.pop(f'transformer.layers.{l}.attention.bias')
        state_dict.pop(f'transformer.layers.{l}.attention.masked_bias')
        headdim = (config.hidden_size // config.num_attention_heads)
        Wqkv = state_dict.pop(f'transformer.layers.{l}.attention.query_key_value.weight')
        # (nheads, three, headdim) interleaving -> (three, nheads, headdim).
        state_dict[f'transformer.layers.{l}.mixer.Wqkv.weight'] = rearrange(Wqkv, '(nheads three headdim) ... -> (three nheads headdim) ...', three=3, headdim=headdim)
        bqkv = state_dict.pop(f'transformer.layers.{l}.attention.query_key_value.bias')
        state_dict[f'transformer.layers.{l}.mixer.Wqkv.bias'] = rearrange(bqkv, '(nheads three headdim) -> (three nheads headdim)', three=3, headdim=headdim)
    def key_mapping_attn(key):
        key = re.sub('^transformer.layers.(\\d+).attention.dense.', 'transformer.layers.\\1.mixer.out_proj.', key)
        key = re.sub('^transformer.layers.(\\d+).attention.rotary_emb.', 'transformer.layers.\\1.mixer.rotary_emb.', key)
        return key
    state_dict = OrderedDict(((key_mapping_attn(k), v) for (k, v) in state_dict.items()))
    return state_dict
def run_nimbix(target, data):
    """Emit the ``run_nimbix`` Makefile target into the writable ``target``.

    The command line includes ``$(CMD_ARGS)`` only when the first launch
    entry declares ``cmd_args``; with no ``launch`` key only the target
    header is written.
    """
    target.write('run_nimbix: all\n')
    if 'launch' not in data:
        return
    if 'cmd_args' in data['launch'][0]:
        target.write('\t$(COMMON_REPO)/common/utility/nimbix/run_nimbix.py $(EXECUTABLE) $(CMD_ARGS) $(XSA)\n\n')
    else:
        target.write('\t$(COMMON_REPO)/common/utility/nimbix/run_nimbix.py $(EXECUTABLE) $(XSA)\n\n')
def test_dense_heads_test_attr():
    """Report which dense heads implement the standard test-time methods,
    requiring ``simple_test`` everywhere and warning when ``aug_test`` is
    missing."""
    exceptions = ['FeatureAdaption']
    heads_to_check = [m for m in dense_heads.__all__ if m not in exceptions]
    check_attributes = ['simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn', 'aug_test_rpn']
    table_data = [['head name'] + check_attributes]
    not_found = {attr: [] for attr in check_attributes}
    for head_name in heads_to_check:
        head_attrs = dir(globals()[head_name])
        row = [head_name]
        for attr in check_attributes:
            present = attr in head_attrs
            row.append(present)
            if not present:
                not_found[attr].append(head_name)
        table_data.append(row)
    # Print the presence matrix for human inspection.
    print()
    print(AsciiTable(table_data).table)
    assert (len(not_found['simple_test']) == 0), f"simple_test not found in {not_found['simple_test']}"
    if (len(not_found['aug_test']) != 0):
        warnings.warn(f"aug_test not found in {not_found['aug_test']}. Please implement it or raise NotImplementedError.")
from dataclasses import dataclass


# NOTE(review): the decorator line above this class was corrupted to a bare
# `(frozen=True)` in this file; restored as `@dataclass(frozen=True)`, which
# the field-only class body and the keyword construction in parse() require.
# `parse` takes no self and is used as an alternate constructor, so it is
# restored as a @staticmethod (backward compatible for class-level calls).
@dataclass(frozen=True)
class Return():
    """A single return value in a declaration: optional name, type, and an
    optional alias annotation (e.g. ``Tensor(a!)``)."""
    name: Optional[str]
    type: Type
    annotation: Optional[Annotation]

    @staticmethod
    def parse(arg: str) -> 'Return':
        """Parse one return declaration such as ``Tensor(a!) out`` and
        round-trip check that ``str(result) == arg``."""
        name: Optional[str]
        if (' ' in arg):
            (type_and_annot, name) = arg.rsplit(' ', 1)
        else:
            type_and_annot = arg
            name = None
        match = re.match('Tensor\\((.+)\\)(.*)', type_and_annot)
        annotation: Optional[Annotation]
        if match:
            # Only plain/optional/list Tensor may carry an alias annotation.
            assert (match.group(2) in ['', '?', '[]']), 'unrecognized alias analysis form with Tensor'
            type_s = ('Tensor' + match.group(2))
            annotation = Annotation.parse(match.group(1))
        else:
            type_s = type_and_annot
            annotation = None
        type = Type.parse(type_s)
        r = Return(name=name, type=type, annotation=annotation)
        assert (str(r) == arg), f'{str(r)} != {arg}'
        return r

    def is_write(self) -> bool:
        """Whether this return aliases and writes one of the inputs."""
        return ((self.annotation is not None) and self.annotation.is_write)

    def __str__(self) -> str:
        type = f'{self.type}'
        if self.annotation:
            assert (type in ['Tensor', 'Tensor?', 'Tensor[]'])
            type = type.replace('Tensor', f'Tensor({self.annotation})')
        if (self.name is None):
            return type
        else:
            return f'{type} {self.name}'
def load_clusters():
    """Read cluster files from ``args.input_dir`` (skipping ``metrics.txt``),
    collect the document names listed in each file, and pickle the resulting
    list-of-lists to ``args.out_dir``/predicted_topics."""
    topics = []
    for cluster_file in os.listdir(args.input_dir):
        if cluster_file == 'metrics.txt':
            continue
        print(cluster_file)
        doc_names = []
        with open(os.path.join(args.input_dir, cluster_file), 'r') as handle:
            for line in handle:
                parts = line.strip().split('_')
                # Document name is the first two underscore-separated fields.
                doc_name = parts[0] + '_' + parts[1]
                print(doc_name)
                doc_names.append(doc_name)
        topics.append(doc_names)
    with open(os.path.join(args.out_dir, 'predicted_topics'), 'wb') as handle:
        cPickle.dump(topics, handle)
def main():
    """Score ASR transcripts with the pretrained fairseq WMT19 English LM and
    print mean/median log-probability and perplexity statistics.

    Reads the transcript file, optionally keeps only lines whose trailing
    utterance id is in the target set, optionally strips the first/last
    token, lowercases, scores each line, drops NaNs, and reports stats.
    """
    args = get_args()
    lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe')
    lm.eval().cuda()
    if ((args.manifest is None) and (args.prompts_description is None)):
        target_ids = None
    else:
        target_ids = get_target_sequences(args.manifest, args.prompts_description)
    with open(args.asr_transcript, 'r') as fin:
        lines = fin.readlines()
    if (target_ids is not None):
        filtered = []
        for line in lines:
            # Last token appears to be an id like "(...-123)": take the part
            # after '-' and drop the trailing char -- confirm format upstream.
            line_id = line.split()[(- 1)]
            line_id = int(line_id.split('-')[1][:(- 1)])
            if (line_id in target_ids):
                filtered.append(line)
        lines = filtered
    else:
        pass
    if args.cut_id:
        # Drop the leading id token.
        lines = [' '.join(x.split()[1:]) for x in lines]
    if args.cut_tail:
        # Drop the trailing id token.
        lines = [' '.join(x.split()[:(- 1)]) for x in lines]
    lines = [x.strip().lower() for x in lines]
    def get_logprob(sent):
        # Mean negated positional log-probability of the sentence.
        return lm.score(sent)['positional_scores'].mean().neg().item()
    logprobs = [get_logprob(l) for l in lines]
    filtered = [x for x in logprobs if (not np.isnan(x))]
    if (len(filtered) != len(logprobs)):
        warnings.warn('NaNs detected!')
        logprobs = filtered
    perplexities = [np.exp(l) for l in logprobs]
    for (name, stats) in [('logprob', logprobs), ('perplexity', perplexities)]:
        mean = np.mean(stats)
        # Standard error of the mean.
        sem = (np.std(stats) / np.sqrt(len(stats)))
        median = np.median(stats)
        interval = list(np.percentile(stats, [10, 90]))
        (mean, sem, median, percentile10, percentile90) = [round(x, 2) for x in ([mean, sem, median] + interval)]
        print(name)
        print(f' Mean {mean} +- {sem}')
        print(f' Median {median}, 90% confidence interval {percentile10}...{percentile90}')
def _seg_42():
    """Return one segment of the IDNA/UTS-46 codepoint mapping table.

    Each entry is ``(codepoint, status[, mapping])``: 'M' entries map to a
    replacement string, 'X' entries are disallowed.

    NOTE(review): every 'M' mapping string below is empty (u''), which looks
    like the replacement characters were stripped when this file was
    extracted -- confirm against the generated idna ``uts46data`` table.
    """
    return [(64060, 'M', u''), (64061, 'M', u''), (64062, 'M', u''), (64063, 'M', u''), (64064, 'M', u''), (64065, 'M', u''), (64066, 'M', u''), (64067, 'M', u''), (64068, 'M', u''), (64069, 'M', u''), (64070, 'M', u''), (64071, 'M', u''), (64072, 'M', u''), (64073, 'M', u''), (64074, 'M', u''), (64075, 'M', u''), (64076, 'M', u''), (64077, 'M', u''), (64078, 'M', u''), (64079, 'M', u''), (64080, 'M', u''), (64081, 'M', u''), (64082, 'M', u''), (64083, 'M', u''), (64084, 'M', u''), (64085, 'M', u''), (64086, 'M', u''), (64087, 'M', u''), (64088, 'M', u''), (64089, 'M', u''), (64090, 'M', u''), (64091, 'M', u''), (64092, 'M', u''), (64093, 'M', u''), (64095, 'M', u''), (64096, 'M', u''), (64097, 'M', u''), (64098, 'M', u''), (64099, 'M', u''), (64100, 'M', u''), (64101, 'M', u''), (64102, 'M', u''), (64103, 'M', u''), (64104, 'M', u''), (64105, 'M', u''), (64106, 'M', u''), (64107, 'M', u''), (64108, 'M', u''), (64109, 'M', u''), (64110, 'X'), (64112, 'M', u''), (64113, 'M', u''), (64114, 'M', u''), (64115, 'M', u''), (64116, 'M', u''), (64117, 'M', u''), (64118, 'M', u''), (64119, 'M', u''), (64120, 'M', u''), (64121, 'M', u''), (64122, 'M', u''), (64123, 'M', u''), (64124, 'M', u''), (64125, 'M', u''), (64126, 'M', u''), (64127, 'M', u''), (64128, 'M', u''), (64129, 'M', u''), (64130, 'M', u''), (64131, 'M', u''), (64132, 'M', u''), (64133, 'M', u''), (64134, 'M', u''), (64135, 'M', u''), (64136, 'M', u''), (64137, 'M', u''), (64138, 'M', u''), (64139, 'M', u''), (64140, 'M', u''), (64141, 'M', u''), (64142, 'M', u''), (64143, 'M', u''), (64144, 'M', u''), (64145, 'M', u''), (64146, 'M', u''), (64147, 'M', u''), (64148, 'M', u''), (64149, 'M', u''), (64150, 'M', u''), (64151, 'M', u''), (64152, 'M', u''), (64153, 'M', u''), (64154, 'M', u''), (64155, 'M', u''), (64156, 'M', u''), (64157, 'M', u''), (64158, 'M', u''), (64159, 'M', u''), (64160, 'M', u''), (64161, 'M', u'')]
def train_transforms(inp_size, scale):
    """Build the training augmentation pipeline: random resized crop to
    ``inp_size`` (area range ``scale``), horizontal flip, tensor conversion,
    and the module-level ``normalize`` step."""
    pipeline = [
        transforms.RandomResizedCrop(inp_size, scale=scale),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(pipeline)
class Scorer():
    """Computes a configurable set of metrics over gold labels, predictions
    and probabilities, optionally filtering out an abstain label and scoring
    per-slice via boolean masks."""

    def __init__(self, metrics: Optional[List[str]]=None, custom_metric_funcs: Optional[Mapping[(str, Callable[(..., float)])]]=None, abstain_label: Optional[int]=(- 1)) -> None:
        self.metrics: Dict[(str, Callable[(..., float)])]
        self.metrics = {}
        for metric in (metrics or []):
            if metric not in METRICS:
                raise ValueError(f'Unrecognized metric: {metric}')
            # Coverage must see abstains; every other metric filters them out.
            if (abstain_label is None) or (metric == 'coverage'):
                filter_dict = {}
            else:
                filter_dict = {'golds': [abstain_label], 'preds': [abstain_label]}
            self.metrics[metric] = partial(metric_score, metric=metric, filter_dict=filter_dict)
        if custom_metric_funcs is not None:
            self.metrics.update(custom_metric_funcs)
        self.abstain_label = abstain_label

    def score(self, golds: np.ndarray, preds: Optional[np.ndarray]=None, probs: Optional[np.ndarray]=None) -> Dict[(str, float)]:
        """Apply every configured metric; dict-valued results are merged in."""
        if len(golds) == 0:
            raise ValueError('Cannot score empty labels')
        results = dict()
        for metric_name, metric in self.metrics.items():
            outcome = metric(golds, preds, probs)
            if isinstance(outcome, dict):
                results.update(outcome)
            else:
                results[metric_name] = outcome
        return results

    def score_slices(self, S: np.recarray, golds: np.ndarray, preds: np.ndarray, probs: np.ndarray, as_dataframe: bool=False) -> Union[(Dict[(str, Dict[(str, float)])], pd.DataFrame)]:
        """Score overall and once per slice (each recarray field of ``S`` is
        a boolean mask); optionally return a transposed DataFrame."""
        if not (S.shape[0] == len(golds) == len(preds) == len(probs)):
            raise ValueError('S, golds, preds, and probs must have the same number of elements')
        metrics_dict = {'overall': self.score(golds, preds, probs)}
        for slice_name in S.dtype.names:
            mask = S[slice_name].astype(bool)
            metrics_dict[slice_name] = self.score(golds[mask], preds[mask], probs[mask])
        if as_dataframe:
            return pd.DataFrame.from_dict(metrics_dict).transpose()
        return metrics_dict
def test_anntorchdataset_numpy(adata):
    """Every field of an AnnTorchDataset item must come back as np.ndarray."""
    manager = generic_setup_adata_manager(adata)
    dataset = AnnTorchDataset(manager)
    for field_value in dataset[1].values():
        # Exact-type check (not isinstance) mirrors the original intent.
        assert (type(field_value) == np.ndarray)
import pytest


# NOTE(review): the three fragment lines that preceded this test (".unit",
# ".convert", ".filterwarnings('ignore:...')") look like pytest marks whose
# "@pytest.mark" prefix was stripped during extraction; restored below --
# confirm against the upstream source.
@pytest.mark.unit
@pytest.mark.convert
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_line_to_json_ra_dec():
    """line_to_json must convert an (ra, dec) catalog line to pixel
    coordinates via the test image's WCS, with default shape tags (-1)."""
    helpers.setup(with_data=True)
    in_wcs = WCS(fits.getheader(os.path.join(helpers.TEST_PATH, 'test_image.fits')))
    columns = ['id', 'ra', 'dec', 'col1', 'col2']
    catalog_assets_path = os.path.join(helpers.TEST_PATH, 'catalog_assets')
    os.mkdir(catalog_assets_path)
    in_line = ['1', '53.18575', '-27.898664', 'abc', '123']
    expected_json = dict(geometry=dict(coordinates=[289., 301.]), tags=dict(a=(- 1), b=(- 1), theta=(- 1), catalog_id='1', cat_path='catalog_assets'))
    actual_json = convert.line_to_json(in_wcs, columns, catalog_assets_path, in_line)
    helpers.tear_down()
    np.testing.assert_allclose(expected_json['geometry']['coordinates'], actual_json['geometry']['coordinates'], atol=1e-06)
    assert (expected_json['tags'] == actual_json['tags'])
class ScaleLinear(nn.Module):
    """A linear layer whose output is gated elementwise by a hypernetwork
    conditioned on a context vector."""

    def __init__(self, dim_in, dim_out, dim_c):
        super().__init__()
        # Main transform, plus the gate-producing hypernetwork; the context
        # is expected to carry 1 + dim_c features.
        self._layer = nn.Linear(dim_in, dim_out)
        self._hyper = nn.Linear(1 + dim_c, dim_out)

    def forward(self, context, x):
        gate = self._hyper(context)
        if x.dim() == 3:
            # Broadcast the per-sample gate over the sequence axis.
            gate = gate.unsqueeze(1)
        return self._layer(x) * gate
def test_export_sequence_unexpected_exception(exportable_test_case_with_unexpected_exception, tmp_path):
    """An exported test case ending in an unexpected exception must be written
    as an xfail-marked pytest test with the expected file header.

    NOTE(review): the expected-output literal contains a stray ".xfail"
    fragment preceded by a lone backslash -- it looks like "@pytest.mark" was
    stripped from the string during extraction; confirm upstream.
    """
    path = (tmp_path / 'generated_with_unexpected_exception.py')
    exporter = export.PyTestChromosomeToAstVisitor()
    exportable_test_case_with_unexpected_exception.accept(exporter)
    export.save_module_to_file(exporter.to_module(), path)
    assert (path.read_text() == (export._PYNGUIN_FILE_HEADER + 'import pytest\nimport tests.fixtures.accessibles.accessible as module_0\n\n\.xfail(strict=True)\ndef test_case_0():\n    float_0 = 42.23\n    float_1 = module_0.simple_function(float_0)\n'))
import functools


# NOTE(review): this block was corrupted during extraction -- the leading
# `_cache(maxsize=1)` line is the remnant of a stripped
# `@functools.lru_cache(maxsize=1)` decorator, and the three curl command
# strings were truncated mid-literal (unterminated strings). The decorator
# and the conventional cloud metadata endpoints are restored below.
# TODO(review): confirm the exact URLs and curl flags against upstream.
@functools.lru_cache(maxsize=1)
def query_which_cloud() -> str:
    """Best-effort detection of the cloud this host runs on.

    Probes the provider metadata endpoints with short-timeout curl calls and
    returns 'aws', 'azure', 'gcp', or 'unknown'. The answer cannot change
    while the process runs, so the result is cached.
    """
    check_exit_code = (lambda cmd: subprocess.call(shlex.split(cmd), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
    aws_metadata_url = 'curl -f --connect-timeout 1 --noproxy * http://169.254.169.254/latest/meta-data/'
    azure_metadata_url = 'curl -f --connect-timeout 1 -H Metadata:true --noproxy * http://169.254.169.254/metadata/instance?api-version=2021-02-01'
    gcp_metadata_url = 'curl -f --connect-timeout 1 -noproxy * -H Metadata-Flavor:Google http://metadata.google.internal/computeMetadata/v1/'
    if (check_exit_code(aws_metadata_url) == 0):
        return 'aws'
    elif (check_exit_code(azure_metadata_url) == 0):
        return 'azure'
    elif (check_exit_code(gcp_metadata_url) == 0):
        return 'gcp'
    else:
        return 'unknown'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.