code stringlengths 101 5.91M |
|---|
def _get_threadpool_controller():
    """Return the process-wide threadpoolctl controller singleton.

    Returns None when the installed threadpoolctl is too old to expose
    ``ThreadpoolController``. Otherwise the controller instance is cached
    on the ``sklearn`` module the first time it is requested and reused
    on every later call.
    """
    # Old threadpoolctl versions lack ThreadpoolController entirely.
    if not hasattr(threadpoolctl, 'ThreadpoolController'):
        return None
    # Lazily create and cache the singleton on the sklearn module.
    if not hasattr(sklearn, '_sklearn_threadpool_controller'):
        sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController()
    return sklearn._sklearn_threadpool_controller
def DM_33_6_1():
    """Return a difference matrix with parameters (33, 6, 1).

    Returns:
        ``(G, Mb)`` where ``G`` is the additive group Z/33Z
        (``IntegerModRing(33)``) and ``Mb`` is the list of rows
        (length-6 integer lists) of the matrix.
    """
    # Seed block: each *column* of M (via zip(*M) below) generates five
    # additional rows of the final matrix.
    M = [[0, 0, 0, 0, 0, 0], [15, 11, 22, 4, 17, 8], [19, 7, 14, 32, 22, 18], [22, 19, 8, 24, 21, 6], [9, 12, 15, 7, 26, 14], [14, 28, 23, 2, 19, 3]]
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    G = AdditiveCyclic(33)
    # Fixed initial rows of the matrix.
    Mb = [[0, 0, 0, 0, 0, 0], [1, 4, 16, 31, 25, 22], [7, 28, 13, 19, 10, 0]]
    for R in zip(*M):
        (a, b, c, d, e, f) = R
        for i in range(5):
            Mb.append([a, b, c, d, e, f])
            # Rotate the first five coordinates (e -> front) and multiply
            # everything by 4; the last coordinate is only scaled.
            (a, b, c, d, e, f) = ((4 * e), (4 * a), (4 * b), (4 * c), (4 * d), (4 * f))
    return (G, Mb)
@htrack('Performing dryrun')
# NOTE(review): SOURCE contained the bare no-op expression
# ('Performing dryrun') here — evidently a mangled @htrack(...) decorator
# from HELM's hierarchical_logger (which also provides hlog, used below).
# Restored; confirm htrack is imported at the top of this file.
def do_dry_run(dryrun_suite: str, conf_path: str, max_eval_instances: int, priority: int, models: Optional[List[str]]) -> str:
    """Run ``helm-run`` in dry-run mode for the given suite.

    Deletes any previous results for the suite, shells out to ``helm-run``
    with the supplied limits, and returns the suite's output path.

    Args:
        dryrun_suite: suite name substituted into OUTPUT_PATH_TEMPLATE.
        conf_path: path to the run-entries configuration file.
        max_eval_instances: cap on evaluation instances per run.
        priority: priority filter forwarded to helm-run.
        models: optional subset of models to run.

    Returns:
        The output path where helm-run wrote its results.
    """
    output_path: str = OUTPUT_PATH_TEMPLATE.format(suite=dryrun_suite)
    # Start from a clean slate: stale results would pollute the dry run.
    shutil.rmtree(output_path, ignore_errors=True)
    hlog(f'Deleted old results at path: {output_path}.')
    command: List[str] = ['helm-run', f'--suite={dryrun_suite}', f'--conf-path={conf_path}', f'--max-eval-instances={max_eval_instances}', '--local', '--dry-run', f'--priority={priority}']
    if models:
        command.append('--models-to-run')
        command.extend(models)
    hlog(' '.join(command))
    subprocess.call(command)
    hlog(f'Results are written out to path: {output_path}.')
    return output_path
class TestSphericalBoundariesIntersections(TestCase):
    """Tests for sphere_intersections on 2-D segments and full lines."""

    def test_2d_sphere_constraints(self):
        # Each case: (origin z0, direction d, radius, expected (ta, tb)
        # when the segment intersects, or None when it does not).
        cases = [
            ([0, 0], [1, 0], 0.5, [0, 0.5]),
            ([2, 0], [0, 1], 1, None),
            ([2, 0], [1, 0], 1, None),
            ([2, 0], [(- 1), 0], 1.5, [0.5, 1]),
            ([2, 0], [1, 0], 2, [0, 0]),
        ]
        for z0, d, radius, expected in cases:
            (ta, tb, intersect) = sphere_intersections(z0, d, radius)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], expected)
                assert_equal(intersect, True)

    def test_2d_sphere_constraints_line_intersections(self):
        # Same shape as above, but the whole line is considered.
        cases = [
            ([0, 0], [1, 0], 0.5, [(- 0.5), 0.5]),
            ([2, 0], [0, 1], 1, None),
            ([2, 0], [1, 0], 1, [(- 3), (- 1)]),
            ([2, 0], [(- 1), 0], 1.5, [0.5, 3.5]),
            ([2, 0], [1, 0], 2, [(- 4), 0]),
        ]
        for z0, d, radius, expected in cases:
            (ta, tb, intersect) = sphere_intersections(z0, d, radius, entire_line=True)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], expected)
                assert_equal(intersect, True)
def get_vectors_norm(vectors):
    """L2-normalise each row of `vectors`.

    Rows with zero norm are mapped to zeros (divide_no_nan) rather
    than NaN.
    """
    # Work column-wise so the reduction runs along axis 0.
    cols = tf.transpose(vectors)
    magnitudes = tf.sqrt(tf.math.reduce_sum(cols * cols, axis=0))
    normalised = tf.math.divide_no_nan(cols, magnitudes)
    return tf.transpose(normalised)
# NOTE(review): SOURCE had bare `.parametrize(...)` lines here — a syntax
# error; the `@pytest.mark` prefix was evidently lost in extraction.
# Restored below (assumes pytest is imported at module level, as in any
# pytest test file).
@pytest.mark.parametrize('action_size', [3])
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('epsilon', [0.5])
def test_constant_epsilon_greedy(action_size: int, observation_shape: Sequence[int], epsilon: float) -> None:
    """With epsilon=0.5 the explorer's action should differ from an
    independent uniform draw at least once in 10 attempts."""
    explorer = ConstantEpsilonGreedy(epsilon)
    ref_x = np.random.random((1, *observation_shape))
    ref_y = np.random.randint(action_size, size=(1,))
    algo = DummyAlgo(action_size, ref_x, ref_y)
    for i in range(10):
        action = np.random.randint(action_size, size=(1,))
        if (action != explorer.sample(algo, ref_x, 0)):
            break
    # Fail only if every one of the 10 draws matched the explorer output.
    if (i == 9):
        assert False
def conv2da(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d', reuse=False, padding='SAME'):
    """2-D convolution layer (TF1 style) with Xavier-initialised weights.

    Args:
        input_: 4-D NHWC input tensor.
        output_dim: number of output channels.
        k_h, k_w: kernel height / width.
        d_h, d_w: stride height / width.
        stddev: unused — weights use the Xavier initializer below.
            NOTE(review): presumably kept for signature compatibility.
        name: variable scope name.
        reuse: reuse existing variables in the scope.
        padding: 'SAME' or 'VALID'.

    Returns:
        The biased convolution output tensor.
    """
    with tf.variable_scope(name, reuse=reuse):
        # Filter shape: [k_h, k_w, in_channels, out_channels].
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # Reshape to conv's own static shape — restores shape info lost
        # by bias_add.
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv
def _latex_file_(objects, title='SAGE', debug=False, sep='', tiny=False, math_left='\\[', math_right='\\]', extra_preamble=''):
    """Build a complete LaTeX document string that typesets `objects`.

    Args:
        objects: one object or a list of objects, rendered via latex().
        title: centred bold title; falsy suppresses it.
        debug: print the generated document when True.
        sep: LaTeX inserted between consecutive objects.
        tiny: typeset the body in \\tiny when True.
        math_left / math_right: delimiters around non-verbatim output.
        extra_preamble: extra LaTeX placed before \\begin{document}.

    Returns:
        The full document source (header + macros + body + end) as str.
    """
    # NOTE(review): `process` is always True here; presumably a legacy
    # flag — the else branch below is currently unreachable.
    process = True
    # A single object that knows how to latex itself is wrapped in a list.
    if has_latex_attr(objects):
        objects = [objects]
    if (not isinstance(objects, list)):
        objects = [objects]
    if tiny:
        size = '\\tiny\n'
    else:
        size = ''
    formatted_title = (('\n\\begin{center}{\\Large\\bf %s}\\end{center}\n' % str(title)) if title else '')
    s = ('%s\n\\begin{document}%s%s' % (extra_preamble, formatted_title, size))
    if title:
        s += '\\vspace{40mm}'
    if process:
        for i in range(len(objects)):
            x = objects[i]
            L = latex(x)
            # pgf pictures are boxed and rescaled so they fit the page.
            if ('\\begin{pgfpicture}' in L):
                s += '\\begingroup\\makeatletter\\{pgffigure}{\\newsavebox{\\pgffigure}}{}\\makeatother\\endgroup'
                s += ('\\begin{lrbox}{\\pgffigure}' + '\n')
                s += ('%s' % L)
                s += '\\end{lrbox}'
                s += ('\\resizebox{\\ifdim\\width>\\textwidth\\textwidth\\else\\width\\fi}{!}{\\usebox{\\pgffigure}}' + '\n')
            # Verbatim output is emitted as-is; everything else goes in
            # display-math delimiters.
            elif ('\\begin{verbatim}' not in L):
                s += ('%s%s%s' % (math_left, L, math_right))
            else:
                s += ('%s' % L)
            if (i < (len(objects) - 1)):
                s += ('\n\n%s\n\n' % sep)
    else:
        s += '\n\n'.join((str(x) for x in objects))
    MACROS = latex_extra_preamble()
    s = ((((LATEX_HEADER + '\n') + MACROS) + s) + '\n\\end{document}')
    if debug:
        print(s)
    return s
def init_seed(seed):
    """Seed every RNG used by the project for reproducibility.

    Covers the stdlib `random` module, NumPy, torch's CPU and CUDA
    generators, and Python's hash randomisation (PYTHONHASHSEED only
    affects subprocesses launched after this call).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
def SetDel(s, e):
    """Z3 set operation: return the set `s` with element `e` removed.

    Args:
        s: ArrayRef representing a Z3 set.
        e: element to delete; coerced to a Z3 expression in s's context.

    Returns:
        ArrayRef wrapping the Z3_mk_set_del AST node.
    """
    # Resolve a common Z3 context from both arguments.
    ctx = _ctx_from_ast_arg_list([s, e])
    # Coerce a plain Python value into a Z3 expression in that context.
    e = _py2expr(e, ctx)
    return ArrayRef(Z3_mk_set_del(ctx.ref(), s.as_ast(), e.as_ast()), ctx)
class ReasoningQAPromptHelper(PromptHelper):
    """PromptHelper for reasoning-QA generation with few-shot examples."""
    few_shot_examples = REASONING_QA_FEWSHOT_EXAMPLES
    def get_chatgpt_query(self, metadata: Dict[(str, Any)]) -> Dict[(str, Any)]:
        """The query is the metadata itself; no transformation needed."""
        return metadata
    def postprocess_response_text(self, text: str, query, uri) -> Dict[(str, Any)]:
        """Parse and validate a response, then merge it with the query.

        Returns a row dict containing the query fields, the source `uri`,
        and the validated parsed response under 'response'.
        """
        response = self.check_chatgpt_response_meets_schema(parse_almost_json(text))
        row = dict((list(query.items()) + [('uri', uri)]))
        row['response'] = response
        return row
    def check_chatgpt_response_meets_schema(self, response: Union[(Dict, List[Dict])]) -> Union[(Dict, List[Dict])]:
        """Validate that the parsed response is a list of QA dicts.

        Raises ValueError when an element lacks 'question' or 'answer'.
        NOTE(review): the annotation admits a bare Dict but the assert
        below requires a list — confirm the intended contract.
        """
        assert isinstance(response, list)
        expected_fields = ('question', 'answer')
        for elem in response:
            for expected_field in expected_fields:
                if (expected_field not in elem):
                    raise ValueError(f'Missing field from ChatGPT response: {expected_field}')
        return response
def build_lightning_optimizers(model, config):
    """Build the optimizer (and optional per-step LR scheduler) in the
    dict format PyTorch Lightning's configure_optimizers expects.

    When no scheduler is configured, the bare optimizer is returned.
    """
    optimizer = build_optimizer(model, config)
    # No scheduler configured: Lightning accepts a bare optimizer.
    if not config.training.lr_scheduler:
        return optimizer
    scheduler = build_scheduler(optimizer, config)
    return {
        'optimizer': optimizer,
        'lr_scheduler': {'scheduler': scheduler, 'interval': 'step'},
    }
class mask_rcnn_outputs(nn.Module):
    """Mask R-CNN output head: per-class (or class-agnostic) mask logits.

    Configured entirely via the global `cfg`: FC vs 1x1-conv output,
    class-specific vs shared masks, and optional bilinear upsampling.
    """
    def __init__(self, dim_in):
        super().__init__()
        self.dim_in = dim_in
        # One mask per class, or a single shared mask.
        n_classes = (cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1)
        if cfg.MRCNN.USE_FC_OUTPUT:
            # Fully-connected output predicts the flattened mask directly.
            self.classify = nn.Linear(dim_in, (n_classes * (cfg.MRCNN.RESOLUTION ** 2)))
        else:
            # 1x1 convolution producing one logit map per class.
            self.classify = nn.Conv2d(dim_in, n_classes, 1, 1, 0)
        if (cfg.MRCNN.UPSAMPLE_RATIO > 1):
            self.upsample = mynn.BilinearInterpolation2d(n_classes, n_classes, cfg.MRCNN.UPSAMPLE_RATIO)
        self._init_weights()
    def _init_weights(self):
        # MSRAFill only applies to the conv + class-specific configuration;
        # otherwise fall back to a small normal init.
        if ((not cfg.MRCNN.USE_FC_OUTPUT) and cfg.MRCNN.CLS_SPECIFIC_MASK and (cfg.MRCNN.CONV_INIT == 'MSRAFill')):
            weight_init_func = mynn.init.MSRAFill
        else:
            weight_init_func = partial(init.normal_, std=0.001)
        weight_init_func(self.classify.weight)
        init.constant_(self.classify.bias, 0)
    def detectron_weight_mapping(self):
        """Map this module's parameters to Detectron blob names."""
        mapping = {'classify.weight': 'mask_fcn_logits_w', 'classify.bias': 'mask_fcn_logits_b'}
        if hasattr(self, 'upsample'):
            # The upsampling layer has no Detectron counterpart.
            mapping.update({'upsample.upconv.weight': None, 'upsample.upconv.bias': None})
        orphan_in_detectron = []
        return (mapping, orphan_in_detectron)
    def forward(self, x):
        x = self.classify(x)
        if (cfg.MRCNN.UPSAMPLE_RATIO > 1):
            x = self.upsample(x)
        # Raw logits during training (loss applies the sigmoid);
        # probabilities at inference time.
        if (not self.training):
            x = F.sigmoid(x)
        return x
def handler(event):
    """Serverless benchmark handler: generate a Barabasi graph of
    ``event['size']`` nodes and compute its spanning tree, reporting
    both phases' wall-clock time in microseconds."""
    size = event.get('size')
    gen_begin = datetime.datetime.now()
    graph = igraph.Graph.Barabasi(size, 10)
    gen_end = datetime.datetime.now()
    compute_begin = datetime.datetime.now()
    tree = graph.spanning_tree(None, False)
    compute_end = datetime.datetime.now()
    # Dividing a timedelta by a 1-microsecond delta yields a float count.
    one_us = datetime.timedelta(microseconds=1)
    return {
        'result': tree[0],
        'measurement': {
            'graph_generating_time': (gen_end - gen_begin) / one_us,
            'compute_time': (compute_end - compute_begin) / one_us,
        },
    }
class Translator_difftok_tail(nn.Module):
    """Stack of residual translator_base blocks followed by a
    translator_difftok tail that changes the token count / dimension.

    Args:
        num_tok: input token count.
        num_tok_out: token count produced by the tail.
        dim: input/hidden feature dimension.
        dim_out: output feature dimension of the tail.
        mult: hidden-width multiplier forwarded to the sub-modules.
        depth: number of residual blocks before the tail.
    """

    def __init__(self, num_tok, num_tok_out, dim, dim_out, mult=2, depth=5):
        super().__init__()
        # BUG FIX: `mult` was accepted but silently ignored (both
        # sub-modules hard-coded mult=2); forward it so non-default
        # values take effect. The default (mult=2) is unchanged.
        self.blocks = nn.ModuleList([translator_base(num_tok, dim, dim, mult=mult) for d in range(depth)])
        self.gelu = nn.GELU()
        self.tail = translator_difftok(num_tok, num_tok_out, dim, dim_out, mult=mult)

    def forward(self, x):
        # Residual application of each block, then GELU and the tail.
        for block in self.blocks:
            x = (block(x) + x)
        x = self.gelu(x)
        x = self.tail(x)
        return x
class SpeechRecognitionModel(nn.Module):
    """CNN + residual-CNN + bidirectional-GRU speech recognition model.

    Pipeline: a strided Conv2d front-end halves the feature axis, a stack
    of ResidualCNN blocks refines it, a linear layer projects to the RNN
    width, bidirectional GRUs model time, and an MLP classifier emits
    per-frame class scores.
    """
    def __init__(self, n_cnn_layers, n_rnn_layers, rnn_dim, n_class, n_feats, stride=2, dropout=0.1):
        super(SpeechRecognitionModel, self).__init__()
        # The stride-2 front-end halves the feature dimension.
        n_feats = (n_feats // 2)
        self.cnn = nn.Conv2d(1, 32, 3, stride=stride, padding=(3 // 2))
        self.rescnn_layers = nn.Sequential(*[ResidualCNN(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats) for _ in range(n_cnn_layers)])
        self.fully_connected = nn.Linear((n_feats * 32), rnn_dim)
        # First GRU takes rnn_dim; later ones take the bidirectional 2*rnn_dim.
        self.birnn_layers = nn.Sequential(*[BidirectionalGRU(rnn_dim=(rnn_dim if (i == 0) else (rnn_dim * 2)), hidden_size=rnn_dim, dropout=dropout, batch_first=(i == 0)) for i in range(n_rnn_layers)])
        self.classifier = nn.Sequential(nn.Linear((rnn_dim * 2), rnn_dim), nn.SELU(), nn.Dropout(dropout), nn.Linear(rnn_dim, n_class))
    def forward(self, x):
        x = self.cnn(x)
        x = self.rescnn_layers(x)
        sizes = x.size()
        # Merge channel and feature axes: (B, C, F, T) -> (B, C*F, T).
        x = x.view(sizes[0], (sizes[1] * sizes[2]), sizes[3])
        # -> (B, T, C*F) for the linear / recurrent layers.
        x = x.transpose(1, 2)
        x = self.fully_connected(x)
        x = self.birnn_layers(x)
        x = self.classifier(x)
        return x
# NOTE(review): SOURCE had a bare `.parametrize(...)` line here — a syntax
# error; the `@pytest.mark` prefix was lost in extraction. Restored
# (assumes pytest is imported at module level, as in any pytest file).
@pytest.mark.parametrize('y_pred', [np.array(y_pred_list), y_pred_list])
def test_residual_normalised_conformity_score_get_conformity_scores(y_pred: NDArray) -> None:
    """Conformity scores on the toy data: NaN for the first two points,
    zero for the rest, regardless of y_pred's container type."""
    residual_norm_score = ResidualNormalisedScore(random_state=random_state)
    conf_scores = residual_norm_score.get_conformity_scores(X_toy, y_toy, y_pred)
    expected_signed_conf_scores = np.array([np.nan, np.nan, .0, .0, 0.0, .0])
    np.testing.assert_allclose(conf_scores, expected_signed_conf_scores)
def test_iht_fit_resample_class_obj():
    """InstanceHardnessThreshold accepts an estimator *instance* and
    downsamples the fixture to 12 samples."""
    classifier = GradientBoostingClassifier(random_state=RND_SEED)
    sampler = InstanceHardnessThreshold(estimator=classifier, random_state=RND_SEED)
    (X_res, y_res) = sampler.fit_resample(X, Y)
    assert (X_res.shape == (12, 2))
    assert (y_res.shape == (12,))
def ndim_tensor2im(image_tensor, imtype=np.uint8, batch=0):
    """Convert one element of a batched score tensor to a label map.

    Args:
        image_tensor: torch tensor indexed as (batch, channel, ...).
        imtype: numpy dtype of the returned label map.
        batch: which batch element to convert.

    Returns:
        numpy array of per-position argmax channel indices, cast to imtype.
    """
    scores = image_tensor[batch].cpu().float().numpy()
    labels = scores.argmax(axis=0)
    return labels.astype(imtype)
class docURLLink(GeneratedsSuper):
    """Generated XML binding for the Doxygen `docURLLink` element.

    Holds a `url` attribute plus mixed text content (`valueOf_` and the
    `content_` list of MixedContainer nodes). The export* methods emit
    XML; the build* methods populate the object from a DOM node.
    """
    subclass = None
    superclass = None
    def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
        self.url = url
        # Factory used to wrap mixed (text/CDATA) child content.
        if (mixedclass_ is None):
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if (content_ is None):
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass when one is set."""
        if docURLLink.subclass:
            return docURLLink.subclass(*args_, **kwargs_)
        else:
            return docURLLink(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_url(self):
        return self.url
    def set_url(self, url):
        self.url = url
    def getValueOf_(self):
        return self.valueOf_
    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
        """Write this element (tag, attributes, children) to outfile."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
        outfile.write('>')
        self.exportChildren(outfile, (level + 1), namespace_, name_)
        outfile.write(('</%s%s>\n' % (namespace_, name_)))
    def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
        if (self.url is not None):
            outfile.write((' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'),)))
    def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
        # CDATA sections need their markers restored after quoting.
        if (self.valueOf_.find('![CDATA') > (- 1)):
            value = quote_xml(('%s' % self.valueOf_))
            value = value.replace('![CDATA', '<![CDATA')
            value = value.replace(']]', ']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml(('%s' % self.valueOf_)))
    def hasContent_(self):
        if (self.valueOf_ is not None):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docURLLink'):
        """Emit a Python-literal representation (for generated tests)."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if (self.url is not None):
            showIndent(outfile, level)
            outfile.write(('url = %s,\n' % (self.url,)))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write(('valueOf_ = "%s",\n' % (self.valueOf_,)))
    def build(self, node_):
        """Populate this object from a DOM element node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the node name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('url'):
            self.url = attrs.get('url').value
    def buildChildren(self, child_, nodeName_):
        # Text nodes are recorded both as MixedContainer content and as
        # concatenated plain text; CDATA only contributes to valueOf_.
        if (child_.nodeType == Node.TEXT_NODE):
            obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if (child_.nodeType == Node.TEXT_NODE):
            self.valueOf_ += child_.nodeValue
        elif (child_.nodeType == Node.CDATA_SECTION_NODE):
            self.valueOf_ += (('![CDATA[' + child_.nodeValue) + ']]')
def normalize_final_sql(format_sql_5):
    """Collapse a formatted SQL string into single-line canonical tokens.

    Joins lines, glues dotted names, fuses multi-word keywords into
    single tokens (group_by / order_by / limit_value, '! =' -> '!='),
    then applies two hard-coded dataset-specific corrections.
    """
    replacements = (
        ('\n', ' '),
        (' . ', '.'),
        ('group by', 'group_by'),
        ('order by', 'order_by'),
        ('! =', '!='),
        ('limit value', 'limit_value'),
    )
    format_sql_final = format_sql_5
    for old, new in replacements:
        format_sql_final = format_sql_final.replace(old, new)
    # Dataset-specific fix-up for queries that use table aliases.
    if any(alias in format_sql_final for alias in ('t1', 't2', 't3', 't4')):
        format_sql_final = format_sql_final.replace('t2.dormid', 'dorm.dormid')
    # Hard-coded correction of one known mis-normalised geography query.
    format_sql_final = format_sql_final.replace('select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by population desc limit_value', 'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by city.population desc limit_value')
    return format_sql_final
def do_slice(value, slices, fill_with=None):
    """Yield `value` split into `slices` contiguous sub-lists.

    The first (len(value) % slices) sub-lists get one extra item, so
    sizes differ by at most one. When `fill_with` is given, the shorter
    sub-lists are padded with it so all have equal length.
    """
    seq = list(value)
    per_slice, extra = divmod(len(seq), slices)
    pos = 0
    for index in range(slices):
        # The first `extra` slices carry one additional element.
        size = per_slice + (1 if index < extra else 0)
        chunk = seq[pos:pos + size]
        pos += size
        if fill_with is not None and index >= extra:
            chunk.append(fill_with)
        yield chunk
def densenet169(**kwargs):
    """Construct a DenseNet-169 model.

    Uses the standard DenseNet-169 configuration (64 initial features,
    growth rate 32, block sizes 6/12/32/32); extra keyword arguments are
    forwarded to the DenseNet constructor.
    """
    return DenseNet(
        num_init_features=64,
        growth_rate=32,
        block_config=(6, 12, 32, 32),
        **kwargs,
    )
class JointExtractionDecoder(DecoderBase, JointExtractionDecoderMixin):
    """Joint decoder for chunks plus optional attribute/relation decoders.

    The chunk decoder always runs; attribute and relation decoders are
    instantiated only when configured, and they consume the chunk
    decoder's predictions at both training and decode time.
    """
    def __init__(self, config: JointExtractionDecoderConfig):
        super().__init__()
        self.ck_decoder = config.ck_decoder.instantiate()
        self.ck_loss_weight = config.ck_loss_weight
        if config.has_attr_decoder:
            self.attr_decoder = config.attr_decoder.instantiate()
            self.attr_loss_weight = config.attr_loss_weight
        if config.has_rel_decoder:
            self.rel_decoder = config.rel_decoder.instantiate()
            self.rel_loss_weight = config.rel_loss_weight
    def forward(self, batch: Batch, **states):
        """Return the weighted sum of the configured decoders' losses."""
        losses = (self.ck_decoder(batch, **states) * self.ck_loss_weight)
        # Downstream decoders are conditioned on predicted chunks.
        batch_chunks_pred = self.ck_decoder.decode(batch, **states)
        if self.has_attr_decoder:
            self.attr_decoder.assign_chunks_pred(batch, batch_chunks_pred)
            losses += (self.attr_decoder(batch, **states) * self.attr_loss_weight)
        if self.has_rel_decoder:
            self.rel_decoder.assign_chunks_pred(batch, batch_chunks_pred)
            losses += (self.rel_decoder(batch, **states) * self.rel_loss_weight)
        return losses
    def decode(self, batch: Batch, **states):
        """Return a tuple of predictions: (chunks[, attrs][, relations])."""
        batch_chunks_pred = self.ck_decoder.decode(batch, **states)
        y_pred = (batch_chunks_pred,)
        if self.has_attr_decoder:
            self.attr_decoder.assign_chunks_pred(batch, batch_chunks_pred)
            y_pred = (*y_pred, self.attr_decoder.decode(batch, **states))
        if self.has_rel_decoder:
            self.rel_decoder.assign_chunks_pred(batch, batch_chunks_pred)
            y_pred = (*y_pred, self.rel_decoder.decode(batch, **states))
        return y_pred
def cost_matrix(width=16, dist=2):
    """Pairwise transport-cost matrix between cells of a width x width grid.

    Cell k has coordinates (k // width, k % width); the cost between two
    cells is |di|**dist + |dj|**dist, normalised by (width - 1) ** 2.

    BUG FIX / generalisation: `dist` was previously accepted but ignored
    (the exponent was hard-coded to 2); it is now honoured. The default
    dist=2 reproduces the old behaviour exactly. The O(size^2) Python
    loops are replaced by a vectorised NumPy computation.

    Returns:
        torch.float32 tensor of shape (width**2, width**2).
    """
    size = width ** 2
    idx = np.arange(size)
    rows = idx // width
    cols = idx % width
    # Broadcast (size, 1) against (1, size) to get all pairwise distances.
    C = (np.abs(rows[:, None] - rows[None, :]) ** dist
         + np.abs(cols[:, None] - cols[None, :]) ** dist).astype(np.float32)
    C /= (width - 1) ** 2
    return torch.tensor(C)
def batcher(params, batch):
    """Embed a batch of tokenised sentences with the encoder in
    params['google_use'].

    Empty token lists are replaced by '.' so the encoder never receives
    an empty string.
    """
    sentences = ['.' if sent == [] else ' '.join(sent) for sent in batch]
    return params['google_use'](sentences)
def build_datasets(dataset_list: List[str], dataset_config: DictConfig, dataset_type='train'):
    """Instantiate each dataset named in `dataset_list`.

    Args:
        dataset_list: names of the datasets to build.
        dataset_config: mapping of dataset name -> per-dataset config.
        dataset_type: which split to build ('train', 'val', ...).

    Returns:
        List of built dataset instances; datasets for which
        build_dataset returns None are skipped.
    """
    datasets = []
    for dataset in dataset_list:
        # BUG FIX: the original rebound `dataset_config` itself to the
        # per-dataset sub-config, clobbering the full mapping for every
        # subsequent dataset in the list. Use a separate name.
        if (dataset in dataset_config):
            per_dataset_config = dataset_config[dataset]
        else:
            warnings.warn((f'Dataset {dataset} is missing from dataset_config' + ' in config. Proceeding with empty config.'))
            per_dataset_config = OmegaConf.create()
        dataset_instance = build_dataset(dataset, per_dataset_config, dataset_type)
        if (dataset_instance is None):
            continue
        datasets.append(dataset_instance)
    return datasets
def get_data_fields(mode, cfg):
    """Build the dict of data fields for training/evaluation.

    Args:
        mode: dataset split ('train', 'val' or 'test'); val/test also get
            IoU-evaluation points and optional voxels.
        cfg: configuration dict; the ['data'] section selects files,
            subsampling and the input type.

    Returns:
        Dict mapping field names ('points', 'points_iou', 'voxels') to
        field objects.
    """
    points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
    input_type = cfg['data']['input_type']
    fields = {}
    if (cfg['data']['points_file'] is not None):
        # Patch-based fields are used for the pointcloud_crop input type.
        if (input_type != 'pointcloud_crop'):
            fields['points'] = data.PointsField(cfg['data']['points_file'], points_transform, unpackbits=cfg['data']['points_unpackbits'], multi_files=cfg['data']['multi_files'])
        else:
            fields['points'] = data.PatchPointsField(cfg['data']['points_file'], transform=points_transform, unpackbits=cfg['data']['points_unpackbits'], multi_files=cfg['data']['multi_files'])
    if (mode in ('val', 'test')):
        points_iou_file = cfg['data']['points_iou_file']
        voxels_file = cfg['data']['voxels_file']
        if (points_iou_file is not None):
            # NOTE: IoU points are not subsampled (no transform passed).
            if (input_type == 'pointcloud_crop'):
                fields['points_iou'] = data.PatchPointsField(points_iou_file, unpackbits=cfg['data']['points_unpackbits'], multi_files=cfg['data']['multi_files'])
            else:
                fields['points_iou'] = data.PointsField(points_iou_file, unpackbits=cfg['data']['points_unpackbits'], multi_files=cfg['data']['multi_files'])
        if (voxels_file is not None):
            fields['voxels'] = data.VoxelsField(voxels_file)
    return fields
class ChooseInfoSubprocVecEnv(ShareVecEnv):
    """Vectorised environment running each env in its own subprocess.

    Communication is via one duplex pipe per worker; each request is a
    ('command', payload) tuple handled by `chooseinfoworker`. reset()
    additionally returns per-env infos, hence the 'ChooseInfo' variant.
    """
    def __init__(self, env_fns, spaces=None):
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        # forkserver avoids inheriting unwanted state from the parent.
        self._mp_ctx = mp.get_context('forkserver')
        (self.remotes, self.work_remotes) = zip(*[self._mp_ctx.Pipe(duplex=True) for _ in range(nenvs)])
        # CloudpickleWrapper makes the env thunks picklable for spawn-like
        # start methods.
        self.ps = [self._mp_ctx.Process(target=chooseinfoworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True
            p.start()
        # Close the parent's copies of the worker ends of the pipes.
        for remote in self.work_remotes:
            remote.close()
        # Query one worker for the (shared) spaces.
        self.remotes[0].send(('get_spaces', None))
        (observation_space, share_observation_space, action_space) = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)
    def step_async(self, actions):
        """Dispatch one action per worker without waiting for results."""
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of a previous step_async call."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)
    def reset(self, reset_choose):
        """Reset each env with its per-env `choose` payload."""
        for (remote, choose) in zip(self.remotes, reset_choose):
            remote.send(('reset', choose))
        results = [remote.recv() for remote in self.remotes]
        (obs, infos) = zip(*results)
        return (np.stack(obs), np.stack(infos))
    def get_short_term_goal(self, data):
        for (remote, da) in zip(self.remotes, data):
            remote.send(('get_short_term_goal', da))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Drain pending replies, ask workers to exit, and join them."""
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    def render(self, mode='human'):
        for remote in self.remotes:
            remote.send(('render', mode))
        # Only rgb_array mode produces frames to collect.
        if (mode == 'rgb_array'):
            frame = [remote.recv() for remote in self.remotes]
            return np.stack(frame)
    def reset_featurize_type(self, featurize_types):
        for (remote, featurize_type) in zip(self.remotes, featurize_types):
            remote.send(('reset_featurize_type', featurize_type))
class Vertex(Vrepresentation):
    """A vertex in the V-representation of a polyhedron."""
    def type(self):
        """Return the enumerated type marker ``self.VERTEX``."""
        return self.VERTEX
    def is_vertex(self):
        """Return True: this V-representation object is a vertex."""
        return True
    def _repr_(self):
        """Return a string showing the vertex coordinates."""
        return ('A vertex at ' + repr(self.vector()))
    def homogeneous_vector(self, base_ring=None):
        """Return the coordinate vector with a trailing homogeneous 1.

        ``base_ring`` optionally overrides the vertex's own base ring.
        """
        v = (list(self._vector) + [1])
        return vector((base_ring or self._base_ring), v)
    def evaluated_on(self, Hobj):
        """Evaluate the H-representation object on this vertex: A*v + b."""
        return ((Hobj.A() * self.vector()) + Hobj.b())
    def is_integral(self):
        """Whether every coordinate is an integer (or the ring is ZZ)."""
        return ((self._base_ring is ZZ) or all(((x in ZZ) for x in self)))
def process_quote_data(quote_data):
    """Aggregate quote-extraction counts into precision/recall/F1 rows.

    Args:
        quote_data: iterable of per-threshold data, each an iterable of
            count rows. After summing columns, index 0/1/2 feed the
            precision/recall formulas and 3/4 the match accuracies.
            NOTE(review): exact column semantics inferred from the
            formulas below — confirm against the caller.

    Returns:
        Three rows per threshold: [precision, recall, f1, '-'],
        ['-','-','-', speaker_acc], ['-','-','-', verb_acc].
    """
    results = []
    for threshold_data in quote_data:
        # Column-wise sums across all rows for this threshold.
        summed_data = [sum(col) for col in zip(*threshold_data)]
        quote_precision = rounding(((100 * summed_data[0]) / (summed_data[0] + summed_data[2])))
        quote_recall = rounding(((100 * summed_data[0]) / (summed_data[0] + summed_data[1])))
        quote_f1 = rounding(harmonic_mean([quote_precision, quote_recall]))
        speaker_match_accuracy = rounding(((100 * summed_data[3]) / summed_data[0]))
        verb_match_accuracy = rounding(((100 * summed_data[4]) / summed_data[0]))
        results.append([quote_precision, quote_recall, quote_f1, '-'])
        results.append(((['-'] * 3) + [speaker_match_accuracy]))
        results.append(((['-'] * 3) + [verb_match_accuracy]))
    return results
class Statistics(object):
    """Accumulates loss and token counts during training and derives
    accuracy, perplexity, throughput and elapsed-time statistics."""

    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.n_src_words = 0  # source-side token count, set externally
        self.start_time = time.time()

    def update(self, stat):
        """Fold another Statistics object into this one."""
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

    def accuracy(self):
        """Token-level accuracy as a percentage."""
        return 100 * (self.n_correct / self.n_words)

    def ppl(self):
        """Perplexity, with the exponent clamped at 100 to avoid overflow."""
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        """Seconds since this object was created."""
        return time.time() - self.start_time

    def output(self, epoch, batch, n_batches, start, ix=False):
        """Print a one-line progress report and flush stdout.

        When `ix` is truthy it is shown as a model index prefix.
        """
        t = self.elapsed_time()
        if ix:
            print((('M%2d Ep. %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; ' + '%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed') % (ix, epoch, batch, n_batches, self.accuracy(), self.ppl(), (self.n_src_words / (t + 1e-05)), (self.n_words / (t + 1e-05)), (time.time() - start))))
        else:
            print((('Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; ' + '%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed') % (epoch, batch, n_batches, self.accuracy(), self.ppl(), (self.n_src_words / (t + 1e-05)), (self.n_words / (t + 1e-05)), (time.time() - start))))
        sys.stdout.flush()

    def log(self, prefix, experiment, lr):
        """Record scalar metrics on an experiment tracker."""
        t = self.elapsed_time()
        experiment.add_scalar_value(prefix + '_ppl', self.ppl())
        experiment.add_scalar_value(prefix + '_accuracy', self.accuracy())
        experiment.add_scalar_value(prefix + '_tgtper', self.n_words / t)
        experiment.add_scalar_value(prefix + '_lr', lr)
def repr_short_to_parent(s):
    """Convert a short string specification into a Sage parent.

    The string is first tried as an ArgumentGroup specification, then
    evaluated with sage_eval; both failures are combined into a single
    ValueError. The result must be a Parent (LazyImports are resolved).

    Raises:
        ValueError: if `s` cannot be interpreted, or the result is not
            a parent.
    """
    from sage.groups.misc_gps.argument_groups import ArgumentGroup
    from sage.misc.sage_eval import sage_eval
    def extract(s):
        try:
            return ArgumentGroup(specification=s)
        except Exception as e:
            e_ag = e
            # Drop tracebacks so combined exceptions stay readable.
            e_ag.__traceback__ = None
        try:
            return sage_eval(s)
        except Exception as e:
            e_se = e
            e_se.__traceback__ = None
        raise combine_exceptions(ValueError(("Cannot create a parent out of '%s'." % (s,))), e_ag, e_se)
    P = extract(s)
    from sage.misc.lazy_import import LazyImport
    # Resolve lazy imports to the actual object before the parent check.
    if (type(P) is LazyImport):
        P = P._get_object()
    from sage.structure.parent import is_Parent
    if (not is_Parent(P)):
        raise ValueError(("'%s' does not describe a parent." % (s,)))
    return P
def TranslateY(img, v):
    """Translate a PIL image vertically by fraction `v` of its height.

    `v` must lie in [-0.45, 0.45]. When the module-level `random_mirror`
    flag (defined elsewhere in this file) is set, the sign of the shift
    is flipped with probability 0.5.
    """
    assert ((- 0.45) <= v <= 0.45)
    if (random_mirror and (random.random() > 0.5)):
        v = (- v)
    # Convert the fraction into pixels of the image height.
    v = (v * img.size[1])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
    """Assert `mod.__warningregistry__` holds the expected entry count.

    The registry's bookkeeping 'version' key is not counted. The
    expectation can be overridden per Python version: `py37` applies on
    Python >= 3.7, otherwise `py34` applies on Python >= 3.4.
    """
    registry = getattr(mod, '__warningregistry__', {})
    count = len(registry)
    # 'version' is warnings-module bookkeeping, not a recorded warning.
    if 'version' in registry:
        count -= 1
    version = sys.version_info[:2]
    if version >= (3, 7):
        if py37 is not None:
            n_in_context = py37
    elif version >= (3, 4):
        if py34 is not None:
            n_in_context = py34
    assert_equal(count, n_in_context)
def prepare_raw_data(path_to_raw_csv, path_to_cleaner_data='cleaner_data.csv'):
    """Clean the raw questionnaire export and write intermediate + final CSVs.

    BUG FIX: the original's last line referenced an undefined name
    `path_to_cleaner_data` (NameError unless a module global existed out
    of view); it is now a parameter with a default — confirm the intended
    output path. Also removed two discarded no-ops: `df.append(gt_entry)`
    (its result was thrown away, and DataFrame.append was removed in
    pandas 2.0 — the row is actually added via .loc below) and
    `gt_copy.drop(15)` (result discarded).

    Args:
        path_to_raw_csv: path to the raw exported questionnaire CSV.
        path_to_cleaner_data: where to write the final cleaned CSV.
    """
    df = pd.read_csv(path_to_raw_csv)
    # Drop export bookkeeping columns.
    df = df.drop(columns=['section', 'section_id', 'attempt'])
    df.rename(columns={'69192: Vanligen fyll i den individuella sifferkoden som du fatt pa mail tillsammans med lanken till det har quizet:': 'sifferkod'}, inplace=True)
    df.rename(columns={'65723: Kon:': 'gender'}, inplace=True)
    df = df.drop(columns=['0.0', '0.0.1', '0.0.2', '0.0.3'])
    df.rename(columns={'65724: Antal ar verksam som veterinar:': 'years_active_as_vet'}, inplace=True)
    df.rename(columns={'65725: Anvander du nagon form av smartskala i ditt arbete?': 'use_painscale_at_work'}, inplace=True)
    df.rename(columns={'68929: Notera: Den har fragan ar bara till for ovning. Efter dennafraga kommer de 25 videoklippen du ska smartbedoma.\n\xa0\nPa en skalafran 0-10, hur smartpaverkad bedomer du att hasten pa filmen ar?\nEndast0 betyder "ingen smarta".\ntest_clip_0.mp4': 'practice_question'}, inplace=True)
    df = df.drop(columns=['0.0.4', '1.0.1'])
    df.to_csv('intermediate_save.csv')
    # Drop the remaining auto-generated per-question score columns.
    df = df.drop(columns=[f'1.0.{i}' for i in range(3, 25)])
    df = df.drop(columns=['1.0', '1.0.2'])
    df = df.drop(columns=['n correct', 'n incorrect', 'score'])
    # Rename the 25 clip-rating columns to q1..q25.
    qstrings = [('q' + str(i)) for i in range(1, 26)]
    df.columns = (['submitted', 'sifferkod', 'gender', 'years_active_as_vet', 'use_painscale_at_work', 'practice_question'] + qstrings)
    # Append the ground-truth pain labels as one extra row.
    gtdf = pd.read_csv('../data/lps/random_clips_lps/ground_truth_randomclips_lps.csv')
    gt = gtdf.sort_values(by=['ind'])
    pain_list = list(gt['pain'])
    gt_entry = (['nan', 'gt', 'gender', '0', 'no', 'no'] + pain_list)
    df.loc[len(df)] = gt_entry
    # Drop the author's own response row before saving.
    df = df.drop(28)
    df.to_csv('qrename_intermediate_save.csv')
    df.to_csv(path_to_cleaner_data)
# NOTE(review): SOURCE had a bare `.parametrize(...)` line here — a syntax
# error; the `@pytest.mark` prefix was lost in extraction. Restored
# (assumes pytest is imported at module level, as in any pytest file).
@pytest.mark.parametrize('reference, observations, anti_ref, expected', [(tf.constant([1.0, 1.0]), None, tf.constant([(- 1.0), (- 1.0)]), (tf.constant([[(- 1.0), (- 1.0)]]), tf.constant([[1.0, 1.0]]))), (tf.constant([1.0, 1.0]), None, tf.constant([1.0, (- 1.0)]), (tf.constant([[1.0, (- 1.0)]]), tf.constant([[1.0, 1.0]]))), (tf.constant([1.0, 1.0]), tf.constant([]), tf.constant([1.0, (- 1.0)]), (tf.constant([[1.0, (- 1.0)]]), tf.constant([[1.0, 1.0]])))])
def test_default_non_dominated_partition_when_no_valid_obs(reference: tf.Tensor, observations: Optional[tf.Tensor], anti_ref: Optional[tf.Tensor], expected: tuple[(tf.Tensor, tf.Tensor)]) -> None:
    """With no (or empty) observations, the partition bounds fall back to
    the single (anti_ref, reference) box."""
    npt.assert_array_equal(prepare_default_non_dominated_partition_bounds(reference, observations, anti_ref), expected)
def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
    """Estimate the largest batch size fitting in `fraction` of CUDA memory.

    Profiles a few batch sizes, fits a line (memory ~ batch), and solves
    for the batch size using `fraction` of the free memory. Falls back to
    `batch_size` on CPU or when profiling fails.

    Args:
        model: the model whose device and memory footprint are measured.
        imgsz: square input image size used for the probe tensors.
        fraction: fraction of total GPU memory to target.
        batch_size: default returned on CPU or on profiling failure.

    Returns:
        Estimated batch size (int).
    """
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device
    if (device.type == 'cpu'):
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    d = str(device).upper()
    properties = torch.cuda.get_device_properties(device)
    t = (properties.total_memory / (1024 ** 3))  # GiB total
    r = (torch.cuda.memory_reserved(device) / (1024 ** 3))  # GiB reserved
    a = (torch.cuda.memory_allocated(device) / (1024 ** 3))  # GiB allocated
    f = (t - (r + a))  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
        y = profile(img, model, n=3, device=device)
    except Exception as e:
        # BUG FIX: the original fell through with `y` unbound here and
        # raised NameError below; fall back to the default batch size.
        LOGGER.warning(f'{prefix}{e}')
        return batch_size
    y = [x[2] for x in y if x]  # memory column of successful profiles
    batch_sizes = batch_sizes[:len(y)]
    # Linear fit: memory ~= p[0] * batch + p[1].
    p = np.polyfit(batch_sizes, y, deg=1)
    b = int((((f * fraction) - p[1]) / p[0]))
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {(t * fraction):.2f}G/{t:.2f}G ({(fraction * 100):.0f}%)')
    return b
class LeanPreprocessedTempVarAlloc(LeanPreprocessedCodeElement):
    """Preprocessed temporary-variable allocation for the Lean backend."""
    # The identifier being allocated.
    identifier: TypedIdentifier
    # Its resolved Cairo type.
    resolved_type: CairoType
    # The ap-advancing instruction, when one was emitted.
    add_ap_instr: Optional[LeanPreprocessedAddAp]
    # Initialiser expression. NOTE(review): the None check in get_exprs
    # implies this can be absent — confirm whether the annotation should
    # be Optional[Expression].
    expr: Expression
    def get_exprs(self) -> List[Expression]:
        """Return the allocation's expressions ([] when expr is None)."""
        return ([self.expr] if (self.expr is not None) else [])
def _is_hardcoded_xy(args):
    """Return True when args.dataset is in the HARDCODED_JUST_XY set of
    datasets whose (x, y) handling is hard-coded."""
    return args.dataset in HARDCODED_JUST_XY
def resize_pinhole_camera(pinhole_cam, tgt_size):
    """Rescale pinhole-camera intrinsics to a new image size.

    Args:
        pinhole_cam: camera with .shape == (h, w) and cx/cy/fx/fy
            intrinsics.
        tgt_size: (height, width) of the resized image.

    Returns:
        A new PinholeCamera with intrinsics scaled for tgt_size.
    """
    new_h, new_w = tgt_size
    # Independent scale factors for each image axis.
    scale_h = new_h / pinhole_cam.shape[0]
    scale_w = new_w / pinhole_cam.shape[1]
    return PinholeCamera(
        new_w,
        new_h,
        pinhole_cam.fx * scale_w,
        pinhole_cam.fy * scale_h,
        pinhole_cam.cx * scale_w,
        pinhole_cam.cy * scale_h,
    )
class StrLiteralBuilder(object):
    """Builds a string literal simultaneously as bytes and as unicode.

    Every append is mirrored into both underlying builders so either
    representation can be retrieved at the end.
    """
    def __init__(self, target_encoding):
        self._bytes = BytesLiteralBuilder(target_encoding)
        self._unicode = UnicodeLiteralBuilder()
    def append(self, characters):
        """Append literal characters to both representations."""
        self._bytes.append(characters)
        self._unicode.append(characters)
    def append_charval(self, char_number):
        """Append a character given by its code point."""
        self._bytes.append_charval(char_number)
        self._unicode.append_charval(char_number)
    def append_uescape(self, char_number, escape_string):
        """Append a unicode escape: the raw escape text goes into the
        bytes form, the decoded code point into the unicode form."""
        self._bytes.append(escape_string)
        self._unicode.append_charval(char_number)
    def getstrings(self):
        """Return the pair (bytes_value, unicode_value)."""
        return (self._bytes.getstring(), self._unicode.getstring())
def pixel_cross_entropy(gt, pred, lengths):
    """Length-masked binary cross-entropy over per-sample pixel sequences.

    Args:
        gt: Ground-truth tensor, one row of targets per batch element.
        pred: Raw logits with the same layout as ``gt``; sigmoid is applied
            internally.
        lengths: 1-D tensor giving the valid prefix length of each row.

    Returns:
        (total_loss, individual_loss, pred): the batch-mean loss, the list of
        per-sample summed losses, and the sigmoid-activated predictions.
    """
    eps = 1e-20  # guards log(0)
    probs = torch.sigmoid(pred)
    per_sample = []
    for idx in range(int(gt.shape[0])):
        valid = int(lengths[idx].cpu())
        target = gt[idx][:valid]
        prob = probs[idx][:valid]
        bce = target * torch.log(prob + eps) + (1 - target) * torch.log((1 - prob) + eps)
        per_sample.append(-bce.sum())
    return (torch.stack(per_sample).mean(), per_sample, probs)
class RobertaForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    The DummyObject metaclass makes attribute access / instantiation raise a
    helpful "requires torch" error instead of an ImportError at import time.
    """
    # Backends that must be available before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises with an installation hint if torch is missing.
        requires_backends(self, ['torch'])
class SparseDropoutWithReplacementTest(hu.HypothesisTestCase):
    """Tests for the caffe2 SparseDropoutWithReplacement operator.

    NOTE(review): the bare '(**hu.gcs_cpu_only)' lines below look like
    stripped '@given(**hu.gcs_cpu_only)' hypothesis decorators — confirm
    against the original source.
    """
    (**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):
        """ratio=0.0 must leave both values and lengths unchanged."""
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = (- 1)
        self.ws.create_blob('X').feed(X)
        self.ws.create_blob('Lengths').feed(Lengths)
        sparse_dropout_op = core.CreateOperator('SparseDropoutWithReplacement', ['X', 'Lengths'], ['Y', 'LY'], ratio=0.0, replacement_value=replacement_value)
        self.ws.run(sparse_dropout_op)
        Y = self.ws.blobs['Y'].fetch()
        OutputLengths = self.ws.blobs['LY'].fetch()
        self.assertListEqual(X.tolist(), Y.tolist(), 'Values should stay unchanged')
        self.assertListEqual(Lengths.tolist(), OutputLengths.tolist(), 'Lengths should stay unchanged.')
    (**hu.gcs_cpu_only)
    def test_all_dropout(self, gc, dc):
        """ratio=1.0 must collapse every segment to one replacement value."""
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = (- 1)
        self.ws.create_blob('X').feed(X)
        self.ws.create_blob('Lengths').feed(Lengths)
        sparse_dropout_op = core.CreateOperator('SparseDropoutWithReplacement', ['X', 'Lengths'], ['Y', 'LY'], ratio=1.0, replacement_value=replacement_value)
        self.ws.run(sparse_dropout_op)
        y = self.ws.blobs['Y'].fetch()
        lengths = self.ws.blobs['LY'].fetch()
        for elem in y:
            self.assertEqual(elem, replacement_value, 'Expected all negative elements when dropout ratio is 1.')
        # Each dropped segment is reduced to a single replacement element.
        for length in lengths:
            self.assertEqual(length, 1)
        self.assertEqual(sum(lengths), len(y))
    (**hu.gcs_cpu_only)
    def test_all_dropout_empty_input(self, gc, dc):
        """An empty input segment still yields one replacement element."""
        X = np.array([]).astype(np.int64)
        Lengths = np.array([0]).astype(np.int32)
        replacement_value = (- 1)
        self.ws.create_blob('X').feed(X)
        self.ws.create_blob('Lengths').feed(Lengths)
        sparse_dropout_op = core.CreateOperator('SparseDropoutWithReplacement', ['X', 'Lengths'], ['Y', 'LY'], ratio=1.0, replacement_value=replacement_value)
        self.ws.run(sparse_dropout_op)
        y = self.ws.blobs['Y'].fetch()
        lengths = self.ws.blobs['LY'].fetch()
        self.assertEqual(len(y), 1, 'Expected single dropout value')
        self.assertEqual(len(lengths), 1, 'Expected single element in lengths array')
        self.assertEqual(lengths[0], 1, 'Expected 1 as sole length')
        self.assertEqual(sum(lengths), len(y))
.serializable
class MapExit(ExitNode):
    """Graph node marking the end of a Map scope.

    NOTE(review): several decorators appear to have been stripped from this
    copy — '.serializable' above looks like '@dace.serialize.serializable';
    `map_type` (no self) reads as a @staticmethod, `from_json` (cls) as a
    @classmethod, and the duplicate `map`/`schedule` definitions as
    @property getter/setter pairs. Confirm against the original source.
    """
    def __init__(self, map: 'Map'):
        super(MapExit, self).__init__()
        if (map is None):
            raise ValueError('Map for MapExit can not be None.')
        # The Map object this exit node closes.
        self._map = map
    def map_type():
        # Presumably a @staticmethod returning the scope's map class.
        return Map
    def from_json(cls, json_obj, context=None):
        # Presumably a @classmethod: rebuild the exit from serialized JSON,
        # linking it to its entry node's map when the entry can be found.
        try:
            entry_node = context['sdfg_state'].node(int(json_obj['scope_entry']))
            ret = cls(map=entry_node.map)
        except (IndexError, TypeError, graph.NodeNotFoundError):
            # Fall back to a fresh placeholder map when the entry is missing.
            ret = cls(cls.map_type()('_', [], []))
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret
    def map(self):
        # Presumably the @property getter for the associated map.
        return self._map
    def map(self, val):
        # Presumably the @map.setter.
        self._map = val
    def schedule(self):
        # Presumably a @property delegating to the map's schedule.
        return self._map.schedule
    def schedule(self, val):
        # Presumably the @schedule.setter.
        self._map.schedule = val
    def label(self):
        # Presumably a @property: the exit's label mirrors the map's label.
        return self._map.label
    def __str__(self):
        return str(self.map)
def train_one_epoch(run_manager, args, epoch, warmup_epochs=0, warmup_lr=0):
    """Train the dynamic supernet for one epoch, sampling subnets per batch.

    For each batch, `args.dynamic_batch_size` subnets are sampled from the
    dynamic network; each one computes a loss (optionally distilled from
    `args.teacher_model`) and backpropagates, after which gradients are
    synchronized across ranks and a single optimizer step is taken.

    Args:
        run_manager: Holds the network, optimizer, run_config and criterion.
        args: Options (kd_ratio, kd_type, dynamic_batch_size, task, phase, ...).
        epoch: Current epoch index.
        warmup_epochs: Number of warm-up epochs using the warm-up LR schedule.
        warmup_lr: Base learning rate during warm-up.

    Returns:
        (mean_loss, mean_top1, mean_top5) averaged over the epoch.
    """
    dynamic_net = run_manager.net
    dynamic_net.train()
    # Reshuffle the distributed sampler deterministically for this epoch.
    run_manager.run_config.train_loader.sampler.set_epoch(epoch)
    MyRandomResizedCrop.EPOCH = epoch
    nBatch = len(run_manager.run_config.train_loader)
    data_time = AverageMeter()
    losses = DistributedMetric('train_loss')
    top1 = DistributedMetric('train_top1')
    top5 = DistributedMetric('train_top5')
    end = time.time()
    for (i, (images, labels)) in enumerate(run_manager.run_config.train_loader):
        data_time.update((time.time() - end))
        # Learning-rate schedule: warm-up branch for the first warmup_epochs.
        if (epoch < warmup_epochs):
            new_lr = run_manager.run_config.warmup_adjust_learning_rate(run_manager.optimizer, (warmup_epochs * nBatch), nBatch, epoch, i, warmup_lr)
        else:
            new_lr = run_manager.run_config.adjust_learning_rate(run_manager.optimizer, (epoch - warmup_epochs), i, nBatch)
        (images, labels) = (images.cuda(), labels.cuda())
        target = labels
        if (args.kd_ratio > 0):
            # Knowledge distillation: teacher forward pass without gradients.
            args.teacher_model.train()
            with torch.no_grad():
                soft_logits = args.teacher_model(images).detach()
                soft_label = F.softmax(soft_logits, dim=1)
        run_manager.optimizer.zero_grad()
        (loss_of_subnets, acc1_of_subnets, acc5_of_subnets) = ([], [], [])
        subnet_str = ''
        for _ in range(args.dynamic_batch_size):
            # Seed subnet sampling: per-process, or synchronized across ranks.
            if args.independent_distributed_sampling:
                subnet_seed = (os.getpid() + time.time())
            else:
                subnet_seed = int(('%d%.3d%.3d' % (((epoch * nBatch) + i), _, 0)))
            random.seed(subnet_seed)
            subnet_settings = dynamic_net.sample_active_subnet()
            subnet_str += ((('%d: ' % _) + ','.join([('%s_%s' % (key, (('%.1f' % subset_mean(val, 0)) if isinstance(val, list) else val))) for (key, val) in subnet_settings.items()])) + ' || ')
            output = run_manager.net(images)
            if (args.kd_ratio == 0):
                loss = run_manager.train_criterion(output, labels)
                loss_type = 'ce'
            else:
                # Mix distillation loss with the plain training criterion.
                if (args.kd_type == 'ce'):
                    kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
                else:
                    kd_loss = F.mse_loss(output, soft_logits)
                loss = ((args.kd_ratio * kd_loss) + run_manager.train_criterion(output, labels))
                loss = (loss * (2 / (args.kd_ratio + 1)))
                loss_type = ('%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type))
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            loss_of_subnets.append(loss)
            acc1_of_subnets.append(acc1[0])
            acc5_of_subnets.append(acc5[0])
            # Scale for world-size averaging, then accumulate this subnet's grads.
            loss = (loss / distributed.get_world_size())
            loss.backward()
        # Gradients of all sampled subnets accumulated: sync and step once.
        distributed.sync_grad_sum(run_manager.net)
        run_manager.optimizer.step()
        losses.update(list_mean(loss_of_subnets), images.size(0))
        top1.update(list_mean(acc1_of_subnets), images.size(0))
        top5.update(list_mean(acc5_of_subnets), images.size(0))
        if (((i % 100) == 0) and (torch.distributed.get_rank() == 0)):
            # Periodic progress logging from rank 0 only.
            string = f'Epoch [{epoch}] Iter [{i}/{nBatch}] '
            for (key, value) in {'task': args.task, 'phase': args.phase, 'loss': losses.avg.item(), 'top1': top1.avg.item(), 'top5': top5.avg.item(), 'R': images.size(2), 'lr': new_lr, 'loss_type': loss_type, 'seed': str(subnet_seed), 'str': subnet_str, 'data_time': data_time.avg}.items():
                string += f'{key}: {value}, '
            print(string)
        end = time.time()
    return (losses.avg.item(), top1.avg.item(), top5.avg.item())
(scope='module')
def continuum_compare_data(continuum_compare_data_fname, request):
    """Yield a read-only pandas HDFStore of continuum comparison data.

    NOTE(review): the "(scope='module')" line above looks like a stripped
    '@pytest.fixture(scope="module")' decorator — confirm against the
    original source. The store is closed via a pytest finalizer when the
    module's tests complete.
    """
    compare_data = pd.HDFStore(continuum_compare_data_fname, mode='r')
    def fin():
        # Release the HDF5 file handle at module teardown.
        compare_data.close()
    request.addfinalizer(fin)
    return compare_data
def __GCD_sequence(v, **kwargs):
    """Return the GCD of the elements of the sequence ``v``.

    An empty sequence yields the integer zero. Extra keyword arguments are
    forwarded to each element's ``gcd`` method.
    """
    if len(v) == 0:
        return ZZ(0)
    # Seed the fold with zero from the sequence's universe when available,
    # falling back to the integer ring otherwise.
    if hasattr(v, 'universe'):
        result = v.universe()(0)
    else:
        result = ZZ(0)
    for element in v:
        result = element.gcd(result, **kwargs)
    return result
def splantider(tck, n=1):
    """Compute the spline representing the n-th antiderivative of ``tck``.

    Parameters
    ----------
    tck : tuple
        (t, c, k) — knots, coefficients and degree of the input B-spline.
    n : int, optional
        Antiderivative order; a negative value delegates to ``splder`` for
        differentiation instead.

    Returns
    -------
    tuple
        (t, c, k) of the degree-(k + n) antiderivative spline.
    """
    if n < 0:
        # Negative antiderivative order means differentiation.
        return splder(tck, -n)
    (t, c, k) = tck
    # Broadcasting helper: align 1-D knot spans with any trailing coeff dims.
    expand = (slice(None),) + (None,) * len(c.shape[1:])
    for _ in range(n):
        # Knot spans supporting each degree-k basis function.
        dt = (t[k + 1:] - t[:-k - 1])[expand]
        # Standard B-spline antiderivative rule on the coefficients.
        c = np.cumsum(c[:-k - 1] * dt, axis=0) / (k + 1)
        # Pad coefficients: one leading zero, last value repeated k + 2 times.
        c = np.concatenate((np.zeros((1,) + c.shape[1:]), c, np.repeat(c[-1:], k + 2, axis=0)), axis=0)
        # Extend the knot vector by one knot on each side.
        t = np.concatenate(([t[0]], t, [t[-1]]))
        k += 1
    return (t, c, k)
def gen_config(config_file):
    """Serialize the global ``cfg`` to ``config_file`` as block-style YAML."""
    flattened = {}
    # Convert the edict-based config into a plain dict for YAML dumping.
    _edict2dict(flattened, cfg)
    with open(config_file, 'w') as out:
        yaml.dump(flattened, out, default_flow_style=False)
class GradientAccumulator(object):
    """Accumulates gradients across multiple steps for delayed updates.

    NOTE(review): `step` and `gradients` are accessed without parentheses
    in `__call__` (``_ = self.step``), which strongly suggests they were
    '@property' accessors whose decorators were stripped from this copy —
    confirm against the original source.
    """

    def __init__(self):
        # Per-variable tf.Variable accumulators; allocated on the first call.
        self._gradients = []
        # tf.Variable counting accumulated batches; None until first use.
        self._accum_steps = None

    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if (self._accum_steps is None):
            self._accum_steps = tf.Variable(tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    def gradients(self):
        """Current accumulated gradient values (None entries preserved)."""
        if (not self._gradients):
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return list(((gradient.value() if (gradient is not None) else gradient) for gradient in self._gradients))

    def __call__(self, gradients):
        """Add one batch of ``gradients`` into the running sums."""
        if (not self._gradients):
            # First call: touch `step` (presumably a property) to create the
            # counter, then allocate zero accumulators matching the gradients.
            _ = self.step
            self._gradients.extend([(tf.Variable(tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) if (gradient is not None) else gradient) for gradient in gradients])
        if (len(gradients) != len(self._gradients)):
            raise ValueError(f'Expected {len(self._gradients)} gradients, but got {len(gradients)}')
        for (accum_gradient, gradient) in zip(self._gradients, gradients):
            # None gradients (unused variables) are passed through untouched.
            if ((accum_gradient is not None) and (gradient is not None)):
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero all accumulators and the step counter (no-op before first call)."""
        if (not self._gradients):
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if (gradient is not None):
                gradient.assign(tf.zeros_like(gradient))
class SSIM(object):
    """Mean SSIM between paired real/fake image directories (TensorFlow)."""

    def __init__(self):
        pass

    def evaluate(self, data_path_real, data_path_fake):
        """Average SSIM over name-sorted '*.jpg' pairs from both directories.

        NOTE(review): files are globbed as '.jpg' but decoded with
        tf.image.decode_png in ssim_score — confirm the actual on-disk
        format matches.
        """
        path_list_real = glob.glob(os.path.join(data_path_real, '*.jpg'))
        path_list_fake = glob.glob(os.path.join(data_path_fake, '*.jpg'))
        # Sorting pairs files by name; assumes matching filenames in both dirs.
        path_list_real = sorted(path_list_real)
        path_list_fake = sorted(path_list_fake)
        score_list = []
        for i in range(len(path_list_real)):
            score = self.ssim_score(path_list_real[i], path_list_fake[i])
            score_list.append(score)
        return (sum(score_list) / len(score_list))

    def ssim_score(self, img_path_real, img_path_fake):
        """SSIM between two image files, decoded as PNG tensors."""
        img_real = tf.io.read_file(img_path_real)
        img_real = tf.image.decode_png(img_real)
        img_fake = tf.io.read_file(img_path_fake)
        img_fake = tf.image.decode_png(img_fake)
        ssim = self.calculate_ssim(img_real, img_fake, dtype=tf.uint8)
        return float(ssim)

    def calculate_ssim(self, img_real, img_fake, dtype=tf.uint8):
        """SSIM for uint8 (max_val=255) or float32 (max_val=1.0) inputs.

        NOTE(review): if `dtype` is neither tf.uint8 nor tf.float32, `ssim`
        is never assigned and this raises UnboundLocalError — confirm
        whether that is intended.
        """
        if (dtype == tf.uint8):
            ssim = tf.image.ssim(img_real, img_fake, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
        elif (dtype == tf.float32):
            img_real = tf.image.convert_image_dtype(img_real, tf.float32)
            img_fake = tf.image.convert_image_dtype(img_fake, tf.float32)
            ssim = tf.image.ssim(img_real, img_fake, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
        return ssim
def _Call(t, symbols, inferred_symbols):
    """Infer the result dtype of an AST call node.

    Dispatches into the callee and all arguments first; if the callee itself
    yields a type, that wins. Otherwise a handful of well-known callables
    (math.*, min/max, abs/log, round, read_channel_intel) get special-cased,
    falling back to generic dtype inference.
    """
    callee_type = _dispatch(t.func, symbols, inferred_symbols)
    arg_types = [_dispatch(arg, symbols, inferred_symbols) for arg in t.args]
    # Keyword arguments are dispatched for their side effects only.
    for kw in t.keywords:
        _dispatch(kw, symbols, inferred_symbols)
    if callee_type:
        return callee_type
    name = dace.frontend.python.astutils.rname(t)
    dot = name.rfind('.')
    module = name[:dot] if dot > -1 else ''
    if module == 'math':
        # math functions promote to the common type of their arguments.
        return dtypes.result_type_of(arg_types[0], *arg_types)
    if name == 'read_channel_intel':
        return arg_types[0]
    if name in ('abs', 'log'):
        return arg_types[0]
    if name in ('min', 'max'):
        return dtypes.result_type_of(arg_types[0], *arg_types)
    if name in ('round',):
        return dtypes.typeclass(int)
    # Last resort: generic inference from the node itself.
    return _infer_dtype(t) or None
def write_list_to_file(strings, list_file):
    """Write each item of ``strings`` to ``list_file``, one per line."""
    with open(list_file, 'w') as handle:
        for item in strings:
            handle.write('%s\n' % item)
class BaselineModelChunked(BaselineModel):
    """Baseline CLRS model variant operating on chunked (fixed-size) batches.

    Keeps per-(length, algorithm) message-passing states that carry over
    between chunks, and jit/pmap-compiles the gradient and feedback paths
    depending on the number of local JAX devices.
    """
    # Current message-passing state per [length_index][algorithm_index].
    mp_states: List[List[nets.MessagePassingStateChunked]]
    # Pristine copies used to (re)start each gradient/feedback computation.
    init_mp_states: List[List[nets.MessagePassingStateChunked]]

    def _create_net_fns(self, hidden_dim, encode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode):
        """Build the haiku-transformed network and compile train/update fns."""
        def _use_net(*args, **kwargs):
            return nets.NetChunked(self._spec, hidden_dim, encode_hints, self.decode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode, self.nb_dims, self.nb_msg_passing_steps)(*args, **kwargs)
        self.net_fn = hk.transform(_use_net)
        pmap_args = dict(axis_name='batch', devices=jax.local_devices())
        n_devices = jax.local_device_count()
        # Single device: plain jit; multiple devices: pmap with pmean syncing.
        (func, static_arg, extra_args) = ((jax.jit, 'static_argnums', {}) if (n_devices == 1) else (jax.pmap, 'static_broadcasted_argnums', pmap_args))
        pmean = functools.partial(jax.lax.pmean, axis_name='batch')
        self._maybe_pmean = (pmean if (n_devices > 1) else (lambda x: x))
        # Static-argument positions differ per wrapped function signature.
        extra_args[static_arg] = 4
        self.jitted_grad = func(self._compute_grad, **extra_args)
        extra_args[static_arg] = 5
        self.jitted_feedback = func(self._feedback, donate_argnums=[0, 4], **extra_args)
        extra_args[static_arg] = [3, 4]
        self.jitted_accum_opt_update = func(accum_opt_update, donate_argnums=[0, 2], **extra_args)

    def _init_mp_state(self, features_list: List[List[_FeaturesChunked]], rng_key: _Array):
        """Run the net once in init mode to produce initial MP states."""
        def _empty_mp_state():
            return nets.MessagePassingStateChunked(inputs=None, hints=None, is_first=None, hint_preds=None, hiddens=None, lstm_state=None)
        empty_mp_states = [[_empty_mp_state() for _ in f] for f in features_list]
        # Throwaway params: only the returned mp_state matters here.
        dummy_params = [self.net_fn.init(rng_key, f, e, False, init_mp_state=True, algorithm_index=(- 1)) for (f, e) in zip(features_list, empty_mp_states)]
        mp_states = [self.net_fn.apply(d, rng_key, f, e, False, init_mp_state=True, algorithm_index=(- 1))[1] for (d, f, e) in zip(dummy_params, features_list, empty_mp_states)]
        return mp_states

    def init(self, features: List[List[_FeaturesChunked]], seed: _Seed):
        """Initialize parameters, optimizer state and message-passing states."""
        self.mp_states = self._init_mp_state(features, jax.random.PRNGKey(seed))
        self.init_mp_states = [list(x) for x in self.mp_states]
        self.params = self.net_fn.init(jax.random.PRNGKey(seed), features[0], self.mp_states[0], True, init_mp_state=False, algorithm_index=(- 1))
        self.opt_state = self.opt.init(self.params)
        # Skeleton opt state used for tree-structure bookkeeping elsewhere.
        self.opt_state_skeleton = self.opt.init(jnp.zeros(1))

    def predict(self, rng_key: hk.PRNGSequence, features: _FeaturesChunked, algorithm_index: Optional[int]=None):
        """Prediction is not supported for the chunked model."""
        raise NotImplementedError

    def _loss(self, params, rng_key, feedback, mp_state, algorithm_index):
        """Sum chunked output and (optionally) hint losses for one feedback."""
        ((output_preds, hint_preds), mp_state) = self.net_fn.apply(params, rng_key, [feedback.features], [mp_state], repred=False, init_mp_state=False, algorithm_index=algorithm_index)
        nb_nodes = _nb_nodes(feedback, is_chunked=True)
        total_loss = 0.0
        is_first = feedback.features.is_first
        is_last = feedback.features.is_last
        for truth in feedback.outputs:
            total_loss += losses.output_loss_chunked(truth=truth, pred=output_preds[truth.name], is_last=is_last, nb_nodes=nb_nodes)
        if self.decode_hints:
            for truth in feedback.features.hints:
                loss = losses.hint_loss_chunked(truth=truth, pred=hint_preds[truth.name], is_first=is_first, nb_nodes=nb_nodes)
                total_loss += loss
        return (total_loss, (mp_state,))

    def _compute_grad(self, params, rng_key, feedback, mp_state, algorithm_index):
        """Loss + gradients; pmean-reduced across devices when pmapped."""
        ((lss, (mp_state,)), grads) = jax.value_and_grad(self._loss, has_aux=True)(params, rng_key, feedback, mp_state, algorithm_index)
        return (self._maybe_pmean(lss), mp_state, self._maybe_pmean(grads))

    def _feedback(self, params, rng_key, feedback, mp_state, opt_state, algorithm_index):
        """One full training step: grad, sync, parameter update."""
        ((lss, (mp_state,)), grads) = jax.value_and_grad(self._loss, has_aux=True)(params, rng_key, feedback, mp_state, algorithm_index)
        grads = self._maybe_pmean(grads)
        (params, opt_state) = self._update_params(params, grads, opt_state, algorithm_index)
        lss = self._maybe_pmean(lss)
        return (lss, params, opt_state, mp_state)

    def compute_grad(self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index: Optional[Tuple[(int, int)]]=None) -> Tuple[(float, _Array)]:
        """Compute gradients for one (length, algorithm) slot.

        `algorithm_index` is a (length_index, algorithm_index) pair; it may be
        omitted only for single-algorithm specs.
        """
        if (algorithm_index is None):
            assert (len(self._spec) == 1)
            algorithm_index = (0, 0)
        (length_index, algorithm_index) = algorithm_index
        # Always restart from the pristine state for this slot.
        mp_state = self.init_mp_states[length_index][algorithm_index]
        rng_keys = _maybe_pmap_rng_key(rng_key)
        feedback = _maybe_pmap_reshape(feedback, split_axis=1)
        mp_state = _maybe_pmap_reshape(mp_state, split_axis=0)
        (loss, mp_state, grads) = self.jitted_grad(self._device_params, rng_keys, feedback, mp_state, algorithm_index)
        loss = _maybe_pick_first_pmapped(loss)
        grads = _maybe_pick_first_pmapped(grads)
        mp_state = _maybe_restack_from_pmap(mp_state)
        # Persist the carried-over state for the next chunk of this slot.
        self.mp_states[length_index][algorithm_index] = mp_state
        return (loss, grads)

    def feedback(self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index=None) -> float:
        """Run one training step and update device params/opt state in place."""
        if (algorithm_index is None):
            assert (len(self._spec) == 1)
            algorithm_index = (0, 0)
        (length_index, algorithm_index) = algorithm_index
        mp_state = self.init_mp_states[length_index][algorithm_index]
        rng_keys = _maybe_pmap_rng_key(rng_key)
        feedback = _maybe_pmap_reshape(feedback, split_axis=1)
        mp_state = _maybe_pmap_reshape(mp_state, split_axis=0)
        (loss, self._device_params, self._device_opt_state, mp_state) = self.jitted_feedback(self._device_params, rng_keys, feedback, mp_state, self._device_opt_state, algorithm_index)
        loss = _maybe_pick_first_pmapped(loss)
        mp_state = _maybe_restack_from_pmap(mp_state)
        self.mp_states[length_index][algorithm_index] = mp_state
        return loss

    def verbose_loss(self, *args, **kwargs):
        """Per-component loss reporting is not supported for chunked training."""
        raise NotImplementedError
def show_ipython_images_slider(image_pathes_list, slider_label='', first_arg=0):
    """Display an IPython slider widget for browsing a list of image paths."""
    def _render(**kwargs):
        # The slider's current value indexes into the image path list.
        display(Image(image_pathes_list[kwargs[slider_label]]))

    slider = IntSlider(min=0, max=(len(image_pathes_list) - 1), step=1)
    display(interactive(_render, **{slider_label: slider}))
def generate_list_of_planets(number):
    """Return a list of ``number`` randomly generated example planets."""
    return [genExamplePlanet() for _ in range(number)]
def load(data_dir, config, splits):
    """Load the dataset named by config['data.dataset'].

    Args:
        data_dir: Root directory of the dataset files.
        config: Configuration mapping; 'data.dataset' selects the dataset.
        splits: Split names to load (forwarded to the dataset loader).

    Returns:
        The loaded dataset object.

    Raises:
        ValueError: If the configured dataset is not supported.
    """
    dataset_name = config['data.dataset']
    if dataset_name != 'omniglot':
        # Fixed typo in the error message: 'Unknow' -> 'Unknown'.
        raise ValueError(f'Unknown dataset: {dataset_name}')
    return load_omniglot(data_dir, config, splits)
def measure_table(rows):
    """Return the display width of each column over all ``rows``.

    Each column's width is the maximum terminal length of any cell in that
    column; the result is a tuple ordered by column index.
    """
    column_widths = {}
    for row in rows:
        for (col_idx, cell) in enumerate(row):
            column_widths[col_idx] = max(column_widths.get(col_idx, 0), term_len(cell))
    return tuple(column_widths[col_idx] for col_idx in sorted(column_widths))
def reduce_sum(layers, embed_keep_prob=1.0, drop_func=dropout, reuse=True):
    """Element-wise sum of ``layers``, with optional dropout.

    Dropout (via ``drop_func``) is applied only when ``embed_keep_prob`` is
    strictly below 1. ``reuse`` is accepted for interface compatibility.
    """
    summed = tf.add_n(layers)
    if embed_keep_prob >= 1:
        return summed
    return drop_func(summed, embed_keep_prob)
def residual_block(cnn, depth, stride, pre_activation):
    """Append a two-conv residual block to the ``cnn`` builder.

    Supports both the classic ResNet-v1 ordering and the pre-activation
    (ResNet-v2) ordering. When the channel count changes, the shortcut is
    average-pooled and zero-padded on the channel axis to match ``depth``.
    Updates ``cnn.top_layer`` / ``cnn.top_size`` in place; returns None.
    """
    input_layer = cnn.top_layer
    in_size = cnn.top_size
    if (in_size != depth):
        # Channel mismatch: 1x1 average-pool with the block's stride, then
        # zero-pad channels up to `depth` (position depends on data format).
        shortcut = cnn.apool(1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size)
        padding = ((depth - in_size) // 2)
        if (cnn.channel_pos == 'channels_last'):
            shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [padding, padding]])
        else:
            shortcut = tf.pad(shortcut, [[0, 0], [padding, padding], [0, 0], [0, 0]])
    else:
        shortcut = input_layer
    if pre_activation:
        # v2: BN + ReLU before the first convolution.
        res = cnn.batch_norm(input_layer)
        res = tf.nn.relu(res)
    else:
        res = input_layer
    cnn.conv(depth, 3, 3, stride, stride, input_layer=res, num_channels_in=in_size, use_batch_norm=True, bias=None)
    if pre_activation:
        # v2: no activation/BN after the second conv; plain residual add.
        res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=False, bias=None)
        output = (shortcut + res)
    else:
        # v1: BN on the second conv, ReLU after the residual add.
        res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=True, bias=None)
        output = tf.nn.relu((shortcut + res))
    cnn.top_layer = output
    cnn.top_size = depth
(help='Initialize ADE20K dataset.')
('download_dir', type=str)
def main(download_dir):
    """Download and set up the ADE20K dataset under <download_dir>/ade20k.

    NOTE(review): the two lines above look like stripped CLI decorators
    (e.g. a click @command(help=...) / @argument('download_dir', ...) pair)
    — confirm against the original source.
    """
    dataset_dir = (Path(download_dir) / 'ade20k')
    # Existing files are kept (overwrite=False).
    download_ade(dataset_dir, overwrite=False)
def test_available_if_unbound_method():
    """`available_if` must gate unbound (class-level) method access too."""
    # When the condition holds, calling through the class succeeds.
    est = AvailableParameterEstimator()
    AvailableParameterEstimator.available_func(est)
    # When the condition is False, even class-level access raises.
    est = AvailableParameterEstimator(available=False)
    with pytest.raises(AttributeError, match="This 'AvailableParameterEstimator' has no attribute 'available_func'"):
        AvailableParameterEstimator.available_func(est)
class MLP_LeNet(nn.Module):
    """LeNet-style MLP classifier: flatten -> 120 -> 84 -> no_classes.

    Args:
        input_nc: Number of input channels.
        input_width: Input width in pixels.
        input_height: Input height in pixels.
        no_classes: Number of output classes (default 10).
        **kwargs: Ignored; accepted for interface compatibility.
    """

    def __init__(self, input_nc, input_width, input_height, no_classes=10, **kwargs):
        super(MLP_LeNet, self).__init__()
        # The first hidden layer must be narrower than the flattened input.
        assert (((input_nc * input_width) * input_height) > 120)
        self.fc1 = nn.Linear(((input_nc * input_width) * input_height), 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, no_classes)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, no_classes)."""
        out = x.view(x.size(0), (- 1))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        # Fixed: explicit dim=1 — the implicit-dim form is deprecated and
        # relied on the legacy default; `out` is always 2-D here, so dim=1
        # preserves the original behavior.
        return F.log_softmax(out, dim=1)
class MinecraftHoleyDungeonConfig(MinecraftConfig):
    """Config for the 'holey' 3-D Minecraft dungeon generation problem.

    NOTE(review): uses `dataclasses.field`, so this is presumably a
    @dataclass whose decorator is not visible in this copy — confirm.
    """
    # Name of the generation problem driven by this config.
    problem: str = 'minecraft_3D_dungeon_holey'
    # Per-objective weights used by the dungeon fitness function.
    weights: Dict[(str, int)] = field(default_factory=(lambda : {'regions': 0, 'path-length': 100, 'chests': 300, 'n_jump': 100, 'enemies': 100, 'nearest-enemy': 200}))
def build_run_environment(para_dict, dl_name, dp_name, model_name, runner_name):
    """Assemble data loader, data processor, model and runner from a config.

    SECURITY NOTE(review): string arguments are resolved via `eval`, which
    executes arbitrary code — safe only if every argument comes from trusted
    configuration, never from untrusted input. Confirm the call sites.

    Args:
        para_dict: Parameter dict (or its string repr) with seeds, gpu ids
            and all constructor arguments for the components below.
        dl_name / dp_name / model_name / runner_name: Classes, or string
            expressions that `eval` to classes.

    Returns:
        (data_loader, data_processor, model, runner) ready for training.
    """
    # Resolve string specs into the actual classes/dicts via eval.
    if (type(para_dict) is str):
        para_dict = eval(para_dict)
    if (type(dl_name) is str):
        dl_name = eval(dl_name)
    if (type(dp_name) is str):
        dp_name = eval(dp_name)
    if (type(model_name) is str):
        model_name = eval(model_name)
    if (type(runner_name) is str):
        runner_name = eval(runner_name)
    # Seed all RNGs and pin visible GPUs for reproducibility.
    torch.manual_seed(para_dict['random_seed'])
    torch.cuda.manual_seed(para_dict['random_seed'])
    np.random.seed(para_dict['random_seed'])
    os.environ['CUDA_VISIBLE_DEVICES'] = para_dict['gpu']
    logging.info(('# cuda devices: %d' % torch.cuda.device_count()))
    # Build the data loader from whichever para_dict keys its __init__ accepts.
    para_dict['load_data'] = True
    dl_paras = utils.get_init_paras_dict(dl_name, para_dict)
    logging.info(((str(dl_name) + ': ') + str(dl_paras)))
    data_loader = dl_name(**dl_paras)
    if ('all_his' in para_dict):
        # Optionally append user-history columns to the loaded data.
        data_loader.append_his(all_his=para_dict['all_his'], max_his=para_dict['max_his'], neg_his=para_dict['neg_his'], neg_column=para_dict['neg_column'])
    if (para_dict['rank'] == 1):
        # Ranking mode: binarize labels.
        data_loader.label_01()
    if (para_dict['drop_neg'] == 1):
        data_loader.drop_neg()
    para_dict['data_loader'] = data_loader
    # Build the data processor and pre-materialize all splits.
    dp_paras = utils.get_init_paras_dict(dp_name, para_dict)
    logging.info(((str(dp_name) + ': ') + str(dp_paras)))
    data_processor = dp_name(**dp_paras)
    data_processor.get_train_data(epoch=(- 1), model=model_name)
    data_processor.get_validation_data(model=model_name)
    data_processor.get_test_data(model=model_name)
    # Derive feature statistics required by the model constructor.
    (features, feature_dims, feature_min, feature_max) = data_loader.feature_info(include_id=model_name.include_id, include_item_features=model_name.include_item_features, include_user_features=model_name.include_user_features)
    (para_dict['feature_num'], para_dict['feature_dims']) = (len(features), feature_dims)
    para_dict['user_feature_num'] = len([f for f in features if f.startswith('u_')])
    para_dict['item_feature_num'] = len([f for f in features if f.startswith('i_')])
    para_dict['context_feature_num'] = len([f for f in features if f.startswith('c_')])
    # Expose every loader attribute not already present as a parameter.
    data_loader_vars = vars(data_loader)
    for key in data_loader_vars:
        if (key not in para_dict):
            para_dict[key] = data_loader_vars[key]
    # Build the model, restore a checkpoint if present, and move it to GPU.
    model_paras = utils.get_init_paras_dict(model_name, para_dict)
    logging.info(((str(model_name) + ': ') + str(model_paras)))
    model = model_name(**model_paras)
    model.load_model()
    if (torch.cuda.device_count() > 0):
        model = model.cuda()
    # Finally, construct the training runner.
    runner_paras = utils.get_init_paras_dict(runner_name, para_dict)
    logging.info(((str(runner_name) + ': ') + str(runner_paras)))
    runner = runner_name(**runner_paras)
    return (data_loader, data_processor, model, runner)
def pytest_addoption(parser: Parser) -> None:
    """Register the suite's custom pytest command-line options.

    Args:
        parser: The pytest option parser to extend.
    """
    parser.addoption('--level', action='store', default=None, type=int, help='Specify test level')
    # Fixed typo in the help text: 'Spepcifies' -> 'Specifies'.
    parser.addoption('--beat-challenges', action='store_true', help='Specifies whether the test suite should attempt to beat challenges')
def fit_model(config_data, model, train_iterator, valid_iterator=None):
    """Dispatch fitting: cross-validation only when 'cross_valid' == 'true'.

    Every other value (missing, falsy, or any other string) falls back to a
    single training run.
    """
    if config_data.get('cross_valid') == 'true':
        return fit_model_cv(config_data, model, train_iterator, valid_iterator)
    return fit_model_single(config_data, model, train_iterator, valid_iterator)
class HumanoidCfg(LeggedRobotCfg):
    """Isaac Gym training configuration for the MIT humanoid (fixed arms).

    Nested classes mirror the LeggedRobotCfg layout: environment sizes,
    terrain, command sampling, initial state, PD control gains,
    domain randomization, asset paths, reward scales, normalization,
    observation noise and simulator settings.
    """
    class env(LeggedRobotCfg.env):
        num_envs = 4096
        num_observations = 38
        num_actions = 10
        # Episode length in seconds.
        episode_length_s = 5
    class terrain(LeggedRobotCfg.terrain):
        curriculum = False
        mesh_type = 'plane'
        measure_heights = False
    class commands(LeggedRobotCfg.commands):
        curriculum = False
        max_curriculum = 1.0
        num_commands = 4
        # Seconds between command resamples.
        resampling_time = 5.0
        heading_command = False
        ang_vel_command = True
        class ranges():
            # Sampling ranges for velocity commands (m/s, rad/s).
            lin_vel_x = [0, 4.5]
            lin_vel_y = [(- 0.75), 0.75]
            ang_vel_yaw = [(- 2.0), 2.0]
            heading = [0.0, 0.0]
    class init_state(LeggedRobotCfg.init_state):
        reset_mode = 'reset_to_range'
        penetration_check = False
        # Nominal base pose and velocities [x, y, z] / quaternion.
        pos = [0.0, 0.0, 0.75]
        rot = [0.0, 0.0, 0.0, 1.0]
        lin_vel = [0.0, 0.0, 0.0]
        ang_vel = [0.0, 0.0, 0.0]
        # Reset ranges: [x, y, z, roll, pitch, yaw] and 6-DoF velocity.
        root_pos_range = [[0.0, 0.0], [0.0, 0.0], [0.72, 0.72], [((- torch.pi) / 10), (torch.pi / 10)], [((- torch.pi) / 10), (torch.pi / 10)], [((- torch.pi) / 10), (torch.pi / 10)]]
        root_vel_range = [[(- 0.5), 0.5], [(- 0.5), 0.5], [(- 0.5), 0.5], [(- 0.5), 0.5], [(- 0.5), 0.5], [(- 0.5), 0.5]]
        default_joint_angles = {'left_hip_yaw': 0.0, 'left_hip_abad': 0.0, 'left_hip_pitch': (- 0.2), 'left_knee': 0.25, 'left_ankle': 0.0, 'right_hip_yaw': 0.0, 'right_hip_abad': 0.0, 'right_hip_pitch': (- 0.2), 'right_knee': 0.25, 'right_ankle': 0.0}
        dof_pos_range = {'left_hip_yaw': [(- 0.1), 0.1], 'left_hip_abad': [(- 0.2), 0.2], 'left_hip_pitch': [(- 0.2), 0.2], 'left_knee': [0.6, 0.7], 'left_ankle': [(- 0.3), 0.3], 'right_hip_yaw': [(- 0.1), 0.1], 'right_hip_abad': [(- 0.2), 0.2], 'right_hip_pitch': [(- 0.2), 0.2], 'right_knee': [0.6, 0.7], 'right_ankle': [(- 0.3), 0.3]}
        dof_vel_range = {'left_hip_yaw': [(- 0.1), 0.1], 'left_hip_abad': [(- 0.1), 0.1], 'left_hip_pitch': [(- 0.1), 0.1], 'left_knee': [(- 0.1), 0.1], 'left_ankle': [(- 0.1), 0.1], 'right_hip_yaw': [(- 0.1), 0.1], 'right_hip_abad': [(- 0.1), 0.1], 'right_hip_pitch': [(- 0.1), 0.1], 'right_knee': [(- 0.1), 0.1], 'right_ankle': [(- 0.1), 0.1]}
    class control(LeggedRobotCfg.control):
        # PD gains per joint.
        stiffness = {'left_hip_yaw': 30.0, 'left_hip_abad': 30.0, 'left_hip_pitch': 30.0, 'left_knee': 30.0, 'left_ankle': 30.0, 'right_hip_yaw': 30.0, 'right_hip_abad': 30.0, 'right_hip_pitch': 30.0, 'right_knee': 30.0, 'right_ankle': 30.0}
        damping = {'left_hip_yaw': 5.0, 'left_hip_abad': 5.0, 'left_hip_pitch': 5.0, 'left_knee': 5.0, 'left_ankle': 5.0, 'right_hip_yaw': 5.0, 'right_hip_abad': 5.0, 'right_hip_pitch': 5.0, 'right_knee': 5.0, 'right_ankle': 5.0}
        action_scale = 1.0
        exp_avg_decay = None
        # Simulation steps per control step.
        decimation = 10
    class domain_rand(LeggedRobotCfg.domain_rand):
        randomize_friction = False
        friction_range = [0.5, 1.25]
        randomize_base_mass = False
        added_mass_range = [(- 1.0), 1.0]
        # Random pushes applied to the robot during training.
        push_robots = True
        push_interval_s = 2.5
        max_push_vel_xy = 0.5
    class asset(LeggedRobotCfg.asset):
        file = '{LEGGED_GYM_ROOT_DIR}/resources/robots/mit_humanoid/mit_humanoid_fixed_arms.urdf'
        keypoints = ['base']
        end_effectors = ['left_foot', 'right_foot']
        foot_name = 'foot'
        # Contact on any of these bodies terminates the episode.
        terminate_after_contacts_on = ['base', 'left_upper_leg', 'left_lower_leg', 'right_upper_leg', 'right_lower_leg', 'left_upper_arm', 'right_upper_arm', 'left_lower_arm', 'right_lower_arm', 'left_hand', 'right_hand']
        disable_gravity = False
        disable_actions = False
        disable_motors = False
        self_collisions = 0
        collapse_fixed_joints = False
        flip_visual_attachments = False
        default_dof_drive_mode = 3
    class rewards(LeggedRobotCfg.rewards):
        base_height_target = 0.62
        soft_dof_pos_limit = 0.9
        soft_dof_vel_limit = 0.9
        soft_torque_limit = 0.8
        only_positive_rewards = False
        tracking_sigma = 0.5
        class scales(LeggedRobotCfg.rewards.scales):
            # Negative scales are penalties; positive scales are rewards.
            action_rate = (- 0.001)
            action_rate2 = (- 0.0001)
            tracking_lin_vel = 10.0
            tracking_ang_vel = 5.0
            torques = (- 0.0001)
            dof_pos_limits = (- 10)
            torque_limits = (- 0.01)
            termination = (- 100)
            ori_pb = 1.0
            baseHeight_pb = 1.0
            jointReg_pb = 1.0
    class normalization(LeggedRobotCfg.normalization):
        class obs_scales(LeggedRobotCfg.normalization.obs_scales):
            base_z = (1.0 / 0.6565)
        clip_observations = 100.0
        clip_actions = 10.0
    class noise(LeggedRobotCfg.noise):
        add_noise = True
        noise_level = 1.0
        class noise_scales(LeggedRobotCfg.noise.noise_scales):
            base_z = 0.05
            dof_pos = 0.005
            dof_vel = 0.01
            lin_vel = 0.1
            ang_vel = 0.05
            gravity = 0.05
            in_contact = 0.1
            height_measurements = 0.1
    class sim(LeggedRobotCfg.sim):
        # Physics timestep (s) and gravity vector.
        dt = 0.001
        substeps = 1
        gravity = [0.0, 0.0, (- 9.81)]
        class physx():
            max_depenetration_velocity = 10.0
def create_random_join(schema, no_relationships):
    """Grow a random connected join of up to ``no_relationships`` edges.

    Starting from a random table, repeatedly picks a random relationship
    that connects an already-joined table to a new one. Stops early when no
    such relationship remains.

    Returns:
        (relationship_identifiers, joined_table_names)
    """
    assert (no_relationships >= 0), 'No_relationships must be greater equal 0'
    tables = list(schema.tables)
    random.shuffle(tables)
    joined_tables = {tables[0].table_name}
    chosen = set()
    for _ in range(no_relationships):
        # Relationships with exactly one endpoint inside the current join.
        candidates = []
        for rel in schema.relationships:
            if rel.identifier in chosen:
                continue
            start_in = rel.start in joined_tables
            end_in = rel.end in joined_tables
            if start_in and not end_in:
                candidates.append((rel.identifier, rel.end))
            elif end_in and not start_in:
                candidates.append((rel.identifier, rel.start))
        random.shuffle(candidates)
        if not candidates:
            # Join cannot be extended further.
            return (list(chosen), joined_tables)
        rel_id, new_table = candidates[0]
        joined_tables.add(new_table)
        chosen.add(rel_id)
    return (list(chosen), joined_tables)
.dataclass
class TrainState():
    """Bundled training state passed through jit-compiled train steps.

    NOTE(review): '.dataclass' above looks like a stripped
    '@flax.struct.dataclass' decorator — confirm against the original.
    """
    # Global training step counter.
    step: int
    # Model parameters and mutable collections.
    variables: flax.core.FrozenDict[(str, Any)]
    # Loss-scaling state for mixed-precision training.
    dynamic_scale: flax.optim.DynamicScale
    # Optimizer transformation; excluded from the pytree (static).
    opt_tx: optax.GradientTransformation = flax.struct.field(pytree_node=False)
    # Optimizer state matching `opt_tx`.
    opt_state: optax.OptState
    # Exponential-moving-average copies of the parameters.
    ema: EmaState
class ManifoldSubsetFiniteFamily(ManifoldObjectFiniteFamily):
    """Finite family whose elements are manifold subsets."""

    def from_subsets_or_families(cls, *subsets_or_families):
        """Build a family from subsets and/or iterables of subsets.

        NOTE(review): takes ``cls`` as the first argument, so this was
        presumably a @classmethod whose decorator was stripped — confirm.
        """
        def generate_subsets():
            # Local import to avoid a circular module dependency.
            from sage.manifolds.subset import ManifoldSubset
            for arg in subsets_or_families:
                if isinstance(arg, ManifoldSubset):
                    (yield arg)
                else:
                    # Flatten nested iterables/families of subsets.
                    (yield from arg)
        return cls(generate_subsets())

    def _repr_object_type(self):
        """Return 'open subsets' when every member is open, else 'subsets'."""
        if all((subset.is_open() for subset in self)):
            return 'open subsets'
        else:
            return 'subsets'
def random_hparams(algorithm, dataset, seed, larger_batch=False):
    """Return the seed-dependent random value of every hyperparameter.

    `_hparams` maps each name to a (default, random) pair; only the random
    component is kept here.
    """
    sampled = _hparams(algorithm, dataset, seed, larger_batch=larger_batch)
    return {name: random_val for (name, (default_val, random_val)) in sampled.items()}
_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_cpu_debug_snode_writer_out_of_bound_negative():
    """Negative out-of-bounds writes must trip the debug bounds assertion.

    NOTE(review): the call above appears to be a stripped
    '@test_utils.test(...)' decorator — confirm against the original.
    """
    x = ti.field(ti.f32, shape=3)
    with pytest.raises(AssertionError):
        # Index -1 is out of bounds when debug assertions are enabled.
        x[(- 1)] = 10.0
class Segmentation(Chunk):
def __init__(self, array: np.ndarray, **kwargs):
super().__init__(array, **kwargs)
assert (array.ndim == 3)
assert np.issubdtype(array.dtype, np.integer)
def from_chunk(cls, chunk):
assert isinstance(chunk, Chunk)
return cls(chunk.array, voxel_offset=chunk.voxel_offset, voxel_size=chunk.voxel_size)
def evaluate(self, groundtruth, size_threshold: int=1000):
if (not np.issubdtype(self.dtype, np.uint64)):
this = self.astype(np.uint64)
else:
this = self
if (not np.issubdtype(groundtruth.dtype, np.uint64)):
groundtruth = groundtruth.astype(np.uint64)
if isinstance(groundtruth, Chunk):
groundtruth = groundtruth.array
rand_index = evaluate.rand_index(this.array, groundtruth)
adjusted_rand_index = evaluate.adj_rand_index(this.array, groundtruth)
variation_of_information = evaluate.vi(this.array, groundtruth)
fowlkes_mallows_index = evaluate.fm_index(this.array, groundtruth)
edit_distance = evaluate.edit_distance(this.array, groundtruth, size_threshold=size_threshold)
print(f'rand index: {rand_index: .3f}')
print(f'adjusted rand index: {adjusted_rand_index: .3f}')
print(f'variation of information: {variation_of_information: .3f}')
print(f'edit distance: {edit_distance}')
print(f'Fowlkes Mallows Index: {fowlkes_mallows_index: .3f}')
ret = {}
ret['rand_index'] = rand_index
ret['adjusted_rand_index'] = adjusted_rand_index
ret['variation_of_information'] = variation_of_information
ret['fowlkes_mallows_index'] = fowlkes_mallows_index
ret['edit_distance'] = edit_distance
return ret
def remap(self, base_id: int=0):
fastremap.renumber(self.array, preserve_zero=True, in_place=True)
self.array = self.array.astype(np.uint64)
if (base_id > 0):
self.array[(self.array > 0)] += base_id
new_base_id = self.max()
return new_base_id
def mask_fragments(self, voxel_num_threshold: int):
(uniq, counts) = fastremap.unique(self.array, return_counts=True)
fragment_ids = uniq[(counts <= voxel_num_threshold)]
print(f'masking out {len(fragment_ids)} fragments in {len(uniq)} with a percentage of {(len(fragment_ids) / len(uniq))}')
self.array = fastremap.mask(self.array, fragment_ids)
def mask_except(self, selected_obj_ids: Union[(str, list, set)]):
    """Zero out every object except the selected ids.

    ``selected_obj_ids`` may be a collection of ids, a path to a JSON list
    (``*.json``, possibly in cloud storage), or a comma-separated string.
    """
    if selected_obj_ids is None:
        print('we have not selected any objects to mask out.')
        return
    if isinstance(selected_obj_ids, str):
        if selected_obj_ids.endswith('.json'):
            # ids are stored as a JSON list next to the segmentation
            dirname, fname = os.path.split(selected_obj_ids)
            ids_str = CloudFiles(dirname).get(fname)
            selected_obj_ids = set(json.loads(ids_str))
            assert len(selected_obj_ids) > 0
            print(f'number of selected objects: {len(selected_obj_ids)}')
        else:
            # comma separated list of ids
            selected_obj_ids = set([int(id) for id in selected_obj_ids.split(',')])
    self.array = fastremap.mask_except(self.array, list(selected_obj_ids))
class Baseline(object):
    """Classical-ML baselines for a binary prediction target.

    Loads pre-computed positive/negative sample matrices from
    ``intermediate/<target>.{pos,neg}.mat.pkl``, collapses each sample's
    event matrix into a single feature vector by summing over time, and
    cross-validates a bank of classifiers (LR/KNN/SVM/RF/XGB/MLP).
    """

    def __init__(self, target, config=None):
        """
        target: file-name prefix of the pickled data files.
        config: optional settings dict. (The original used a mutable
            default ``config={}``, shared across instances.)
        """
        self.config = {} if config is None else config
        self.X_pos, self.y_pos = [], []
        self.X_neg, self.y_neg = [], []
        self.intmd_path = 'intermediate/'
        self.target = target

    def load_data(self):
        """Load the pickled matrices and build summed feature vectors.

        Returns ((X_pos, X_neg), (y_pos, y_neg)).
        """
        # `with` already closes the file; the original's f.close() was redundant
        with open(self.intmd_path + self.target + '.pos.mat.pkl', 'rb') as f:
            X_pos_mat, y_pos_mat = pkl.load(f)
        with open(self.intmd_path + self.target + '.neg.mat.pkl', 'rb') as f:
            X_neg_mat, y_neg_mat = pkl.load(f)
        print('The number of positive samles is: ', len(y_pos_mat))
        print('The number of negative samles is: ', len(y_neg_mat))
        # collapse each sample's (events x features) matrix into one vector
        for s, array in X_pos_mat.items():
            self.X_pos.append(np.sum(array, axis=0))
            self.y_pos.append(y_pos_mat[s])
        for s, array in X_neg_mat.items():
            self.X_neg.append(np.sum(array, axis=0))
            self.y_neg.append(y_neg_mat[s])
        return ((self.X_pos, self.X_neg), (self.y_pos, self.y_neg))

    def get_classifiers(self, X, y):
        """Run 5-fold stratified CV over the model bank and print metrics.

        X: (X_pos, X_neg) feature lists; y: (y_pos, y_neg) label lists.
        """
        X_pos, X_neg = X
        y_pos, y_neg = y
        X = np.concatenate((X_pos, X_neg), axis=0)
        y = np.concatenate((y_pos, y_neg), axis=0)
        p = np.random.permutation(len(X))
        X, y = X[p], y[p]
        n_fold = 5
        # shuffle=True is required for random_state to take effect (recent
        # sklearn raises ValueError if random_state is set without shuffle)
        skf = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=99991)
        models = {'LR': lr, 'KNN': knn, 'SVM': svm, 'RF': rf, 'XGB': xgb, 'MLP': MLP}
        ifold = 0
        results = dict()
        Res = {'aucroc': [], 'spec': [], 'sen': [], 'aucprc': [], 'avepre': [], 'f1score': []}
        for train_index, test_index in skf.split(X, y):
            ifold += 1
            print('The %d-th fold' % ifold)
            results[ifold] = dict()
            X_tr, X_te = X[train_index], X[test_index]
            y_tr, y_te = y[train_index], y[test_index]
            for k, m in models.items():
                print('The current model for optimizing is: ' + k)
                if k == 'MLP':
                    mlp = m(X_tr.shape[1], 2)
                    fit_auc, fit_accuracy, fit_losses = mlp.fit(X_tr, y_tr, X_te, y_te)
                    string, auc, accuracy, loss, yhat = mlp.evaluate(X_te, y_te)
                    yhat = np.array(yhat, dtype='float32')
                else:
                    m = m.fit(X_tr, y_tr)
                    yhat = m.predict(X_te)
                aucroc = roc_auc_score(y_te, yhat)
                avepre = average_precision_score(y_te, yhat)
                # average precision is the standard estimator of the area
                # under the precision-recall curve; the original appended an
                # undefined name ``aucprc`` (NameError at runtime)
                aucprc = avepre
                tn, fp, fn, tp = confusion_matrix(y_te, yhat).ravel()
                # the third positional argument of f1_score is ``labels``;
                # 'micro' must be passed via ``average=``
                f1score = f1_score(y_te, yhat, average='micro')
                spec = tn / (tn + fp)
                sen = tp / (tp + fn)
                models[k] = m
                Res['aucroc'].append(aucroc)
                Res['spec'].append(spec)
                Res['sen'].append(sen)
                Res['aucprc'].append(aucprc)
                Res['avepre'].append(avepre)
                Res['f1score'].append(f1score)
        print('aucroc mean: ', np.mean(np.array(Res['aucroc'])))
        print('aucroc std: ', np.std(np.array(Res['aucroc'])))
        print('spec mean: ', np.mean(np.array(Res['spec'])))
        print('spec std: ', np.std(np.array(Res['spec'])))
        print('sen mean: ', np.mean(np.array(Res['sen'])))
        print('sen std: ', np.std(np.array(Res['sen'])))
        print('avepre mean: ', np.mean(np.array(Res['avepre'])))
        print('avepre std: ', np.std(np.array(Res['avepre'])))
        print('f1score mean: ', np.mean(np.array(Res['f1score'])))
        print('f1score std: ', np.std(np.array(Res['f1score'])))
# Deprecated alias scheduled for removal in 0.19.0.
# NOTE(review): `_class(...)` looks like a deprecation decorator that lost
# its `@` prefix (and part of its name) in extraction — confirm against the
# original source before relying on this call.
_class(removal_version='0.19.0', future_warn=True)
class SimpleSmoothDeriv(SmoothnessFirstOrder):
    # Behavior is entirely inherited from SmoothnessFirstOrder.
    pass
def get_inference_trainer_params():
    """Build the config dict for the latent inference trainer.

    Latent training is effectively disabled (train_every_n_steps=0)
    when USE_LATENT is False.
    """
    trainer_params = d(
        train_every_n_steps=1 if USE_LATENT else 0,
        latent_learning_rate=0.0005,
        log_every_n_steps=100.0,
        save_every_n_steps=0,
        train_min_buffer_size=2,
        max_steps_per_rollout=100,
        obs_to_output_obs_fn=obs_to_output_obs_fn,
    )
    return d(cls=LatentInferenceTrainer, params=trainer_params)
class EvaluateOptions(TestBaseOptions):
    """Command-line options for evaluation runs, extending the base test options."""

    def __init__(self):
        super().__init__()
        add = self.parser.add_argument
        add('--dataset', type=str, default='mixamo', choices=['mixamo', 'humanact12'],
            help='on which dataset to evaluate')
        add('--rot_only', action='store_true',
            help='refrain from predicting global root position when predicting rotations')
        add('--test_model', action='store_true',
            help='generate motions with model and evaluate')
        add('--test_actor', action='store_true',
            help='evaluate results from ACTOR model')
        add('--act_rec_gt_path', type=str,
            help='path to ground truth file that was used during action recognition train. Not needed unless is different from the one used by the synthesis network')
        add('--actor_motions_path', type=str,
            help='path to randomly generated actor motions')
        add('--fast', action='store_true',
            help='skip metrics that require long evaluation')
def parse_function(*metrics, directory='', args=None, end_signal=None):
    """Parse ``log.txt`` under every subdirectory of ``directory``.

    Each ``metric`` is a dict with a 'name' and a compiled 'regex' whose
    first capture group is the numeric value.  Matches are only recorded
    after ``end_signal`` has appeared in the file (i.e. the run finished).
    Prints per-file values and a summary (mean +- 95% CI or std) and
    returns an OrderedDict mapping metric name -> average.

    NOTE(review): ``args.ci95`` is read unconditionally below, so despite
    the ``args=None`` default, callers must pass a namespace — confirm.
    """
    print(f'Parsing files in {directory}')
    subdirs = listdir_nohidden(directory, sort=True)
    outputs = []
    for subdir in subdirs:
        fpath = osp.join(directory, subdir, 'log.txt')
        assert check_isfile(fpath)
        good_to_go = False
        output = OrderedDict()
        with open(fpath, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip()
                # once the end marker is seen, later metric matches count
                if (line == end_signal):
                    good_to_go = True
                for metric in metrics:
                    match = metric['regex'].search(line)
                    if (match and good_to_go):
                        if ('file' not in output):
                            output['file'] = fpath
                        num = float(match.group(1))
                        name = metric['name']
                        # later matches overwrite earlier ones: the final
                        # occurrence in the log wins
                        output[name] = num
        if output:
            outputs.append(output)
    assert (len(outputs) > 0), f'Nothing found in {directory}'
    metrics_results = defaultdict(list)
    for output in outputs:
        msg = ''
        for (key, value) in output.items():
            if isinstance(value, float):
                msg += f'{key}: {value:.2f}%. '
            else:
                msg += f'{key}: {value}. '
            if (key != 'file'):
                metrics_results[key].append(value)
        print(msg)
    output_results = OrderedDict()
    print('===')
    print(f'Summary of directory: {directory}')
    for (key, values) in metrics_results.items():
        avg = np.mean(values)
        # 95% confidence interval when requested, otherwise plain std-dev
        std = (compute_ci95(values) if args.ci95 else np.std(values))
        print(f'* {key}: {avg:.2f}% +- {std:.2f}%')
        output_results[key] = avg
    print('===')
    return output_results
def run_simulator(agents, config, treatment_assignment, seed):
    """Run the civil-violence simulation on a treated copy of the population.

    Agents are deep-copied so the caller's list is untouched; agents marked
    as treated get their risk aversion raised to 0.9 before simulation.
    """
    population = []
    for agent, treated in zip(agents, treatment_assignment):
        clone = copy.deepcopy(agent)
        if treated:
            # the treatment effect: higher risk aversion
            clone.risk_aversion = 0.9
        population.append(clone)
    return civil_violence.simulate(population, config, seed)
def concatenate_two_boxes(box_a: spaces.Box, box_b: spaces.Box) -> spaces.Box:
    """Concatenate two Box spaces into one, joining their bounds end to end.

    The result's dtype is the NumPy promotion of the two input dtypes.
    Raises ValueError if either argument is not a Box.
    """
    if not (isinstance(box_a, spaces.Box) and isinstance(box_b, spaces.Box)):
        raise ValueError('This method will only concatenate Box spaces')
    low = np.concatenate([box_a.low, box_b.low])
    high = np.concatenate([box_a.high, box_b.high])
    common_dtype = np.result_type(box_a.dtype, box_b.dtype)
    return spaces.Box(low=low, high=high, dtype=common_dtype)
def has_module(module_name):
    """Return True if ``module_name`` (possibly dotted) is importable.

    Checks each prefix of the dotted path with importlib.util.find_spec,
    so 'a.b.c' requires 'a', 'a.b' and 'a.b.c' to all resolve.  Note that
    probing a submodule imports its parent packages as a side effect.
    """
    # Fix 1: `import importlib` alone does not guarantee the `util`
    # submodule attribute exists; import it explicitly.
    # Fix 2: the Python 2 `imp` fallback was dead code in a file that
    # already uses f-strings (3.6+), so it is removed.
    import importlib.util
    name_parts = module_name.split('.')
    for i in range(len(name_parts)):
        prefix = '.'.join(name_parts[:i + 1])
        try:
            if importlib.util.find_spec(prefix) is None:
                return False
        except ModuleNotFoundError:
            # find_spec raises if a parent package exists but is not a package
            return False
    return True
class VocabularyShared(VocabularyBase):
    """Vocabulary shared between the source and target sides of a dataset.

    Loads an existing vocabulary file when present; otherwise builds one
    from raw source and target training data and writes it to disk.
    """

    def __init__(self, vocab_path, data_raw_src=None, data_raw_tgt=None, lower=True):
        """
        vocab_path: path of the vocabulary file (created if missing).
        data_raw_src / data_raw_tgt: raw token data, required only when
            the vocabulary file does not yet exist.
        lower: whether tokens are lowercased (used by the base class).
        """
        self.lower = lower
        self.id2tok = {}
        self.tok2id = {}
        if not check_file_exists(vocab_path):
            assert ((data_raw_src is not None) and (data_raw_tgt is not None)), 'You need to process train data ** before ** creating a vocabulary!'
            self.create_vocabulary(raw_data_src=data_raw_src, raw_data_tgt=data_raw_tgt, vocab_path=vocab_path)
        else:
            self.load_vocabulary(vocab_path)

    def create_vocabulary(self, raw_data_src, raw_data_tgt, vocab_path):
        """Build the vocabulary from both data sides and write it to ``vocab_path``."""
        logger.info('Creating vocabulary')
        assert (type(raw_data_src) == type(raw_data_tgt) == list)
        # BUG FIX: copy START_VOCAB — the original bound the module-level
        # constant and extended it in place, permanently corrupting it for
        # every subsequent vocabulary build in the same process.
        vocablist = list(constants.START_VOCAB)
        vocablist.extend(self.process_raw_data(raw_data_src))
        vocablist.extend(self.process_raw_data(raw_data_tgt))
        with open(vocab_path, 'w') as vocab_file:
            for w in vocablist:
                vocab_file.write('%s\n' % w)
        for idx, tok in enumerate(vocablist):
            self.id2tok[idx] = tok
            self.tok2id[tok] = idx
        logger.info('Created vocabulary of size %d' % self.size)
class TensorRef():
    """Wrap a host tensor-like object as a CUTLASS tensor reference
    (raw pointer + element type + layout)."""

    def __init__(self, tensor, dtype, layout) -> None:
        """
        tensor: np.ndarray, torch.Tensor, cupy ndarray, cuda.CUdeviceptr,
            or a raw integer address.
        dtype: element-type constructor used by cutlass (invoked as dtype(0)).
        layout: cutlass layout object describing the memory layout.

        Raises NotImplementedError for unsupported tensor containers.
        """
        # Extract the raw (device) address from whichever container was passed.
        if isinstance(tensor, np.ndarray):
            ptr = cuda.CUdeviceptr(tensor.__array_interface__['data'][0])
        elif (torch_available and isinstance(tensor, torch.Tensor)):
            ptr = cuda.CUdeviceptr(tensor.data_ptr())
        elif (cupy_available and isinstance(tensor, cp.ndarray)):
            ptr = cuda.CUdeviceptr(int(tensor.data.ptr))
        elif isinstance(tensor, cuda.CUdeviceptr):
            ptr = tensor
        elif isinstance(tensor, int):
            # already a raw address
            ptr = cuda.CUdeviceptr(tensor)
        else:
            raise NotImplementedError(tensor)
        self.tensor_ref = cutlass.get_tensor_ref(int(ptr), dtype(0), layout)
class IMECEncoder():
    """iMEC steganographic encoder.

    Hides a bit string inside samples drawn from a generative ``medium``
    (e.g. a language model) by repeatedly coupling the medium's next-token
    distribution with the encoder's belief over each message chunk via
    minimum-entropy coupling (MEC).

    The message is split into chunks of ``block_size`` bits; each chunk is
    tracked with a belief distribution over its possible values.  At every
    step the chunk whose belief has the highest entropy is coupled with the
    medium's distribution, an action (token) is sampled from the coupling,
    and the chunk's belief is conditioned on that action.  Encoding stops
    once every belief entropy falls below ``belief_entropy_threshold``.
    """

    def __init__(self, medium, block_size=(2 ** 8), **kwargs):
        self.medium = medium
        self.context = kwargs.get('context', None)
        self.block_size = block_size
        self.send_block_size_header = kwargs.get('send_block_size_header', None)
        self.send_n_chunks_header = kwargs.get('send_n_chunks_header', True)
        # pad the final (shorter) chunk's belief to the full 2**block_size states
        self.pad_last_belief_chunk = kwargs.get('pad_last_belief_chunk', True)
        self.mec_mode = kwargs.get('mec_mode', 'dense')
        self.mec_atol = kwargs.get('mec_atol', 1e-07)
        self.mec_warning_atol = kwargs.get('mec_warning_atol', 1e-05)
        # beliefs below this entropy count as fully resolved
        self.belief_entropy_threshold = kwargs.get('belief_entropy_threshold', 1e-09)
        self.clean_up_output = kwargs.get('clean_up_output', False)
        self.seed = kwargs.get('seed', None)
        self.g = np.random.default_rng(self.seed)
        self.use_lowmem_variant = kwargs.get('use_lowmem_variant', 0)

    def encode(self, private_message_bit: bitarray.bitarray, context: str=None, verbose=False):
        """Encode ``private_message_bit`` into medium output.

        Returns (output_str, output, stats): the human-readable output, the
        raw output, and a dict of timing/entropy/divergence statistics.
        """
        t_iter_1 = time.time()
        if verbose:
            print('Starting reset...')
        (probs, info) = self.medium.reset(context=context)
        if verbose:
            print('Reset finished!')
        # split the message into full-size chunks plus an optional remainder
        block_sizes = [self.block_size for i in range(int((len(private_message_bit) // self.block_size)))]
        if (len(private_message_bit) % self.block_size):
            block_sizes += [int((len(private_message_bit) % self.block_size))]
        idx = 0
        msg_chunks = []
        for cs in block_sizes:
            # interpret each cs-bit slice as a big-endian integer
            msg_chunk = np.array(private_message_bit[idx:(idx + cs)].tolist()).dot((1 << np.arange(cs, dtype='int64')[::(- 1)]))
            msg_chunks.append(msg_chunk)
            idx += cs
        # uniform prior belief over each chunk's value space
        if self.pad_last_belief_chunk:
            beliefs = [(np.zeros((2 ** self.block_size), dtype=np.longdouble) + (1.0 / (2 ** self.block_size))) for (k, _) in enumerate(block_sizes)]
        else:
            # BUG FIX: a uniform distribution over 2**cs states has mass
            # 1/(2**cs) per state; the original used 1/(cs**2), which does
            # not normalize over the chunk's value space.
            beliefs = [(np.zeros((2 ** cs), dtype=np.longdouble) + (1.0 / (2 ** cs))) for (k, cs) in enumerate(block_sizes)]
        stats_traj = defaultdict(list)
        stats = {'block_sizes': block_sizes, 'bitlen_msg_enc': len(private_message_bit), 'loop_error': 0.0}
        n_steps = 0
        while True:
            n_steps += 1
            belief_entropies = np.array([entropy2(b) for b in beliefs])
            if (max(belief_entropies) <= self.belief_entropy_threshold):
                # all chunks resolved; optionally keep sampling until the
                # medium signals a clean end of line
                if self.clean_up_output:
                    if info.get('end_of_line', False):
                        break
                else:
                    break
            # work on the least-resolved chunk next
            next_chunk_id = np.argmax(belief_entropies)
            next_chunk_content = msg_chunks[next_chunk_id]
            t1 = time.time()
            t00 = time.time()
            mec_dict = minimum_entropy_coupling(beliefs[next_chunk_id], probs, select_row=next_chunk_content, select_col='all', mode=self.mec_mode, algo_atol=self.mec_atol, warning_atol=self.mec_warning_atol)
            t01 = (time.time() - t00)
            stats_traj['mec_time_pure[s]'].append(t01)
            dt = (time.time() - t1)
            stats_traj['mec_step_time[s]'].append(dt)
            if ('M_entropy' in mec_dict):
                stats_traj['avg_HM[bit]'].append(mec_dict['M_entropy'])
                stats_traj['avg_Hq[bit]'].append(mec_dict['q_entropy'])
                # fall back to recomputing the KL when the coupling reports
                # a non-finite value
                kl_q = (mec_dict['kl_q'] if np.isfinite(mec_dict['kl_q']) else kl2((np.array(mec_dict['q_est'], dtype=np.longdouble) / np.array(mec_dict['q_est'], dtype=np.longdouble).sum()), (probs / probs.sum())))
                kl_q = np.around(kl_q, decimals=18)
                stats_traj['avg_KL'].append(kl_q)
                stats_traj['avg_KL_p'].append(mec_dict['kl_p'])
                stats_traj['additive_gap'].append(mec_dict['additive_gap'])
                stats_traj['mec_time[s]'].append(mec_dict['mec_time[s]'])
            # sample an action from the coupling row of the message chunk
            M_row_next_chunk = mec_dict['M_selected_row']
            M_row_next_chunk = (M_row_next_chunk / M_row_next_chunk.sum())
            next_action = self.g.choice(M_row_next_chunk.shape[0], p=M_row_next_chunk.astype(np.float64))
            # condition the chunk's belief on the sampled action
            belief_update = mec_dict['M_colfirst'][next_action]
            beliefs[next_chunk_id] = (belief_update / belief_update.sum())
            belief_entropy_delta = (belief_entropies[next_chunk_id] - entropy2(beliefs[next_chunk_id]))
            delta_t_step_no_medium = (time.time() - t_iter_1)
            stats_traj['enc_t_step_no_medium'].append(delta_t_step_no_medium)
            t_medium_1 = time.time()
            (probs, info) = self.medium.step(self.medium.action_labels[next_action])
            delta_t_medium = (time.time() - t_medium_1)
            stats_traj['enc_t_medium_per_step'].append(delta_t_medium)
            t_iter_1 = time.time()
            if ('kl(sampled|true)' in info):
                stats_traj['kl(sampled|true)'].append(info['kl(sampled|true)'])
                stats_traj['kl(uniform|true)'].append(info['kl(uniform|true)'])
                stats_traj['kl(sampled|uniform)'].append(info['kl(sampled|uniform)'])
                stats_traj['kl(uniform|sampled)'].append(info['kl(uniform|sampled)'])
                stats_traj['chisquare_p(sampled|true)'].append(info['chisquare_p(sampled|true)'])
                stats_traj['chisquare_p(uniform|true)'].append(info['chisquare_p(uniform|true)'])
            stats_traj['medium_entropy_raw'].append(info['medium_entropy_raw'])
            stats_traj['medium_entropy'].append(info['medium_entropy'])
            stats_traj['medium_entropy_over_raw'].append((info['medium_entropy'] / info['medium_entropy_raw']))
            stats_traj['active_belief_entropy_delta'].append(belief_entropy_delta)
            # NOTE(review): the same key receives both the over-medium and
            # over-raw ratios, as in the original; possibly one of these was
            # meant to use a distinct key — confirm before changing.
            stats_traj['active_belief_entropy(delta_over_raw)'].append((belief_entropy_delta / info['medium_entropy']))
            stats_traj['active_belief_entropy(delta_over_raw)'].append((belief_entropy_delta / info['medium_entropy_raw']))
            stats_traj['medium_logit_dim'].append(probs.shape[0])
        # aggregate trajectory statistics (mean/std/percentiles), skipping
        # the per-iteration divergence series which are reported separately
        for (k, v) in stats_traj.items():
            # BUG FIX: the skip list named 'chisquare_p(sampled|true)' twice
            # and omitted 'chisquare_p(uniform|true)'
            if (k in ['kl(sampled|true)', 'kl(uniform|true)', 'kl(sampled|uniform)', 'kl(uniform|sampled)', 'chisquare_p(sampled|true)', 'chisquare_p(uniform|true)']):
                continue
            stats[(k + '/mean')] = np.array(v).mean()
            stats[(k + '/std')] = np.array(v).std()
            stats[(k + '/80')] = np.sort(np.array(v))[int((len(v) * 0.8))]
            stats[(k + '/20')] = np.sort(np.array(v))[int((len(v) * 0.2))]
            stats[(k + '/95')] = np.sort(np.array(v))[int((len(v) * 0.95))]
            stats[(k + '/5')] = np.sort(np.array(v))[int((len(v) * 0.05))]
        # sample the divergence series every 100 steps
        if ('kl(sampled|true)' in stats_traj):
            i = 0
            while (i < len(stats_traj['kl(sampled|true)'])):
                stats['kl(sampled|true)_it{}'.format(i)] = stats_traj['kl(sampled|true)'][i]
                stats['kl(uniform|true)_it{}'.format(i)] = stats_traj['kl(uniform|true)'][i]
                stats['kl(uniform|sampled)_it{}'.format(i)] = stats_traj['kl(uniform|sampled)'][i]
                stats['kl(sampled|uniform)_it{}'.format(i)] = stats_traj['kl(sampled|uniform)'][i]
                stats['chisquare_p(sampled|true)_it{}'.format(i)] = stats_traj['chisquare_p(sampled|true)'][i]
                stats['chisquare_p(uniform|true)_it{}'.format(i)] = stats_traj['chisquare_p(uniform|true)'][i]
                stats['abs(kl(sampled|true),kl(uniform|true))_it{}'.format(i)] = abs((stats_traj['kl(sampled|true)'][i] - stats_traj['kl(uniform|true)'][i]))
                stats['(kl(sampled|true)-kl(uniform|true))_it{}'.format(i)] = (stats_traj['kl(sampled|true)'][i] - stats_traj['kl(uniform|true)'][i])
                i += 100
        output = self.medium.get_output(clean_up=self.clean_up_output)
        stats['n_steps'] = n_steps
        stats['bits_per_step'] = (len(private_message_bit) / float(n_steps))
        stats['steps_per_bit'] = (n_steps / float(len(private_message_bit)))
        stats['eff'] = (len(private_message_bit) / sum(stats_traj['medium_entropy']))
        stats['eff_output'] = (len(private_message_bit) / len(output))
        stats['eff_raw'] = (len(private_message_bit) / sum(stats_traj['medium_entropy_raw']))
        output_str = self.medium.humanify(output)
        return (output_str, output, stats)
class Resnet_Imb_YOTO_ep100_cifar100_2():
    """Experiment config: ResNet18 + YOTO on class-imbalanced CIFAR-100,
    trained 100 epochs with a class-balanced cross-entropy loss."""

    def __init__(self):
        self.set_config()

    def set_config(self):
        """Derive run settings; output files are prefixed with the class name."""
        self.filename_head = (self.__class__.__name__ + '_')
        # set to a checkpoint path to resume training
        self.checkpoint_path = None

    def get_model(self):
        """Build a YOTO-conditioned ResNet18 for 100 classes.

        The conditioning parameter (the class-balanced loss beta) is sampled
        from (0.9, 0.99999) under a log1m-uniform distribution, with 0.999
        as the fixed evaluation value.
        """
        param_ranges = ((0.9, 0.99999),)
        params = (0.999,)
        param_dist = 'log1m_uniform'
        param_sampler = resnet_yoto.ParamSampler(param_ranges, params, param_dist)
        model = resnet_yoto.ResNet18(100, param_sampler)
        return model

    def get_dataset(self, return_target=True):
        """Build imbalanced CIFAR-100 train/test datasets.

        The last 50 classes keep only 5% of their samples (usage_rate),
        producing the class imbalance this experiment studies.
        """
        DOWNLOAD = False
        tr_transformer = alb.Compose([albtr.Flip(p=0.5), albtr.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=15, p=0.5), albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
        ts_transformer = alb.Compose([albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
        usage_rate = (((1,) * 50) + ((0.05,) * 50))
        seed = 2020
        (tr_ds, tr_tg) = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer, usage_rate, seed, return_target)
        (ts_ds, ts_tg) = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer, None, None, return_target)
        if return_target:
            return (tr_ds, ts_ds, tr_tg, ts_tg)
        else:
            return (tr_ds, ts_ds)

    def train_model(self, use_checkpoint=False, fine_turning=False):
        """Train (optionally resuming from a checkpoint) then sweep YOTO betas.

        fine_turning resets the epoch counter and optimizer state while
        keeping the loaded weights.
        """
        (tr_ds, ts_ds, tr_tg, ts_tg) = self.get_dataset(return_target=True)
        if use_checkpoint:
            CP = get_checkpoint(self.checkpoint_path)
        else:
            CP = None
        model = self.get_model()
        if (CP is not None):
            model.load_state_dict(CP['state_dict'])
        TR_BATCH_SIZE = 128
        TS_BATCH_SIZE = 512
        tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
        ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
        LR = 0.1
        opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=0.0005)
        if (CP is not None):
            if (not fine_turning):
                opt.load_state_dict(CP['optimizer'])
        # class-balanced CE losses built from the (imbalanced) label counts
        tr_criterion = cb_loss.ClassBalanced_CELoss(tr_tg, 100, beta=0.999)
        vl_criterion = cb_loss.ClassBalanced_CELoss(ts_tg, 100, beta=0.999)
        grad_accum_steps = 1
        start_epoch = (0 if ((CP is None) or fine_turning) else CP['epoch'])
        EPOCHS = 100
        warmup_epoch = 0
        # LR decays x0.1 at epochs 51 and 86 (the 101 milestone never fires)
        step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[51, 86, 101], gamma=0.1)
        model = training.train_model(model, tr_loader, ts_loader, opt, tr_criterion, vl_criterion, grad_accum_steps, start_epoch, EPOCHS, warmup_epoch, step_scheduler, self.filename_head, use_yoto=True)
        # NOTE(review): the trailing `0., 0.` entries in this beta grid look
        # like a truncated value (possibly 0.99999995/0.99999999) — confirm
        # against the original experiment definition.
        training.test_yoto(model, ts_loader, param_grids=((0.0, 0.5, 0.9, 0.95, 0.99, 0.995, 0.999, 0.9995, 0.9999, 0.99995, 0.99999, 0.999995, 0.999999, 0.9999995, 0.9999999, 0., 0.),), filename_head=self.filename_head)
        return
def test_malformed1():
    """Loading a malformed .mat file must raise ValueError."""
    path = pjoin(TEST_DATA_PATH, 'malformed1.mat')
    with open(path, 'rb') as fobj:
        assert_raises(ValueError, loadmat, fobj)
class QAData(object):
    """Open-domain QA dataset: loads question/answer JSON, tokenizes it
    (with on-disk caching of the tokenized tensors), and exposes dataset /
    dataloader objects plus exact-match evaluation."""

    def __init__(self, logger, args, data_path, is_training):
        """
        logger: logger used for progress messages.
        args: run arguments (debug, do_lowercase, append_another_bos,
            max_input_length, output_dir, prefix, ...).
        data_path: path to a JSON file whose name contains train/dev/test.
        is_training: whether this split is used for training.
        """
        self.data_path = data_path
        # in debug mode the (smaller) dev split stands in for train
        if args.debug:
            self.data_path = data_path.replace('train', 'dev')
        with open(self.data_path, 'r') as f:
            self.data = json.load(f)
        # some files wrap the example list in {'data': [...]}
        if (type(self.data) == dict):
            self.data = self.data['data']
        if args.debug:
            self.data = self.data[:40]
        assert (type(self.data) == list)
        assert all([('id' in d) for d in self.data]), self.data[0].keys()
        # normalize ids to strings for stable dict keys
        if (type(self.data[0]['id']) == int):
            for i in range(len(self.data)):
                self.data[i]['id'] = str(self.data[i]['id'])
        self.index2id = {i: d['id'] for (i, d) in enumerate(self.data)}
        self.id2index = {d['id']: i for (i, d) in enumerate(self.data)}
        self.is_training = is_training
        # disable the pre-tokenized cache when debugging
        self.load = (not args.debug)
        self.logger = logger
        self.args = args
        # infer the split name from the file path
        if ('test' in self.data_path):
            self.data_type = 'test'
        elif ('dev' in self.data_path):
            self.data_type = 'dev'
        elif ('train' in self.data_path):
            self.data_type = 'train'
        else:
            raise NotImplementedError()
        self.metric = 'EM'
        self.max_input_length = self.args.max_input_length
        self.tokenizer = None
        self.dataset = None
        self.dataloader = None
        self.cache = None

    def __len__(self):
        return len(self.data)

    def decode(self, tokens):
        """Detokenize one prediction to lower-cased text."""
        return self.tokenizer.decode(tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True).lower()

    def decode_batch(self, tokens):
        return [self.decode(_tokens) for _tokens in tokens]

    def flatten(self, answers):
        """Flatten a list of per-question answer lists.

        Returns (flat_answers, metadata) where metadata[i] = (start, end)
        spans question i's answers inside flat_answers.
        """
        (new_answers, metadata) = ([], [])
        for answer in answers:
            metadata.append((len(new_answers), (len(new_answers) + len(answer))))
            new_answers += answer
        return (new_answers, metadata)

    def load_dataset(self, tokenizer, do_return=False):
        """Tokenize questions/answers (or load the cached tokenization)
        and build the underlying MyQADataset."""
        self.tokenizer = tokenizer
        # cache file is keyed by tokenizer class, e.g. BartTokenized
        postfix = tokenizer.__class__.__name__.replace('zer', 'zed')
        preprocessed_path = os.path.join('/'.join(self.data_path.split('/')[:(- 1)]), self.data_path.split('/')[(- 1)].replace('.json', '-{}.json'.format(postfix)))
        if (self.load and os.path.exists(preprocessed_path)):
            self.logger.info('Loading pre-tokenized data from {}'.format(preprocessed_path))
            with open(preprocessed_path, 'r') as f:
                (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, metadata) = json.load(f)
        else:
            print('Start tokenizing...')
            # ensure every question ends with a question mark
            questions = [(d['question'] if d['question'].endswith('?') else (d['question'] + '?')) for d in self.data]
            answers = [d['answer'] for d in self.data]
            (answers, metadata) = self.flatten(answers)
            if self.args.do_lowercase:
                questions = [question.lower() for question in questions]
                answers = [answer.lower() for answer in answers]
            # some models benefit from a second BOS token
            if self.args.append_another_bos:
                questions = [('<s> ' + question) for question in questions]
                answers = [('<s> ' + answer) for answer in answers]
            question_input = tokenizer.batch_encode_plus(questions, pad_to_max_length=True, max_length=self.args.max_input_length)
            answer_input = tokenizer.batch_encode_plus(answers, pad_to_max_length=True)
            (input_ids, attention_mask) = (question_input['input_ids'], question_input['attention_mask'])
            (decoder_input_ids, decoder_attention_mask) = (answer_input['input_ids'], answer_input['attention_mask'])
            if self.load:
                preprocessed_data = [input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, metadata]
                with open(preprocessed_path, 'w') as f:
                    json.dump([input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, metadata], f)
        self.dataset = MyQADataset(input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, in_metadata=None, out_metadata=metadata, is_training=self.is_training)
        self.logger.info('Loaded {} examples from {} data'.format(len(self.dataset), self.data_type))
        if do_return:
            return self.dataset

    def load_dataloader(self, do_return=False):
        """Wrap the dataset in the project's dataloader."""
        self.dataloader = MyDataLoader(self.args, self.dataset, self.is_training)
        if do_return:
            return self.dataloader

    def evaluate(self, predictions):
        """Exact-match score of one prediction per example, in data order."""
        assert (len(predictions) == len(self)), (len(predictions), len(self))
        ems = []
        for (prediction, dp) in zip(predictions, self.data):
            ems.append(get_exact_match(prediction, dp['answer']))
        return ems

    def save_predictions(self, predictions):
        """Write {id: prediction} JSON into the configured output dir."""
        assert (len(predictions) == len(self)), (len(predictions), len(self))
        prediction_dict = {dp['id']: prediction for (dp, prediction) in zip(self.data, predictions)}
        save_path = os.path.join(self.args.output_dir, '{}predictions.json'.format(self.args.prefix))
        with open(save_path, 'w') as f:
            json.dump(prediction_dict, f)
        self.logger.info('Saved prediction in {}'.format(save_path))
class AmazonViewSavedAddresses(VirtualFunctionTool):
    """Declarative virtual-tool spec: list the user's saved shipping addresses."""
    name = 'AmazonViewSavedAddresses'
    summary = "View the user's saved addresses."
    # no input arguments
    parameters: List[ArgParameter] = []
    # each entry carries a remark, name, phone number and address
    returns: List[ArgReturn] = [{'name': 'addresses', 'type': 'array', 'description': "A list of objects, each containing 'remark', 'name', 'phone_number' and 'address'."}]
    # no declared failure modes
    exceptions: List[ArgException] = []
class AEModule(nn.Module):
    """Fully-connected autoencoder over flattened (n_features x sequence_length)
    windows, with layer widths following a geometric (power-of-two) ladder
    between the input length and the latent size."""

    def __init__(self, n_features, sequence_length, hidden_size, activation=nn.Tanh):
        super().__init__()
        input_length = n_features * sequence_length
        # intermediate widths: powers of two strictly between the latent
        # size (at least 4) and the input length
        exponents = np.arange(max(np.ceil(np.log2(hidden_size)), 2), np.log2(input_length))[1:]
        widths = 2 ** exponents
        # decoder widths read latent -> ... -> input; encoder is the mirror
        dec_setup = np.concatenate([[hidden_size], widths.repeat(2), [input_length]])
        enc_setup = dec_setup[::(- 1)]
        self.encoder = self._build_mlp(enc_setup, activation)
        self.decoder = self._build_mlp(dec_setup, activation)

    @staticmethod
    def _build_mlp(width_pairs, activation):
        # width_pairs holds consecutive (in, out) sizes; interleave Linear
        # and activation layers, dropping the activation after the last Linear
        modules = []
        for fan_in, fan_out in width_pairs.reshape((- 1), 2):
            modules.append(nn.Linear(int(fan_in), int(fan_out)))
            modules.append(activation())
        return nn.Sequential(*modules[:(- 1)])

    def forward(self, x, return_latent=False):
        """Reconstruct x; optionally also return the latent code."""
        flat = x.view(x.shape[0], (- 1)).float()
        latent = self.encoder(flat)
        recon = self.decoder(latent).view(x.shape)
        if return_latent:
            return (recon, latent)
        return recon
def check_scalar(x, name, target_type, *, min_val=None, max_val=None, include_boundaries='both'):
    """Validate that ``x`` is a scalar of ``target_type`` within [min_val, max_val].

    include_boundaries controls which bounds are inclusive
    ('left', 'right', 'both', 'neither').  Returns ``x`` unchanged on
    success; raises TypeError for a wrong type and ValueError for an
    out-of-range value or an inconsistent boundary specification.
    """

    def type_name(t):
        # human-readable type name; builtins and the numbers ABCs get short names
        module = t.__module__
        qualname = t.__qualname__
        if module == 'builtins':
            return qualname
        if t == numbers.Real:
            return 'float'
        if t == numbers.Integral:
            return 'int'
        return f'{module}.{qualname}'

    if not isinstance(x, target_type):
        if isinstance(target_type, tuple):
            types_str = ', '.join(type_name(t) for t in target_type)
            target_type_str = f'{{{types_str}}}'
        else:
            target_type_str = type_name(target_type)
        raise TypeError(f'{name} must be an instance of {target_type_str}, not {type(x).__qualname__}.')

    expected_include_boundaries = ('left', 'right', 'both', 'neither')
    if include_boundaries not in expected_include_boundaries:
        raise ValueError(f'Unknown value for `include_boundaries`: {repr(include_boundaries)}. Possible values are: {expected_include_boundaries}.')
    if max_val is None and include_boundaries == 'right':
        raise ValueError("`include_boundaries`='right' without specifying explicitly `max_val` is inconsistent.")
    if min_val is None and include_boundaries == 'left':
        raise ValueError("`include_boundaries`='left' without specifying explicitly `min_val` is inconsistent.")

    left_inclusive = include_boundaries in ('left', 'both')
    right_inclusive = include_boundaries in ('right', 'both')
    # with an inclusive bound, only values strictly outside the bound fail
    if min_val is not None and (operator.lt if left_inclusive else operator.le)(x, min_val):
        raise ValueError(f"{name} == {x}, must be {('>=' if left_inclusive else '>')} {min_val}.")
    if max_val is not None and (operator.gt if right_inclusive else operator.ge)(x, max_val):
        raise ValueError(f"{name} == {x}, must be {('<=' if right_inclusive else '<')} {max_val}.")
    return x
def render_requirements(filename):
    """Render a requirements template, substituting pinned package versions.

    The file's contents are formatted with str.format against the mapping
    returned by get_pinned_packages().
    """
    pinned = get_pinned_packages()
    with open(filename) as fin:
        # read() replaces the needless ''.join(fin.readlines()) round-trip
        contents = fin.read()
    return contents.format(**pinned)
def convert_tokens_to_ids(vocab, tokens):
    """Map tokens to vocabulary ids, canonicalizing special tokens first.

    Tokens present in SPECIAL_TOKEN_MAPPING are replaced by their mapped
    form before the vocabulary lookup.
    """
    return [vocab[SPECIAL_TOKEN_MAPPING.get(token, token)] for token in tokens]
def test_copy_touch():
    # Materializing a typetracer buffer (here via asarray with a dtype
    # conversion) must record the buffer as touched in the report, keyed
    # by the form's form_key.
    form = ak.forms.NumpyForm('int64', form_key='buffer')
    (layout, report) = typetracer_with_report(form)
    typetracer.asarray(layout.data, dtype=np.int32)
    assert (report.data_touched == ['buffer'])
class TFSpeech2TextPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder used when TensorFlow is not installed."""
    # backends required by the real implementation
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # raises a helpful error if the 'tf' backend is unavailable
        requires_backends(self, ['tf'])
def is_valid_data(object):
    """Return True if ``object`` is usable as data: either it exposes the
    data API (nevents, weights, set_weights, space) or it is a PlottableHistogram.

    Sampled data may raise RuntimeError on the first ``nevents`` probe; in
    that case it is resampled once and probed again.
    """
    can_resample = hasattr(object, 'resample')
    try:
        has_nevents = hasattr(object, 'nevents')
    except RuntimeError:
        if can_resample:
            # sampled data needs an initial resample before nevents works
            object.resample()
            has_nevents = hasattr(object, 'nevents')
        else:
            has_nevents = False
    # probe every attribute unconditionally, mirroring the original's
    # evaluation order (property getters may have side effects)
    has_weights = hasattr(object, 'weights')
    has_weight_setter = hasattr(object, 'set_weights')
    has_space = hasattr(object, 'space')
    is_histlike = isinstance(object, uhi.typing.plottable.PlottableHistogram)
    supports_data_api = has_nevents and has_weights and has_weight_setter and has_space
    return supports_data_api or is_histlike
class NormalisedRastriginBenchmark(Benchmark):
    """QD benchmark on the normalised Rastrigin function (minimisation).

    Individuals live in [0, 1]^nb_features, fitness is normalised to
    [0, 1], and each feature dimension shares the individual domain.
    NOTE(review): ``algorithms.partial`` appears to be a re-export of
    functools.partial — confirm against the algorithms module.
    """

    def __init__(self, nb_features: int=2):
        self.nb_features = nb_features
        ind_domain = (0.0, 1.0)
        super().__init__(fn=algorithms.partial(illumination_rastrigin_normalised, nb_features=nb_features), ind_domain=ind_domain, fitness_domain=((0.0, 1.0),), features_domain=((ind_domain,) * nb_features), default_task='minimisation')
class UniversalImageQualityIndexMetric(Metric):
    """Universal Image Quality Index (UIQI) between generated images and the
    gold reference image, computed with torchmetrics."""

    def __init__(self):
        # torchmetrics metric instantiated lazily on first use
        self._metric = None
        self._device = get_torch_device()

    def __repr__(self):
        return 'UniversalImageQualityIndexMetric()'

    def evaluate(self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int) -> MetricResult:
        """Evaluate all request states; forces parallelism to 1."""
        hlog(f"Setting parallelism from {parallelism} to 1, since computing UIQI with parallelism > 1 isn't supported.")
        return super().evaluate(scenario_state, metric_service, eval_cache_path, parallelism=1)

    def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService, eval_cache_path: str) -> List[Stat]:
        """Score one request's generated images against its gold image.

        Returns a single 'expected_uiqi_score' Stat, or no Stats when there
        are no generated images or the score is NaN/infinite.
        """
        assert (request_state.result is not None)
        request_result: RequestResult = request_state.result
        image_locations: List[str] = gather_generated_image_locations(request_result)
        if (len(image_locations) == 0):
            return []
        gold_image_path: str = get_gold_image_location(request_state)
        score: float = self._compute_uiqi_scores(image_locations, gold_image_path)
        # discard degenerate scores rather than polluting the aggregate
        if (math.isnan(score) or (score == (- math.inf)) or (score == math.inf)):
            return []
        return [Stat(MetricName('expected_uiqi_score')).add(score)]

    def _compute_uiqi_scores(self, generated_image_locations: List[str], reference_image_path: str) -> float:
        """Batch UIQI of the generated images vs. the (repeated) reference."""
        try:
            from torchmetrics import UniversalImageQualityIndex
        except ModuleNotFoundError as e:
            handle_module_not_found_error(e, ['heim'])
        if (self._metric is None):
            self._metric = UniversalImageQualityIndex().to(self._device)
        # both stacks are resized to 256x256 so the batch shapes align
        preprocessing = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
        generated_images: List[torch.Tensor] = []
        reference_images: List[torch.Tensor] = []
        for location in generated_image_locations:
            image = preprocessing(open_image(location))
            generated_images.append(image)
            # the reference is repeated once per generated image
            image = preprocessing(open_image(reference_image_path))
            reference_images.append(image)
        img1: torch.Tensor = torch.stack(generated_images).to(self._device)
        img2: torch.Tensor = torch.stack(reference_images).to(self._device)
        score: float = self._metric(img1, img2).detach().item()
        return score
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.