@pytest.mark.skipif((sys.version_info < (3, 8)), reason='added in 3.8')
def test_legacy_display_without_fields_warns(fake_object_no_id):
printer = v4_cli.LegacyPrinter()
with mock.patch('builtins.print') as mocked:
printer.display(fake_object_no_id, obj=fake_object_no_id)
assert ('No default fields to show' in mocked.call_args.args[0])
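# Runs the segmentation net on a (size x size) RGB copy of `image` and returns the argmax class map,
# scaled to 0/255 (uint8) and resized back to the original resolution. Assumes a module-level `device`.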
def get_mask(image, net, size=224):
(image_h, image_w) = (image.shape[0], image.shape[1])
down_size_image = cv2.resize(image, (size, size))
down_size_image = cv2.cvtColor(down_size_image, cv2.COLOR_BGR2RGB)
down_size_image = torch.from_numpy(down_size_image).float().div(255.0).unsqueeze(0)
down_size_image = np.transpose(down_size_image, (0, 3, 1, 2)).to(device)
down_size_image = TF.normalize(down_size_image, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
mask: torch.Tensor = net(down_size_image)
mask = mask.argmax(dim=1).squeeze()
mask_cv2 = (mask.data.cpu().numpy() * 255)
mask_cv2 = mask_cv2.astype(np.uint8)
mask_cv2 = cv2.resize(mask_cv2, (image_w, image_h))
return mask_cv2
def check_type_arguments(graph: Graph, scc: list[str], errors: Errors) -> None:
for module in scc:
state = graph[module]
assert state.tree
analyzer = TypeArgumentAnalyzer(errors, state.options, state.tree.is_typeshed_file(state.options), state.manager.semantic_analyzer.named_type)
with state.wrap_context():
with mypy.state.state.strict_optional_set(state.options.strict_optional):
state.tree.accept(analyzer)
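# Saves a training checkpoint as <save_dir>/model.pth.tar-<epoch> (or `model_name`), optionally stripping
# the 'module.' prefix that nn.DataParallel adds to state_dict keys, records the latest filename in a plain
# 'checkpoint' text file, and copies the weights to model-best.pth.tar when `is_best` is True.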
def save_checkpoint(state, save_dir, is_best=False, remove_module_from_keys=True, model_name=''):
mkdir_if_missing(save_dir)
if remove_module_from_keys:
state_dict = state['state_dict']
new_state_dict = OrderedDict()
for (k, v) in state_dict.items():
if k.startswith('module.'):
k = k[7:]
new_state_dict[k] = v
state['state_dict'] = new_state_dict
epoch = state['epoch']
if (not model_name):
model_name = ('model.pth.tar-' + str(epoch))
fpath = osp.join(save_dir, model_name)
torch.save(state, fpath)
print('Checkpoint saved to "{}"'.format(fpath))
checkpoint_file = osp.join(save_dir, 'checkpoint')
checkpoint = open(checkpoint_file, 'w+')
checkpoint.write('{}\n'.format(osp.basename(fpath)))
checkpoint.close()
if is_best:
best_fpath = osp.join(osp.dirname(fpath), 'model-best.pth.tar')
shutil.copy(fpath, best_fpath)
print('Best checkpoint saved to "{}"'.format(best_fpath))
class SpecVersionType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, major=None, minor=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.major = major
self.major_nsprefix_ = None
self.minor = minor
self.minor_nsprefix_ = None
def factory(*args_, **kwargs_):
if (CurrentSubclassModule_ is not None):
subclass = getSubclassFromModule_(CurrentSubclassModule_, SpecVersionType)
if (subclass is not None):
return subclass(*args_, **kwargs_)
if SpecVersionType.subclass:
return SpecVersionType.subclass(*args_, **kwargs_)
else:
return SpecVersionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_major(self):
return self.major
def set_major(self, major):
self.major = major
def get_minor(self):
return self.minor
def set_minor(self, minor):
self.minor = minor
def has__content(self):
if ((self.major is not None) or (self.minor is not None)):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_=' xmlns:None="urn:schemas-upnp-org:service-1-0" ', name_='SpecVersionType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SpecVersionType')
if (imported_ns_def_ is not None):
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if ((self.original_tagname_ is not None) and (name_ == 'SpecVersionType')):
name_ = self.original_tagname_
if (UseCapturedNS_ and self.ns_prefix_):
namespaceprefix_ = (self.ns_prefix_ + ':')
showIndent(outfile, level, pretty_print)
outfile.write(('<%s%s%s' % (namespaceprefix_, name_, ((namespacedef_ and (' ' + namespacedef_)) or ''))))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SpecVersionType')
if self.has__content():
outfile.write(('>%s' % (eol_,)))
self._exportChildren(outfile, (level + 1), namespaceprefix_, namespacedef_, name_='SpecVersionType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write(('</%s%s>%s' % (namespaceprefix_, name_, eol_)))
else:
outfile.write(('/>%s' % (eol_,)))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SpecVersionType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_=' xmlns:None="urn:schemas-upnp-org:service-1-0" ', name_='SpecVersionType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if (self.major is not None):
namespaceprefix_ = ((self.major_nsprefix_ + ':') if (UseCapturedNS_ and self.major_nsprefix_) else '')
showIndent(outfile, level, pretty_print)
outfile.write(('<%smajor>%s</%smajor>%s' % (namespaceprefix_, self.gds_format_integer(self.major, input_name='major'), namespaceprefix_, eol_)))
if (self.minor is not None):
namespaceprefix_ = ((self.minor_nsprefix_ + ':') if (UseCapturedNS_ and self.minor_nsprefix_) else '')
showIndent(outfile, level, pretty_print)
outfile.write(('<%sminor>%s</%sminor>%s' % (namespaceprefix_, self.gds_format_integer(self.minor, input_name='minor'), namespaceprefix_, eol_)))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[(- 1)]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if ((nodeName_ == 'major') and child_.text):
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'major')
ival_ = self.gds_validate_integer(ival_, node, 'major')
self.major = ival_
self.major_nsprefix_ = child_.prefix
elif ((nodeName_ == 'minor') and child_.text):
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'minor')
ival_ = self.gds_validate_integer(ival_, node, 'minor')
self.minor = ival_
self.minor_nsprefix_ = child_.prefix
def _copy_command_options(pyproject: dict, dist: 'Distribution', filename: _Path):
tool_table = pyproject.get('tool', {})
cmdclass = tool_table.get('setuptools', {}).get('cmdclass', {})
valid_options = _valid_command_options(cmdclass)
cmd_opts = dist.command_options
for (cmd, config) in pyproject.get('tool', {}).get('distutils', {}).items():
cmd = json_compatible_key(cmd)
valid = valid_options.get(cmd, set())
cmd_opts.setdefault(cmd, {})
for (key, value) in config.items():
key = json_compatible_key(key)
cmd_opts[cmd][key] = (str(filename), value)
if (key not in valid):
_logger.warning(f'Command option {cmd}.{key} is not defined')
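# Attention Free Transformer (AFT-full) block: q, k, v are linear projections of the input, and the output at
# position t is sigmoid(q_t) * sum_i exp(k_i + w_{t,i}) * v_i / sum_i exp(k_i + w_{t,i}), where w is an (n x n)
# position bias, learned unless `simple=True` (in which case it is fixed to zeros).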
class AFT_FULL(nn.Module):
def __init__(self, d_model, n=49, simple=False):
super(AFT_FULL, self).__init__()
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
if simple:
self.position_biases = torch.zeros((n, n))
else:
self.position_biases = nn.Parameter(torch.ones((n, n)))
self.d_model = d_model
self.n = n
self.sigmoid = nn.Sigmoid()
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if (m.bias is not None):
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if (m.bias is not None):
init.constant_(m.bias, 0)
def forward(self, input):
(bs, n, dim) = input.shape
q = self.fc_q(input)
k = self.fc_k(input).view(1, bs, n, dim)
v = self.fc_v(input).view(1, bs, n, dim)
numerator = torch.sum((torch.exp((k + self.position_biases.view(n, 1, (- 1), 1))) * v), dim=2)
denominator = torch.sum(torch.exp((k + self.position_biases.view(n, 1, (- 1), 1))), dim=2)
out = (numerator / denominator)
out = (self.sigmoid(q) * out.permute(1, 0, 2))
return out
def test_conv_module():
with pytest.raises(AssertionError):
conv_cfg = 'conv'
ConvModule(3, 8, 2, conv_cfg=conv_cfg)
with pytest.raises(AssertionError):
norm_cfg = 'norm'
ConvModule(3, 8, 2, norm_cfg=norm_cfg)
with pytest.raises(KeyError):
act_cfg = dict(type='softmax')
ConvModule(3, 8, 2, act_cfg=act_cfg)
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
assert conv.with_activation
assert hasattr(conv, 'activate')
assert conv.with_norm
assert hasattr(conv, 'norm')
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = ConvModule(3, 8, 2)
assert conv.with_activation
assert hasattr(conv, 'activate')
assert (not conv.with_norm)
assert (conv.norm is None)
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = ConvModule(3, 8, 2, act_cfg=None)
assert (not conv.with_norm)
assert (conv.norm is None)
assert (not conv.with_activation)
assert (not hasattr(conv, 'activate'))
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv_module = ConvModule(3, 8, 2, conv_cfg=dict(type='ExampleConv'), act_cfg=None)
assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))
conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
assert hasattr(conv.conv, 'weight_orig')
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
with pytest.raises(KeyError):
conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
assert isinstance(conv.activate, nn.LeakyReLU)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
assert isinstance(conv.activate, nn.Tanh)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
assert isinstance(conv.activate, nn.Sigmoid)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
assert isinstance(conv.activate, nn.PReLU)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
assert isinstance(conv.activate, HSwish)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
assert isinstance(conv.activate, HSigmoid)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
@pytest.mark.parametrize('not_redefine', ['_evaluate_data', '_make_result'])
def test_SKCDecisionMakerABC_not_redefined(not_redefine):
content = {'_skcriteria_parameters': []}
for method_name in ['_evaluate_data', '_make_result', '_validate_data']:
if (method_name != not_redefine):
content[method_name] = (lambda **kws: None)
Foo = type('Foo', (SKCDecisionMakerABC,), content)
with pytest.raises(TypeError):
Foo()
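# Amplitude-estimation building block for the expected payoff of a European call option: the uncertainty model
# prepares the asset-price distribution, an IntegerComparator flags prices at or above the discretised strike,
# and controlled Y-rotations encode an approximately linear payoff into the objective qubit; value_to_estimation
# maps the estimated amplitude back to an option value.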
class EuropeanCallExpectedValue(UncertaintyProblem):
def __init__(self, uncertainty_model: UnivariateDistribution, strike_price: float, c_approx: float, i_state: Optional[Union[(List[int], np.ndarray)]]=None, i_compare: Optional[int]=None, i_objective: Optional[int]=None) -> None:
super().__init__((uncertainty_model.num_target_qubits + 2))
self._uncertainty_model = uncertainty_model
self._strike_price = strike_price
self._c_approx = c_approx
if (i_state is None):
i_state = list(range(uncertainty_model.num_target_qubits))
self.i_state = i_state
if (i_compare is None):
i_compare = uncertainty_model.num_target_qubits
self.i_compare = i_compare
if (i_objective is None):
i_objective = (uncertainty_model.num_target_qubits + 1)
self.i_objective = i_objective
lower = uncertainty_model.low
upper = uncertainty_model.high
self._mapped_strike_price = int(np.round((((strike_price - lower) / (upper - lower)) * (uncertainty_model.num_values - 1))))
self._comparator = IntegerComparator(uncertainty_model.num_target_qubits, self._mapped_strike_price)
self.offset_angle_zero = ((np.pi / 4) * (1 - self._c_approx))
if (self._mapped_strike_price < (uncertainty_model.num_values - 1)):
self.offset_angle = ((((((- 1) * np.pi) / 2) * self._c_approx) * self._mapped_strike_price) / ((uncertainty_model.num_values - self._mapped_strike_price) - 1))
self.slope_angle = (((np.pi / 2) * self._c_approx) / ((uncertainty_model.num_values - self._mapped_strike_price) - 1))
else:
self.offset_angle = 0
self.slope_angle = 0
def value_to_estimation(self, value):
estimator = ((value - (1 / 2)) + ((np.pi / 4) * self._c_approx))
estimator *= ((2 / np.pi) / self._c_approx)
estimator *= ((self._uncertainty_model.num_values - self._mapped_strike_price) - 1)
estimator *= ((self._uncertainty_model.high - self._uncertainty_model.low) / (self._uncertainty_model.num_values - 1))
return estimator
def required_ancillas(self):
num_uncertainty_ancillas = self._uncertainty_model.required_ancillas()
num_comparator_ancillas = self._comparator.num_ancillas
num_ancillas = int(np.maximum(num_uncertainty_ancillas, num_comparator_ancillas))
return num_ancillas
def build(self, qc, q, q_ancillas=None, params=None):
q_state = [q[i] for i in self.i_state]
q_compare = q[self.i_compare]
q_objective = q[self.i_objective]
self._uncertainty_model.build(qc, q_state, q_ancillas)
qubits = (q_state[:] + [q_compare])
if q_ancillas:
qubits += q_ancillas[:self._comparator.num_ancillas]
qc.append(self._comparator.to_instruction(), qubits)
qc.ry((2 * self.offset_angle_zero), q_objective)
qc.cry((2 * self.offset_angle), q_compare, q_objective)
for (i, q_i) in enumerate(q_state):
qc.mcry(((2 * self.slope_angle) * (2 ** i)), [q_compare, q_i], q_objective, None)
@assert_equal.register(mappingproxy, mappingproxy)
def asssert_mappingproxy_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(set(result), set(expected), msg, (path + ('.keys()',)), 'key')
failures = []
for (k, resultv) in iteritems(result):
expectedv = expected[k]
try:
assert_equal(resultv, expectedv, path=(path + (('[%r]' % (k,)),)), msg=msg, **kwargs)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError('\n'.join(failures))
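# Puts the first `layers` ResNet stages (conv1/bn1/layer1/layer2, optionally layer3 and layer4) into eval mode
# and disables their gradients; `use_fcn` switches between the model.module.* and model.module.backbone.* paths.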
def freeze_layers(model, layers=2, use_fcn=False):
if use_fcn:
if (layers >= 2):
model.module.conv1.eval()
model.module.bn1.eval()
model.module.layer1.eval()
model.module.layer2.eval()
for (name, param) in model.module.conv1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.bn1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.layer1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.layer2.named_parameters():
param.requires_grad = False
if (layers >= 3):
model.module.layer3.eval()
for (name, param) in model.module.layer3.named_parameters():
param.requires_grad = False
if (layers >= 4):
model.module.layer4.eval()
for (name, param) in model.module.layer4.named_parameters():
param.requires_grad = False
return model
else:
if (layers >= 2):
model.module.backbone.conv1.eval()
model.module.backbone.bn1.eval()
model.module.backbone.layer1.eval()
model.module.backbone.layer2.eval()
for (name, param) in model.module.backbone.conv1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.backbone.bn1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.backbone.layer1.named_parameters():
param.requires_grad = False
for (name, param) in model.module.backbone.layer2.named_parameters():
param.requires_grad = False
if (layers >= 3):
model.module.backbone.layer3.eval()
for (name, param) in model.module.backbone.layer3.named_parameters():
param.requires_grad = False
if (layers >= 4):
model.module.backbone.layer4.eval()
for (name, param) in model.module.backbone.layer4.named_parameters():
param.requires_grad = False
return model
class OnDeletedMessages():
def on_deleted_messages(self=None, filters=None, group: int=0) -> Callable:
def decorator(func: Callable) -> Callable:
if isinstance(self, pyrogram.Client):
self.add_handler(pyrogram.handlers.DeletedMessagesHandler(func, filters), group)
elif (isinstance(self, Filter) or (self is None)):
if (not hasattr(func, 'handlers')):
func.handlers = []
func.handlers.append((pyrogram.handlers.DeletedMessagesHandler(func, self), (group if (filters is None) else filters)))
return func
return decorator
def add_railing_to_stairs(bm, top_faces, normal, prop):
steps = sort_faces(top_faces, normal)
first_step = steps[0]
last_step = steps[(- 1)]
(offset, corner_pw) = (prop.rail.offset, prop.rail.corner_post_width)
if prop.landing:
(v1, v2) = railing_verts(bm, sort_verts(first_step.verts, normal)[:2], normal, offset, (corner_pw / 2))
(v3, v4) = railing_verts(bm, sort_verts(first_step.verts, normal)[(- 2):], normal, offset, ((- corner_pw) / 2))
(v5, v6) = railing_verts(bm, sort_verts(last_step.verts, normal)[:2], normal, offset, (prop.step_width - (corner_pw / 2)))
e1 = bmesh.ops.contextual_create(bm, geom=(v1, v3))['edges'][0]
e2 = bmesh.ops.contextual_create(bm, geom=[v3, v5])['edges'][0]
e3 = bmesh.ops.contextual_create(bm, geom=[v2, v4])['edges'][0]
e4 = bmesh.ops.contextual_create(bm, geom=[v4, v6])['edges'][0]
railing_edges = [e1, e2, e3, e4]
else:
(v1, v2) = railing_verts(bm, sort_verts(first_step.verts, normal)[:2], normal, offset, (corner_pw / 2))
(v3, v4) = railing_verts(bm, sort_verts(last_step.verts, normal)[:2], normal, offset, (prop.step_width - (corner_pw / 2)))
e1 = bmesh.ops.contextual_create(bm, geom=(v1, v3))['edges'][0]
e2 = bmesh.ops.contextual_create(bm, geom=[v2, v4])['edges'][0]
railing_edges = [e1, e2]
ret = bmesh.ops.extrude_edge_only(bm, edges=railing_edges)
top_edges = filter_geom(ret['geom'], BMEdge)
top_verts = list({v for e in top_edges for v in e.verts})
bmesh.ops.translate(bm, verts=top_verts, vec=(Vector((0.0, 0.0, 1.0)) * prop.rail.corner_post_height))
railing_faces = filter_geom(ret['geom'], BMFace)
prop.rail.show_extra_props = (prop.rail.fill == 'WALL')
res = create_railing(bm, railing_faces, prop.rail, normal)
post_process_railing(bm, res, prop)
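# Cross-replica concat on TPU: each replica scatters its tensor into a zero buffer at its own replica index,
# a cross-replica sum then fills every slot on every core, and the final reshape folds the replica dimension
# into the leading (batch) dimension.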
def tpu_cross_replica_concat(tensor, tpu_context=None):
if ((tpu_context is None) or (tpu_context.num_replicas <= 1)):
return tensor
num_replicas = tpu_context.num_replicas
with tf.name_scope('tpu_cross_replica_concat'):
ext_tensor = tf.scatter_nd(indices=[[xla.replica_id()]], updates=[tensor], shape=([num_replicas] + tensor.shape.as_list()))
ext_tensor = tf.tpu.cross_replica_sum(ext_tensor)
return tf.reshape(ext_tensor, ([(- 1)] + ext_tensor.shape.as_list()[2:]))
class TestUnsatCores(TestCase):
def _helper_check_examples(self, solver_name):
for (f, _, satisfiability, logic) in get_example_formulae():
if (not logic.quantifier_free):
continue
if (satisfiability == False):
with UnsatCoreSolver(name=solver_name, unsat_cores_mode='named') as solver:
if (logic not in solver.LOGICS):
continue
clauses = [f]
if f.is_and():
clauses = f.args()
for (i, c) in enumerate(clauses):
solver.add_assertion(c, ('a%d' % i))
try:
r = solver.solve()
self.assertFalse(r)
except SolverReturnedUnknownResultError:
if (QF_BV <= logic):
continue
else:
raise
core = solver.get_named_unsat_core()
self.assertTrue((len(core) <= len(clauses)))
for k in core.values():
self.assertIn(k, clauses)
self.assertTrue(is_unsat(And(core.values()), logic=logic))
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)  # skip-decorator names in this class are assumed; only their arguments survived extraction
def test_basic(self):
x = Symbol('x')
with UnsatCoreSolver(logic=QF_BOOL) as s:
s.add_assertion(x)
s.add_assertion(Not(x))
r = s.solve()
self.assertFalse(r)
core = s.get_unsat_core()
self.assertEqual(len(core), 2)
self.assertIn(x, core)
self.assertIn(Not(x), core)
named_core = s.get_named_unsat_core()
self.assertEqual(len(named_core), 2)
self.assertIn(x, named_core.values())
self.assertIn(Not(x), named_core.values())
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)
def test_shortcut(self):
x = Symbol('x')
core = get_unsat_core([x, Not(x)])
self.assertEqual(len(core), 2)
self.assertIn(x, core)
self.assertIn(Not(x), core)
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)
def test_generators_in_shortcuts(self):
flist = [Symbol('x'), Not(Symbol('x'))]
gen_f = (x for x in flist)
ucore = get_unsat_core(gen_f)
self.assertEqual(len(ucore), 2)
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)
def test_basic_named(self):
x = Symbol('x')
with UnsatCoreSolver(logic=QF_BOOL, unsat_cores_mode='named') as s:
s.add_assertion(x, named='a1')
s.add_assertion(Not(x), named='a2')
r = s.solve()
self.assertFalse(r)
core = s.get_unsat_core()
self.assertEqual(len(core), 2)
self.assertIn(x, core)
self.assertIn(Not(x), core)
named_core = s.get_named_unsat_core()
self.assertEqual(len(named_core), 2)
self.assertIn('a1', named_core)
self.assertIn('a2', named_core)
self.assertEqual(named_core['a1'], x)
self.assertEqual(named_core['a2'], Not(x))
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)
def test_modify_state(self):
x = Symbol('x')
with UnsatCoreSolver(logic=QF_BOOL) as s:
s.add_assertion(x)
s.push()
s.add_assertion(Not(x))
r = s.solve()
self.assertFalse(r)
s.pop()
with self.assertRaises(SolverStatusError):
s.get_unsat_core()
@skipIfNoUnsatCoreSolverForLogic(QF_BOOL)
def test_modify_state_assert(self):
x = Symbol('x')
with UnsatCoreSolver(logic=QF_BOOL) as s:
s.add_assertion(x)
s.add_assertion(Not(x))
r = s.solve()
self.assertFalse(r)
s.add_assertion(Symbol('y'))
with self.assertRaises(SolverStatusError):
s.get_unsat_core()
@skipIfNoUnsatCoreSolverForLogic(QF_LIA)
def test_named_unsat_core_with_assumptions(self):
i0 = Int(0)
a = GT(Symbol('a', INT), i0)
b = GT(Symbol('b', INT), i0)
c = GT(Symbol('c', INT), i0)
n_a = Not(a)
n_b = Not(b)
n_c = Not(c)
formulae = [Or(b, n_a), Or(c, n_a), Or(n_a, n_b, n_c)]
with UnsatCoreSolver(logic=QF_LIA, unsat_cores_mode='named') as solver:
for (i, f) in enumerate(formulae):
solver.add_assertion(f, named=f'f{i}')
sat = solver.solve([a])
self.assertFalse(sat)
@skipIfSolverNotAvailable('msat')
def test_examples_msat(self):
self._helper_check_examples('msat')
@skipIfSolverNotAvailable('z3')
def test_examples_z3(self):
self._helper_check_examples('z3')
@skipIfSolverNotAvailable('msat')
def test_unsat_core_on_regular_solver(self):
x = Symbol('x')
with Solver(name='msat') as s:
s.add_assertion(x)
s.add_assertion(Not(x))
r = s.solve()
self.assertFalse(r)
with self.assertRaises(SolverNotConfiguredForUnsatCoresError):
s.get_unsat_core()
class TypeTriggersVisitor(TypeVisitor[List[str]]):
def __init__(self, use_logical_deps: bool, seen_aliases: (set[TypeAliasType] | None)=None) -> None:
self.deps: list[str] = []
self.seen_aliases: set[TypeAliasType] = (seen_aliases or set())
self.use_logical_deps = use_logical_deps
def get_type_triggers(self, typ: Type) -> list[str]:
return get_type_triggers(typ, self.use_logical_deps, self.seen_aliases)
def visit_instance(self, typ: Instance) -> list[str]:
trigger = make_trigger(typ.type.fullname)
triggers = [trigger]
for arg in typ.args:
triggers.extend(self.get_type_triggers(arg))
if typ.last_known_value:
triggers.extend(self.get_type_triggers(typ.last_known_value))
if (typ.extra_attrs and typ.extra_attrs.mod_name):
triggers.append(make_wildcard_trigger(typ.extra_attrs.mod_name))
return triggers
def visit_type_alias_type(self, typ: TypeAliasType) -> list[str]:
if (typ in self.seen_aliases):
return []
self.seen_aliases.add(typ)
assert (typ.alias is not None)
trigger = make_trigger(typ.alias.fullname)
triggers = [trigger]
for arg in typ.args:
triggers.extend(self.get_type_triggers(arg))
triggers.extend(self.get_type_triggers(typ.alias.target))
return triggers
def visit_any(self, typ: AnyType) -> list[str]:
if (typ.missing_import_name is not None):
return [make_trigger(typ.missing_import_name)]
return []
def visit_none_type(self, typ: NoneType) -> list[str]:
return []
def visit_callable_type(self, typ: CallableType) -> list[str]:
triggers = []
for arg in typ.arg_types:
triggers.extend(self.get_type_triggers(arg))
triggers.extend(self.get_type_triggers(typ.ret_type))
return triggers
def visit_overloaded(self, typ: Overloaded) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
return triggers
def visit_erased_type(self, t: ErasedType) -> list[str]:
assert False, 'Should not see an erased type here'
def visit_deleted_type(self, typ: DeletedType) -> list[str]:
return []
def visit_partial_type(self, typ: PartialType) -> list[str]:
assert False, 'Should not see a partial type here'
def visit_tuple_type(self, typ: TupleType) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
triggers.extend(self.get_type_triggers(typ.partial_fallback))
return triggers
def visit_type_type(self, typ: TypeType) -> list[str]:
triggers = self.get_type_triggers(typ.item)
if (not self.use_logical_deps):
old_triggers = triggers.copy()
for trigger in old_triggers:
triggers.append((trigger.rstrip('>') + '.__init__>'))
triggers.append((trigger.rstrip('>') + '.__new__>'))
return triggers
def visit_type_var(self, typ: TypeVarType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
for val in typ.values:
triggers.extend(self.get_type_triggers(val))
return triggers
def visit_param_spec(self, typ: ParamSpecType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
triggers.extend(self.get_type_triggers(typ.upper_bound))
return triggers
def visit_type_var_tuple(self, typ: TypeVarTupleType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
triggers.extend(self.get_type_triggers(typ.upper_bound))
return triggers
def visit_unpack_type(self, typ: UnpackType) -> list[str]:
return typ.type.accept(self)
def visit_parameters(self, typ: Parameters) -> list[str]:
triggers = []
for arg in typ.arg_types:
triggers.extend(self.get_type_triggers(arg))
return triggers
def visit_typeddict_type(self, typ: TypedDictType) -> list[str]:
triggers = []
for item in typ.items.values():
triggers.extend(self.get_type_triggers(item))
triggers.extend(self.get_type_triggers(typ.fallback))
return triggers
def visit_literal_type(self, typ: LiteralType) -> list[str]:
return self.get_type_triggers(typ.fallback)
def visit_unbound_type(self, typ: UnboundType) -> list[str]:
return []
def visit_uninhabited_type(self, typ: UninhabitedType) -> list[str]:
return []
def visit_union_type(self, typ: UnionType) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
return triggers
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
if (not is_torch_available()):
raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')
import torch
from torch.onnx import export
from .pytorch_utils import is_torch_less_than_1_11
print(f'Using framework PyTorch: {torch.__version__}')
with torch.no_grad():
(input_names, output_names, dynamic_axes, tokens) = infer_shapes(nlp, 'pt')
(ordered_input_names, model_args) = ensure_valid_input(nlp.model, tokens, input_names)
if is_torch_less_than_1_11:
export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_format, enable_onnx_checker=True, opset_version=opset)
else:
export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
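# Converts a Keras Conv2D / DepthwiseConv2D / Conv2DTranspose layer into the corresponding Relay conv2d /
# conv2d_transpose call: the HWIO (or HWOI) Keras kernel is transposed to the target kernel layout, 'same'
# padding is turned into explicit pad values, and bias and activation are appended when the layer uses them.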
def _convert_convolution(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
is_deconv = (type(keras_layer).__name__ == 'Conv2DTranspose')
is_depthconv = (type(keras_layer).__name__ == 'DepthwiseConv2D')
weightList = keras_layer.get_weights()
weight = weightList[0]
if (etab.data_layout == 'NHWC'):
if is_depthconv:
kernel_layout = 'HWOI'
else:
kernel_layout = 'HWIO'
else:
kernel_layout = 'OIHW'
if is_deconv:
(kernel_h, kernel_w, n_filters, in_channels) = weight.shape
if (kernel_layout == 'OIHW'):
weight = weight.transpose([3, 2, 0, 1])
elif is_depthconv:
(kernel_h, kernel_w, in_channels, depth_mult) = weight.shape
if (kernel_layout == 'OIHW'):
weight = weight.transpose([2, 3, 0, 1])
elif (etab.data_layout == 'NCHW'):
(kernel_h, kernel_w, in_channels, n_filters) = weight.shape
weight = weight.transpose([3, 2, 0, 1])
else:
(kernel_h, kernel_w, in_channels, n_filters) = weight.shape
if isinstance(keras_layer.dilation_rate, (list, tuple)):
dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
dilated_kernel_h = (((kernel_h - 1) * dilation[0]) + 1)
dilated_kernel_w = (((kernel_w - 1) * dilation[1]) + 1)
(stride_h, stride_w) = keras_layer.strides
params = {'weight': etab.new_const(weight, dtype=str(weight.dtype)), 'kernel_size': [kernel_h, kernel_w], 'strides': [stride_h, stride_w], 'dilation': dilation, 'padding': [0, 0], 'data_layout': etab.data_layout, 'kernel_layout': kernel_layout}
if is_depthconv:
params['channels'] = (in_channels * depth_mult)
params['groups'] = in_channels
else:
params['channels'] = n_filters
if (keras_layer.padding == 'valid'):
pass
elif (keras_layer.padding == 'same'):
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
(pad_t, pad_b) = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
(pad_l, pad_r) = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
params['padding'] = (pad_t, pad_l, pad_b, pad_r)
else:
msg = 'Padding with {} is not supported for operator Convolution in frontend Keras.'
raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
if is_deconv:
out = _op.nn.conv2d_transpose(data=inexpr, **params)
else:
out = _op.nn.conv2d(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
if (etab.data_layout == 'NCHW'):
out = _op.nn.bias_add(out, bias)
else:
out = _op.nn.bias_add(out, bias, axis=(- 1))
if (sys.version_info.major < 3):
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if (act_type != 'linear'):
out = _convert_activation(out, act_type, etab)
return out
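# Builds a list/set/tuple display that may contain starred items: for lists, a leading run of non-starred items
# is passed directly to the constructor, after which each remaining item is appended (or extended, if starred)
# onto the result value.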
def _visit_display(builder: IRBuilder, items: list[Expression], constructor_op: Callable[([list[Value], int], Value)], append_op: CFunctionDescription, extend_op: CFunctionDescription, line: int, is_list: bool) -> Value:
accepted_items = []
for item in items:
if isinstance(item, StarExpr):
accepted_items.append((True, builder.accept(item.expr)))
else:
accepted_items.append((False, builder.accept(item)))
result: (Value | None) = None
initial_items = []
for (starred, value) in accepted_items:
if ((result is None) and (not starred) and is_list):
initial_items.append(value)
continue
if (result is None):
result = constructor_op(initial_items, line)
builder.call_c((extend_op if starred else append_op), [result, value], line)
if (result is None):
result = constructor_op(initial_items, line)
return result
class TestFileMagic():
def test_read_bytes_crash(self, mocker):
mock_open = mocker.patch('io.open')
mock_open().__enter__().read.side_effect = IOError
volume = Volume(disk=Disk(ImageParser(), '...'))
volume.get_raw_path = mocker.Mock(return_value='...')
assert (volume._get_magic_type() is None)
def test_trustme_cli_quiet(capsys: pytest.CaptureFixture[str], tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.chdir(tmp_path)
main(argv=['-q'])
assert tmp_path.joinpath('server.key').exists()
assert tmp_path.joinpath('server.pem').exists()
assert tmp_path.joinpath('client.pem').exists()
captured = capsys.readouterr()
assert (not captured.out)
def add_empty_config(args):
keys = ['get_read_time', 'split_row_groups', 'dask_profile', 'verify_results']
for key in keys:
if (key not in args):
args[key] = None
if ('file_format' not in args):
args['file_format'] = 'parquet'
if ('output_filetype' not in args):
args['output_filetype'] = 'parquet'
return args
def upload_training_data(training_dir, api_key=None):
results = monitoring.load_results(training_dir)
if (not results):
raise error.Error("Could not find any manifest files in {}.\n\n(HINT: this usually means you did not yet close() your env.monitor and have not yet exited the process. You should call 'env.monitor.start(training_dir)' at the start of training and 'env.monitor.close()' at the end, or exit the process.)".format(training_dir))
manifests = results['manifests']
env_info = results['env_info']
data_sources = results['data_sources']
timestamps = results['timestamps']
episode_lengths = results['episode_lengths']
episode_rewards = results['episode_rewards']
episode_types = results['episode_types']
initial_reset_timestamps = results['initial_reset_timestamps']
videos = results['videos']
env_id = env_info['env_id']
logger.debug('[%s] Uploading data from manifest %s', env_id, ', '.join(manifests))
if (len(episode_lengths) > 0):
training_episode_batch = upload_training_episode_batch(data_sources, episode_lengths, episode_rewards, episode_types, initial_reset_timestamps, timestamps, api_key, env_id=env_id)
else:
training_episode_batch = None
if (len(videos) > MAX_VIDEOS):
logger.warn('[%s] You recorded videos for %s episodes, but the scoreboard only supports up to %s. We will automatically subsample for you, but you also might wish to adjust your video recording rate.', env_id, len(videos), MAX_VIDEOS)
subsample_inds = np.linspace(0, (len(videos) - 1), MAX_VIDEOS).astype('int')
videos = [videos[i] for i in subsample_inds]
if (len(videos) > 0):
training_video = upload_training_video(videos, api_key, env_id=env_id)
else:
training_video = None
return (env_info, training_episode_batch, training_video)
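# Renders `lines` (a list of rows) as an ASCII table; the first row becomes a centred header when `header` is
# True, and cells longer than TABLE_MAX_LENGTH are truncated with a middle ellipsis. This is Python 2 code: it
# relies on `unicode` and on `map` returning a list.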
def tableify(lines, header=True, topdeco=True, bottomdeco=True):
def rowline(text, maxlens, alignments=['<', '>']):
outline = ''
for (ndx, length) in enumerate(maxlens):
align = (alignments[ndx] if (ndx < len(alignments)) else alignments[(- 1)])
if ((text[ndx] is not None) and (len(unicode(text[ndx])) > TABLE_MAX_LENGTH)):
text[ndx] = u'{0}...{1}'.format(unicode(text[ndx])[:((TABLE_MAX_LENGTH / 2) - 3)], unicode(text[ndx])[((- (TABLE_MAX_LENGTH / 2)) + 3):])
outline += u'| {0: {1}{2}} '.format((unicode(text[ndx]) or u''), align, (length - 3))
outline += u'|'
return outline.encode('UTF-8')
def sepline(maxlens):
outline = ''
for length in maxlens:
outline += '{0:-<{1}}'.format('+', length)
outline += '+'
return outline
if header:
if ((len(lines) > 1) and (len(lines[0]) < len(lines[1]))):
lines[0].insert(0, '')
maxlens = map((lambda col: (len(unicode(col)) + 3)), lines[0])
for line in lines:
for (ndx, col) in enumerate(line):
maxlens[ndx] = max(maxlens[ndx], (len(unicode(col)) + 3))
for ndx in range(len(maxlens)):
if (maxlens[ndx] > TABLE_MAX_LENGTH):
maxlens[ndx] = TABLE_MAX_LENGTH
outlines = []
if topdeco:
outlines.append(sepline(maxlens))
if header:
outlines.append(rowline(lines.pop(0), maxlens, ['^']))
outlines.append(sepline(maxlens))
for line in lines:
outlines.append(rowline(line, maxlens))
if bottomdeco:
outlines.append(sepline(maxlens))
return outlines
class TestAdaroundLoss(unittest.TestCase):
def _compute_recon_loss(self, device):
tf.compat.v1.reset_default_graph()
session = tf.compat.v1.Session()
np.random.seed(0)
inp = np.random.rand(32, 3, 12, 12)
target = np.random.rand(32, 3, 12, 12)
inp_t = np.transpose(inp, (0, 2, 3, 1))
target_t = np.transpose(target, (0, 2, 3, 1))
channels_index = (len(target_t.shape) - 1)
with tf.device(device):
inp_tensor = tf.convert_to_tensor(inp_t, dtype=tf.float32)
target_tensor = tf.convert_to_tensor(target_t, dtype=tf.float32)
recons_loss = AdaroundLoss.compute_recon_loss(inp_tensor, target_tensor, channels_index)
self.assertAlmostEqual(session.run(recons_loss), 0., places=4)
inp = np.random.rand(32, 10)
target = np.random.rand(32, 10)
channels_index = (len(target.shape) - 1)
with tf.device(device):
inp_tensor = tf.convert_to_tensor(inp, dtype=tf.float32)
target_tensor = tf.convert_to_tensor(target, dtype=tf.float32)
recons_loss = AdaroundLoss.compute_recon_loss(inp_tensor, target_tensor, channels_index)
self.assertAlmostEqual(session.run(recons_loss), 1., places=4)
session.close()
@pytest.mark.cuda
def test_compute_recon_loss_gpu(self):
device = '/gpu:0'
self._compute_recon_loss(device)
def test_compute_recon_loss(self):
device = '/cpu:0'
self._compute_recon_loss(device)
def _compute_round_loss(self, device):
tf.compat.v1.reset_default_graph()
np.random.seed(0)
alpha = np.random.rand(1, 3, 12, 12)
reg_param = 0.01
with tf.device(device):
beta_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=[])
warm_start_tensor = tf.compat.v1.placeholder(dtype=tf.bool, shape=[])
alpha_tensor = tf.convert_to_tensor(alpha, dtype=tf.float32)
round_loss_tensor = AdaroundLoss.compute_round_loss(alpha_tensor, reg_param, warm_start_tensor, beta_tensor)
session = tf.compat.v1.Session()
beta = 17.
warm_start = True
rounding_loss_1 = session.run(round_loss_tensor, feed_dict={beta_tensor: beta, warm_start_tensor: warm_start})
self.assertEqual(rounding_loss_1, 0)
beta = 4.
warm_start = False
rounding_loss_2 = session.run(round_loss_tensor, feed_dict={beta_tensor: beta, warm_start_tensor: warm_start})
self.assertAlmostEqual(rounding_loss_2, 4., places=4)
session.close()
@pytest.mark.cuda
def test_compute_round_loss_gpu(self):
device = '/gpu:0'
self._compute_round_loss(device)
def test_compute_round_loss(self):
device = '/cpu:0'
self._compute_round_loss(device)
def test_compute_beta(self):
num_iterations = 10000
cur_iter = 8000
beta_range = (20, 2)
warm_start = 0.2
self.assertEqual(AdaroundLoss.compute_beta(num_iterations, cur_iter, beta_range, warm_start), 4.)
class UntrustedServerReturnedError(NetworkException):
def __init__(self, *, original_exception):
self.original_exception = original_exception
def get_message_for_gui(self) -> str:
return str(self)
def __str__(self):
return _('The server returned an error.')
def __repr__(self):
return f'<UntrustedServerReturnedError [DO NOT TRUST THIS MESSAGE] original_exception: {repr(self.original_exception)}>'
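# Decodes sentinel NaNs in a pandas Series: the NaN payload carries a (code, namespace) pair that is mapped back
# to the matching missing-value enum member when the namespace is known; namespace 255 (custom enumerations)
# raises, and unrecognised payloads are returned unchanged.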
def series_extract_missing(series: pd.Series) -> pd.Series:
def _decode(x):
if (np.issubdtype(type(x), np.floating) and np.isnan(x)):
(code, namespace) = _get_payload_from_nan(x)
if (namespace is None):
return x
elif (namespace == 255):
raise ValueError('Custom enumerations are not yet supported')
else:
try:
enum = _MISSING_ENUMS[_NAMESPACE_LOOKUP[namespace]]
except (IndexError, KeyError):
return x
try:
return enum[code]
except IndexError:
return x
return x
missing = series[series.isna()]
missing = missing.apply(_decode)
return missing.astype(object)
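# Maps a flat multivector component index to its blade grade; the index ranges correspond to a 5-dimensional
# algebra with 32 components (1 scalar, 5 vectors, 10 bivectors, 10 trivectors, 5 4-vectors, 1 pseudoscalar).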
def get_grade_from_index(index_in):
if (index_in == 0):
return 0
elif (index_in < 6):
return 1
elif (index_in < 16):
return 2
elif (index_in < 26):
return 3
elif (index_in < 31):
return 4
elif (index_in == 31):
return 5
else:
raise ValueError('Index is out of multivector bounds')
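# RMSE restricted to positions where the label is non-zero (sample_weight is 1 there and 0 elsewhere); labels
# that sum to 0 or to their own size (all-zero or all-one binary masks) yield NaN instead.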
class WeightedRMSE(BaseMetric):
def __init__(self, label_name):
self._label_name = label_name
def eval(self, predict, labels_map):
label = labels_map[self._label_name]
if ((np.sum(label) == 0) or (np.sum(label) == label.size)):
return MetricResult(result=float('nan'))
else:
weight = np.where((label > 0), np.ones_like(label), np.zeros_like(label))
mse = mean_squared_error(y_true=label, y_pred=predict, sample_weight=weight)
rmse = np.sqrt(mse)
return MetricResult(result=rmse, meta={'#': predict.size})
def required_label_names(self):
return [self._label_name]
def _StatusBarTruncInfo(win):
truncInfo = _WindowTruncInfo(win)
for (i, (title, rect, font, flag)) in enumerate(truncInfo):
rect.bottom -= win.VertBorderWidth
if (i == 0):
rect.right -= win.HorizBorderWidth
else:
rect.right -= win.InterBorderWidth
return truncInfo
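# Two-alternative forced choice (2AFC) scoring: for each triplet (ref, p0, p1) the distance function `func` is
# credited with the fraction of human judgements that agree with its ranking of the pair (0.5 on ties); returns
# the mean score together with the raw per-sample distances and judgements.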
def score_2afc_dataset(data_loader, func):
d0s = []
d1s = []
gts = []
for (i, data) in enumerate(data_loader.load_data()):
d0s += func(data['ref'], data['p0']).tolist()
d1s += func(data['ref'], data['p1']).tolist()
gts += data['judge'].cpu().numpy().flatten().tolist()
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = ((((d0s < d1s) * (1.0 - gts)) + ((d1s < d0s) * gts)) + ((d1s == d0s) * 0.5))
return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
def main(_):
config = _config.build_config()
config['train_epochs'] = 200
config['lr_decay_method'] = 'STEPWISE'
config['train_seconds'] = (- 1)
spec = create_best_nasbench_spec(config)
data = evaluate.augment_and_evaluate(spec, config, FLAGS.model_dir)
tf.logging.info(data)
def filter_matrix_rows(matrix, keep_rows):
if isinstance(matrix, _kaldi_matrix.Matrix):
return _sparse_matrix._filter_matrix_rows(matrix, keep_rows)
if isinstance(matrix, _sparse_matrix.SparseMatrix):
return _sparse_matrix._filter_sparse_matrix_rows(matrix, keep_rows)
if isinstance(matrix, _compressed_matrix.CompressedMatrix):
return _sparse_matrix._filter_compressed_matrix_rows(matrix, keep_rows)
if isinstance(matrix, _sparse_matrix.GeneralMatrix):
return _sparse_matrix._filter_general_matrix_rows(matrix, keep_rows)
if isinstance(matrix, _kaldi_matrix.DoubleMatrix):
return _sparse_matrix._filter_matrix_rows_double(matrix, keep_rows)
if isinstance(matrix, _sparse_matrix.DoubleSparseMatrix):
return _sparse_matrix._filter_sparse_matrix_rows_double(matrix, keep_rows)
raise TypeError('input matrix type is not supported.')
class TestNoDataDir(object):
def setup_method(self):
self.temporary_file_list = False
self.saved_data_path = pysat.params['data_dirs']
pysat.params.data['data_dirs'] = []
reload(pysat._files)
return
def teardown_method(self):
pysat.params.data['data_dirs'] = self.saved_data_path
reload(pysat._files)
return
def test_no_data_dir(self):
testing.eval_bad_input(pysat.Instrument, NameError, 'Please set a top-level directory path')
return
class TestCreateColormap(EndianTest):
def setUp(self):
self.req_args_0 = {'alloc': 0, 'mid': , 'visual': , 'window': }
self.req_bin_0 = b'N\x00\x04\x00\xac8VT\x84\x94\xdb\n\xe8\x1cT$'
def testPackRequest0(self):
bin = request.CreateColormap._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.CreateColormap._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0)
@pytest.mark.cuda
@pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, QuantScheme.training_range_learning_with_tf_init, QuantScheme.post_training_tf_enhanced, QuantScheme.training_range_learning_with_tf_enhanced_init])
def test_initialization_and_export_non_strict_symmetric(quant_scheme) -> None:
tf.compat.v1.reset_default_graph()
model = tf.keras.Sequential([tf.keras.layers.Conv2D(2, (3, 3), input_shape=(32, 32, 4)), tf.keras.layers.ReLU(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(2, activation='softmax', name='keras_model')])
sim = QuantizationSimModel(model, quant_scheme=quant_scheme)
sim.compute_encodings((lambda m, _: m(np.random.randn(1, 32, 32, 4))), None)
conv_op = sim.layers[1]
initialized_encoding_min = tf.keras.backend.get_value(conv_op.param_quantizers[0].encoding_min)
initialized_encoding_max = tf.keras.backend.get_value(conv_op.param_quantizers[0].encoding_max)
if (quant_scheme in RANGE_LEARNING_SCHEMES):
assert (initialized_encoding_min == (- initialized_encoding_max))
else:
assert (initialized_encoding_min != (- initialized_encoding_max))
sim.export('/tmp/', 'quant_sim_model')
with open('/tmp/quant_sim_model.encodings') as json_file:
encoding_data = json.load(json_file)
param_encodings = encoding_data['param_encodings']
for encodings in param_encodings.values():
for encoding_info in encodings:
encoding_min = encoding_info['min']
encoding_max = encoding_info['max']
scale = encoding_info['scale']
offset = encoding_info['offset']
if (quant_scheme in RANGE_LEARNING_SCHEMES):
assert (encoding_min == ((- encoding_max) - scale))
else:
assert np.isclose(encoding_min, ((- encoding_max) - scale))
assert (offset == (- 128))
assert np.isclose(encoding_min, (scale * offset), atol=1e-06)
assert np.isclose(encoding_max, (encoding_min + (scale * 255)), atol=1e-06)
class CallC(RegisterOp):
def __init__(self, function_name: str, args: list[Value], ret_type: RType, steals: StealsDescription, is_borrowed: bool, error_kind: int, line: int, var_arg_idx: int=(- 1)) -> None:
self.error_kind = error_kind
super().__init__(line)
self.function_name = function_name
self.args = args
self.type = ret_type
self.steals = steals
self.is_borrowed = is_borrowed
self.var_arg_idx = var_arg_idx
def sources(self) -> list[Value]:
return self.args
def stolen(self) -> list[Value]:
if isinstance(self.steals, list):
assert (len(self.steals) == len(self.args))
return [arg for (arg, steal) in zip(self.args, self.steals) if steal]
else:
return ([] if (not self.steals) else self.sources())
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_call_c(self)
def create_toy_graph():
synsets = {}
for (wn_id, name) in enumerate(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']):
synsets[name] = imagenet_spec.Synset(wn_id, name, set(), set())
is_a_relations = [('a', 'b'), ('a', 'c'), ('b', 'g'), ('c', 'd'), ('c', 'e'), ('e', 'f'), ('e', 'h')]
for t in is_a_relations:
(parent, child) = t
synsets[parent].children.add(synsets[child])
synsets[child].parents.add(synsets[parent])
subset = ['f', 'g']
synsets_subset = [s for s in synsets.values() if (s.words in subset)]
graph_nodes = imagenet_spec.create_sampling_graph(synsets_subset)
spanning_leaves = imagenet_spec.get_spanning_leaves(graph_nodes)
return (graph_nodes, spanning_leaves, synsets_subset)
def _get_in_vals(binst: BloqInstance, reg: Register, soq_assign: Dict[(Soquet, RegPosition)]) -> Union[(RegPosition, NDArray[RegPosition])]:
if (not reg.shape):
return soq_assign[Soquet(binst, reg)]
arg = np.empty(reg.shape, dtype=object)
for idx in reg.all_idxs():
soq = Soquet(binst, reg, idx=idx)
arg[idx] = soq_assign[soq]
return arg
def test_initialize_setup_cfg_only(hatch, helpers, temp_dir):
setup_cfg_file = (temp_dir / 'setup.cfg')
setup_cfg_file.write_text('[metadata]\nname = testapp\nversion = attr:testapp.__version__\ndescription = Foo\nauthor = U.N. Owen\nauthor_email = \nurl = = MIT\n')
with temp_dir.as_cwd():
result = hatch('new', '--init')
assert (result.exit_code == 0), result.output
assert (remove_trailing_spaces(result.output) == helpers.dedent('\n Migrating project metadata from setuptools\n '))
project_file = (temp_dir / 'pyproject.toml')
assert (project_file.read_text() == '[build-system]\nrequires = ["hatchling"]\nbuild-backend = "hatchling.build"\n\n[project]\nname = "testapp"\ndynamic = ["version"]\ndescription = "Foo"\nlicense = "MIT"\nauthors = [\n { name = "U.N. Owen", email = "" },\n]\n\n[project.urls]\nHomepage = " = "testapp/__init__.py"\n\n[tool.hatch.build.targets.sdist]\ninclude = [\n "/testapp",\n]\n')
class TestElements():
def setup_method(self):
test_file_path = mm.datasets.get_path('bubenec')
self.df_buildings = gpd.read_file(test_file_path, layer='buildings')
self.df_tessellation = gpd.read_file(test_file_path, layer='tessellation')
self.df_streets = gpd.read_file(test_file_path, layer='streets')
self.df_streets['nID'] = range(len(self.df_streets))
self.limit = mm.buffered_limit(self.df_buildings, 50)
self.enclosures = mm.enclosures(self.df_streets, gpd.GeoSeries([self.limit.exterior]))
def test_Tessellation(self):
tes = mm.Tessellation(self.df_buildings, 'uID', self.limit, segment=2)
tessellation = tes.tessellation
assert (len(tessellation) == len(self.df_tessellation))
bands = mm.Tessellation(self.df_streets, 'nID', mm.buffered_limit(self.df_streets, 50), segment=5).tessellation
assert (len(bands) == len(self.df_streets))
def test_enclosed_tess(self):
enc1 = mm.Tessellation(self.df_buildings, 'uID', enclosures=self.enclosures).tessellation
assert (len(enc1) == 155)
assert isinstance(enc1, gpd.GeoDataFrame)
enc1_loop = mm.Tessellation(self.df_buildings, 'uID', enclosures=self.enclosures, use_dask=False).tessellation
assert (len(enc1) == 155)
assert isinstance(enc1, gpd.GeoDataFrame)
assert (len(enc1_loop) == 155)
assert isinstance(enc1_loop, gpd.GeoDataFrame)
assert_geodataframe_equal(enc1, enc1_loop)
def test_limit_enclosures_combo_error(self):
with pytest.raises(ValueError, match='Both `limit` and `enclosures` cannot'):
mm.Tessellation(self.df_buildings, 'uID', limit=self.limit, enclosures=self.enclosures)
def test_custom_enclosure_id(self):
encl = self.enclosures.copy()
ids = list(range((len(encl) * 2)))
shuffle(ids)
encl['eID'] = ids[:len(encl)]
encl.index = ids[:len(encl)]
enc = mm.Tessellation(self.df_buildings, 'uID', enclosures=encl).tessellation
assert (len(enc) == 155)
assert isinstance(enc, gpd.GeoDataFrame)
def test_erroroneous_geom(self):
df = self.df_buildings
b = df.total_bounds
x = np.mean([b[0], b[2]])
y = np.mean([b[1], b[3]])
df.loc[144] = [145, Polygon([(x, y), (x, (y + 1)), ((x + 1), y)])]
df.loc[145] = [146, MultiPoint([(x, y), ((x + 1), y)]).buffer(0.55)]
df.loc[146] = [147, affinity.rotate(df.geometry.iloc[0], 12)]
with pytest.warns(UserWarning, match='Tessellation does not fully match buildings.'):
mm.Tessellation(df, 'uID', self.limit)
with pytest.warns(UserWarning, match='Tessellation contains MultiPolygon elements.'):
tess = mm.Tessellation(df, 'uID', self.limit)
assert (tess.collapsed == {145})
assert (len(tess.multipolygons) == 3)
def test_crs_error(self):
with pytest.raises(ValueError, match='Geometry is in a geographic CRS'):
mm.Tessellation(self.df_buildings.to_crs(4326), 'uID', self.limit)
def test_Blocks(self):
blocks = mm.Blocks(self.df_tessellation, self.df_streets, self.df_buildings, 'bID', 'uID')
assert (not blocks.tessellation_id.isna().any())
assert (not blocks.buildings_id.isna().any())
assert (len(blocks.blocks) == 8)
with pytest.raises(ValueError, match="'uID' column cannot be"):
mm.Blocks(self.df_tessellation, self.df_streets, self.df_buildings, 'uID', 'uID')
def test_Blocks_non_default_index(self):
tessellation = self.df_tessellation.copy()
tessellation.index = (tessellation.index * 3)
buildings = self.df_buildings.copy()
buildings.index = (buildings.index * 5)
blocks = mm.Blocks(tessellation, self.df_streets, buildings, 'bID', 'uID')
assert_index_equal(tessellation.index, blocks.tessellation_id.index)
assert_index_equal(buildings.index, blocks.buildings_id.index)
def test_Blocks_inner(self):
streets = self.df_streets.copy()
streets.loc[(35, 'geometry')] = self.df_buildings.geometry.iloc[141].representative_point().buffer(20).exterior
blocks = mm.Blocks(self.df_tessellation, streets, self.df_buildings, 'bID', 'uID')
assert (not blocks.tessellation_id.isna().any())
assert (not blocks.buildings_id.isna().any())
assert (len(blocks.blocks) == 9)
assert (len(blocks.blocks.sindex.query_bulk(blocks.blocks.geometry, 'overlaps')[0]) == 0)
def test_get_network_id(self):
buildings_id = mm.get_network_id(self.df_buildings, self.df_streets, 'nID')
assert (not buildings_id.isna().any())
def test_get_network_id_duplicate(self):
self.df_buildings['nID'] = range(len(self.df_buildings))
buildings_id = mm.get_network_id(self.df_buildings, self.df_streets, 'nID')
assert (not buildings_id.isna().any())
def test_get_node_id(self):
nx = mm.gdf_to_nx(self.df_streets)
(nodes, edges) = mm.nx_to_gdf(nx)
self.df_buildings['nID'] = mm.get_network_id(self.df_buildings, self.df_streets, 'nID')
ids1 = mm.get_node_id(self.df_buildings, nodes, edges, 'nodeID', 'nID')
assert (not ids1.isna().any())
edges['nID'] = edges['nID'].astype(str)
_df_buildings = self.df_buildings.copy()
_df_buildings['nID'] = _df_buildings['nID'].astype(str)
_df_buildings.loc[([0, 1], 'nID')] = pd.NA
ids2 = mm.get_node_id(_df_buildings, nodes, edges, 'nodeID', 'nID')
assert (ids2.isna().sum() == 2)
np.testing.assert_array_equal(ids2[ids2.isna()].index, [0, 1])
def test_get_node_id_ratio(self):
nx = mm.gdf_to_nx(self.df_streets)
(nodes, edges) = mm.nx_to_gdf(nx)
convex_hull = edges.unary_union.convex_hull
enclosures = mm.enclosures(edges, limit=gpd.GeoSeries([convex_hull]))
enclosed_tess = mm.Tessellation(self.df_buildings, unique_id='uID', enclosures=enclosures).tessellation
links = mm.get_network_ratio(enclosed_tess, edges)
enclosed_tess[links.columns] = links
ids = mm.get_node_id(enclosed_tess, nodes, edges, node_id='nodeID', edge_keys='edgeID_keys', edge_values='edgeID_values')
assert (not ids.isna().any())
def test_enclosures(self):
basic = mm.enclosures(self.df_streets)
assert (len(basic) == 7)
assert isinstance(basic, gpd.GeoDataFrame)
limited = mm.enclosures(self.df_streets, self.limit)
assert (len(limited) == 20)
assert isinstance(limited, gpd.GeoDataFrame)
limited2 = mm.enclosures(self.df_streets, gpd.GeoSeries([self.limit]))
assert (len(limited2) == 20)
assert isinstance(limited2, gpd.GeoDataFrame)
b = self.limit.bounds
additional_barrier = gpd.GeoSeries([LineString([(b[0], b[1]), (b[2], b[3])])])
additional = mm.enclosures(self.df_streets, gpd.GeoSeries([self.limit]), [additional_barrier])
assert (len(additional) == 28)
assert isinstance(additional, gpd.GeoDataFrame)
with pytest.raises(TypeError, match='`additional_barriers` expects a list'):
additional = mm.enclosures(self.df_streets, gpd.GeoSeries([self.limit]), additional_barrier)
limit = self.df_streets.unary_union.convex_hull.buffer((- 100))
encl = mm.enclosures(self.df_streets, limit=gpd.GeoSeries([limit]), clip=True)
assert (len(encl) == 18)
def test_get_network_ratio(self):
convex_hull = self.df_streets.unary_union.convex_hull
enclosures = mm.enclosures(self.df_streets, limit=gpd.GeoSeries([convex_hull]))
enclosed_tess = mm.Tessellation(self.df_buildings, unique_id='uID', enclosures=enclosures).tessellation
links = mm.get_network_ratio(enclosed_tess, self.df_streets, initial_buffer=10)
assert (links.edgeID_values.apply((lambda x: sum(x))).sum() == len(enclosed_tess))
m = (enclosed_tess['uID'] == 110)
assert (sorted(links.loc[m].iloc[0]['edgeID_keys']) == [0, 34])
enclosed_tess.index = [str(uuid.uuid4()) for _ in range(len(enclosed_tess))]
links2 = mm.get_network_ratio(enclosed_tess, self.df_streets, initial_buffer=10)
assert_index_equal(enclosed_tess.index, links2.index, check_order=False)
expected_head = [[0, 34], [34], [34], [0], [0, 15, 3, 14, 4, 7]]
expected_tail = [[28], [29], [28], [32], [21]]
for (i, idx) in enumerate(expected_head):
assert (sorted(links2.edgeID_keys.iloc[i]) == sorted(idx))
for (i, idx) in enumerate(expected_tail):
assert (sorted(links2.edgeID_keys.tail(5).iloc[i]) == sorted(idx))
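# 2-D Rosenbrock benchmark objective; evaluate_true returns the function value followed by its gradient, i.e.
# [f(x), df/dx_0, df/dx_1].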
class Rosenbrock(object):
def __init__(self):
self._dim = 2
self._search_domain = numpy.repeat([[(- 2.0), 2.0]], self._dim, axis=0)
self._num_init_pts = 3
self._sample_var = 0.0
self._min_value = 0.0
self._observations = []
self._num_fidelity = 0
def evaluate_true(self, x):
value = 0.0
for i in range((self._dim - 1)):
value += (pow((1.0 - x[i]), 2.0) + (100.0 * pow((x[(i + 1)] - pow(x[i], 2.0)), 2.0)))
results = [value]
for i in range((self._dim - 1)):
results += [((2.0 * (x[i] - 1)) - ((400.0 * x[i]) * (x[(i + 1)] - pow(x[i], 2.0))))]
results += [(200.0 * (x[(self._dim - 1)] - pow(x[(self._dim - 2)], 2.0)))]
return numpy.array(results)
def evaluate(self, x):
return self.evaluate_true(x)
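# Yields a pair of (original, replacement) file handles and, on a clean exit, copies the original file's mode
# onto the temp file and moves it into place; written as a generator, presumably intended to be wrapped with
# contextlib.contextmanager (the decorator is not shown here).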
def _replace_file(original_path):
(fh, replacement_path) = tempfile.mkstemp()
try:
with os.fdopen(fh, 'w') as replacement:
with open(original_path) as original:
(yield (original, replacement))
except Exception:
raise
else:
shutil.copymode(original_path, replacement_path)
os.remove(original_path)
shutil.move(replacement_path, original_path)
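# Alembic-style downgrade from asset-DB schema version 4: restores `exchange` from `exchange_full`, drops the
# extra column, and recreates the symbol indexes. The `@downgrades` decorator name below is an assumption; only
# its argument survived extraction.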
@downgrades(4)
def _downgrade_v4(op):
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute('UPDATE equities SET exchange = exchange_full')
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
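# Message filter that triggers when a user's messages within the last `interval` seconds contain more than
# `threshold` newlines in total, or more than `consecutive_threshold` newlines in a single run.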
class NewlinesFilter(UniqueFilter):
name = 'newlines'
events = (Event.MESSAGE,)
extra_fields_type = ExtraNewlinesSettings
async def triggered_on(self, ctx: FilterContext) -> bool:
earliest_relevant_at = (arrow.utcnow() - timedelta(seconds=self.extra_fields.interval))
relevant_messages = list(takewhile((lambda msg: (msg.created_at > earliest_relevant_at)), ctx.content))
detected_messages = {msg for msg in relevant_messages if (msg.author == ctx.author)}
newline_counts = []
for msg in detected_messages:
newline_counts += [len(group) for group in NEWLINES.findall(msg.content)]
total_recent_newlines = sum(newline_counts)
max_newline_group = max(newline_counts, default=0)
if (total_recent_newlines > self.extra_fields.threshold):
ctx.related_messages |= detected_messages
ctx.filter_info[self] = f'sent {total_recent_newlines} newlines'
return True
if (max_newline_group > self.extra_fields.consecutive_threshold):
ctx.related_messages |= detected_messages
ctx.filter_info[self] = f'sent {max_newline_group} consecutive newlines'
return True
return False |
def test_pytest_configure_warning(pytester: Pytester, recwarn) -> None:
pytester.makeconftest('\n def pytest_configure():\n import warnings\n\n warnings.warn("from pytest_configure")\n ')
result = pytester.runpytest()
assert (result.ret == 5)
assert ('INTERNALERROR' not in result.stderr.str())
warning = recwarn.pop()
assert (str(warning.message) == 'from pytest_configure') |
class notMNIST(torch.utils.data.Dataset):
def __init__(self, root, train=True, transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = 'notmnist.zip'
self.url = '
fpath = os.path.join(root, self.filename)
if (not os.path.isfile(fpath)):
if (not download):
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print(('Downloading from ' + self.url))
self.download()
training_file = 'notmnist_train.pkl'
testing_file = 'notmnist_test.pkl'
if train:
with open(os.path.join(root, training_file), 'rb') as f:
train = pickle.load(f)
self.data = train['features'].astype(np.uint8)
self.labels = train['labels'].astype(np.uint8)
else:
with open(os.path.join(root, testing_file), 'rb') as f:
test = pickle.load(f)
self.data = test['features'].astype(np.uint8)
self.labels = test['labels'].astype(np.uint8)
def __getitem__(self, index):
(img, target) = (self.data[index], self.labels[index])
img = Image.fromarray(img[0])
if (self.transform is not None):
img = self.transform(img)
return (img, target)
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if (e.errno == errno.EEXIST):
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close() |
def ResidualNet(network_type, depth, num_classes, att_type, joint):
assert (network_type in ['ImageNet', 'CIFAR10', 'CIFAR100']), 'network type should be ImageNet or CIFAR10 / CIFAR100'
assert (depth in [18, 34, 50, 101]), 'network depth should be 18, 34, 50 or 101'
if (depth == 18):
model = ResNet(BasicBlock, [2, 2, 2, 2], network_type, num_classes, att_type, joint)
elif (depth == 34):
model = ResNet(BasicBlock, [3, 4, 6, 3], network_type, num_classes, att_type, joint)
elif (depth == 50):
model = ResNet(Bottleneck, [3, 4, 6, 3], network_type, num_classes, att_type, joint)
elif (depth == 101):
model = ResNet(Bottleneck, [3, 4, 23, 3], network_type, num_classes, att_type, joint)
return model |
class Effect6607(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Armored Command') or mod.item.requiresSkill('Information Command'))), 'warfareBuff4Value', src.getModifiedItemAttr('shipBonusSupercarrierA5'), skill='Amarr Carrier', **kwargs)
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Armored Command') or mod.item.requiresSkill('Information Command'))), 'warfareBuff3Value', src.getModifiedItemAttr('shipBonusSupercarrierA5'), skill='Amarr Carrier', **kwargs)
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Armored Command') or mod.item.requiresSkill('Information Command'))), 'buffDuration', src.getModifiedItemAttr('shipBonusSupercarrierA5'), skill='Amarr Carrier', **kwargs)
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Armored Command') or mod.item.requiresSkill('Information Command'))), 'warfareBuff2Value', src.getModifiedItemAttr('shipBonusSupercarrierA5'), skill='Amarr Carrier', **kwargs)
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Armored Command') or mod.item.requiresSkill('Information Command'))), 'warfareBuff1Value', src.getModifiedItemAttr('shipBonusSupercarrierA5'), skill='Amarr Carrier', **kwargs) |
class BitStatusWidget(HBox, SiemensWidget, _Mixin_DB_property, _Mixin_Byte_property, _Mixin_Bit_property):
icon = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEUAAAAdCAIAAABzMjbkAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAJrSURBVFhH3ZQxSBtRGMffcpDlpiylEG5xOYTQrUgGN5FMEjIJIhnawVGQuJxwk0OLHUKXgtCCZMgSh3aQFjoIVlBKaCDFQggR4pBMN914vvfu/9XLJTUqr5/gj/9wd3m53/e99+6JJ0jEC0nNB8DDBUnNB8DDBUnNB8DDBUnNB8DDBUnNB8DDBUnNB8DDBUnNB8CTpu27otzEzXTuMGQSks5KIDonwl8Tji2aqZ/+EQBPmkfup7kqnILwd8Rzce9+wovG+rxtyetMbrnWCnWhhC54YsTEkGZZuH47LjlxMzj2l3MZOcay5zeOBvJJ/JdkHbelLaTmnv30agvC3T4Poig49/KWUz1TlSQn/6zqWHkvHrHt/i08OWR6P99e2fZKvR9GYb9RzlrFgyFDP8ODopUtvjv5M5Le0WXvStY9Xmxw1buUP2pa3hw9n92PnojXhz/VK/ESSCeqmZ6H9CNVnfpGYS6bUbvJP1a7YrxYvW0cvd80d+5Hrsv3vdKLZ7Yl99t640LOSfyGZB235SH93Mx+0P20Ylurn9V1oli1gE7lsJtet5n93Kx2ODqVGzW/+5uhH7UpFt50pDfs18f6Wfyg1yr+wE5lz0H362beSvaDIfra9eRJEY5+vV+y437iifgiv5/4w+PpJwpb+yU6vQpb+hRSFb59KZ/p7sJWLT6lMrlyteKKpY/D9JBo+MNTN/ogqyzSYg2Otgp0vpX21cGolP+5H1ZIaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj4AHi5Iaj5PDiGuAaoeNYuSC/YWAAAAAElFTkSuQmCC'
@property
@editor_attribute_decorator('WidgetSpecific', 'The label text', str, {})
def text(self):
return self.label.text
@text.setter
def text(self, v):
self.label.text = v
label = None
label_value = None
def __init__(self, text='bit status widget', db_index=(- 1), byte_index=(- 1), bit_index=(- 1), *args, **kwargs):
default_style = {'position': 'absolute', 'left': '10px', 'top': '10px', 'align-items': 'stretch', 'justify-content': 'flex-start'}
default_style.update(kwargs.get('style', {}))
kwargs['style'] = default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width', '100px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height', '30px'))
super(BitStatusWidget, self).__init__(*args, **kwargs)
SiemensWidget._setup(self)
_style = style_inheritance_text_dict
_style.update(style_inheritance_dict)
_style['border'] = '1px solid black'
self.label = gui.Label(text, width='100%', height='100%', style=_style)
_style.update({'background-color': 'gray', 'text-align': 'center'})
self.label_value = gui.Label('0', width='30px', height='100%', style=_style)
self.append([self.label, self.label_value])
self.db_index = db_index
self.byte_index = byte_index
self.bit_index = bit_index
def update(self, *args):
if (self.plc_instance == None):
return
if ((self.db_index < 0) or (self.byte_index < 0) or (self.bit_index < 0)):
return
value = self.plc_instance.get_bool(self.db_index, self.byte_index, self.bit_index)
self.label_value.set_text(('1' if value else '0'))
style = {'border': '1px solid black', 'background-color': 'gray'}
if value:
style = {'border': '1px solid black', 'background-color': 'yellow'}
self.label_value.style.update(style) |
class CalcSwapLocalModuleCommand(wx.Command):
def __init__(self, fitID, position1, position2):
wx.Command.__init__(self, True, 'Swap Modules')
self.fitID = fitID
self.position1 = position1
self.position2 = position2
def Do(self):
pyfalog.debug('Doing swapping between {} and {} for fit {}'.format(self.position1, self.position2, self.fitID))
self.__swap(self.fitID, self.position1, self.position2)
return True
def Undo(self):
self.__swap(self.fitID, self.position2, self.position1)
pyfalog.debug('Undoing swapping between {} and {} for fit {}'.format(self.position1, self.position2, self.fitID))
return True
def __swap(self, fitID, position1, position2):
fit = Fit.getInstance().getFit(fitID)
mod1 = fit.modules[position1]
mod2 = fit.modules[position2]
fit.modules.free(position1)
fit.modules.free(position2)
fit.modules.replace(position2, mod1)
if ((len(fit.modules) <= position2) or (fit.modules[position2] is not mod1)):
fit.modules.replace(position1, mod1)
fit.modules.replace(position2, mod2)
return False
fit.modules.replace(position1, mod2)
if ((len(fit.modules) <= position1) or (fit.modules[position1] is not mod2)):
fit.modules.free(position2)
fit.modules.replace(position1, mod1)
fit.modules.replace(position2, mod2)
return False
return True |
@_call_aside
def _initialize_master_working_set():
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script
tuple((dist.activate(replace=False) for dist in working_set))
add_activation_listener((lambda dist: dist.activate(replace=True)), existing=False)
working_set.entries = []
list(map(working_set.add_entry, sys.path))
globals().update(locals()) |
def bounding_control_points(beam, beam_collimation, rotation_direction, dose_rate):
cps = {}
cps['first'] = beam.ControlPointSequence[0]
cps['mid'] = beam.ControlPointSequence[1]
cps['last'] = beam.ControlPointSequence[(- 1)]
for cp in cps.values():
cp.BeamLimitingDevicePositionSequence = beam_collimation
cp.DoseRateSet = dose_rate
for key in ['first', 'mid']:
cps[key].BeamLimitingDeviceRotationDirection = rotation_direction
return cps |
def read_left_context_phones(filename):
ans = [line.strip(' \t\r\n') for line in open(filename, 'r', encoding='latin-1')]
if (len(ans) == 0):
raise RuntimeError('The file {0} contains no left-context phones.'.format(filename))
whitespace = re.compile('[ \t]+')
for s in ans:
if (len(whitespace.split(s)) != 1):
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s))
if (len(set(ans)) != len(ans)):
raise RuntimeError('Duplicate left-context phones are present in file {0}'.format(filename))
return ans |
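A usage sketch (the file name and phone labels are illustrative, not from the source): the function expects one phone label per line, with no internal whitespace and no duplicates.
with open('left_context_phones.txt', 'w', encoding='latin-1') as f:
    f.write('sil\na\nb\n')
assert read_left_context_phones('left_context_phones.txt') == ['sil', 'a', 'b']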
def train_model(args, dr_train: DataReader, model, pset, nset):
assert torch.cuda.is_available(), 'no GPU available'
cuda = torch.device('cuda')
cpu = torch.device('cpu')
model.to(cuda)
gids = {'pos': pset, 'neg': nset}
gdata = {}
loader = {}
for key in ['pos', 'neg']:
gdata[key] = GraphData(dr_train, gids[key])
loader[key] = DataLoader(gdata[key], batch_size=args.batch_size, shuffle=False, collate_fn=collate_batch)
train_params = list(filter((lambda p: p.requires_grad), model.parameters()))
optimizer = optim.Adam(train_params, lr=args.lr, weight_decay=args.weight_decay, betas=(0.5, 0.999))
scheduler = lr_scheduler.MultiStepLR(optimizer, args.lr_decay_steps, gamma=0.1)
loss_fn = F.cross_entropy
model.train()
for epoch in range(args.train_epochs):
optimizer.zero_grad()
losses = {'pos': 0.0, 'neg': 0.0}
n_samples = {'pos': 0.0, 'neg': 0.0}
for key in ['pos', 'neg']:
for (batch_idx, data) in enumerate(loader[key]):
for i in range(len(data)):
data[i] = data[i].to(cuda)
output = model(data)
if (len(output.shape) == 1):
output = output.unsqueeze(0)
losses[key] += (loss_fn(output, data[4]) * len(output))
n_samples[key] += len(output)
for i in range(len(data)):
data[i] = data[i].to(cpu)
losses[key] = torch.div(losses[key], n_samples[key])
loss = (losses['pos'] + (args.lambd * losses['neg']))
loss.backward()
optimizer.step()
scheduler.step()
model.to(cpu) |
class ClippedScoreModifier(ScoreModifier):
def __init__(self, upper_x: float, lower_x=0.0, high_score=1.0, low_score=0.0) -> None:
assert (low_score < high_score)
self.upper_x = upper_x
self.lower_x = lower_x
self.high_score = high_score
self.low_score = low_score
self.slope = ((high_score - low_score) / (upper_x - lower_x))
self.intercept = (high_score - (self.slope * upper_x))
def __call__(self, x):
y = ((self.slope * x) + self.intercept)
return np.clip(y, self.low_score, self.high_score) |
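A small worked example (illustrative numbers only): with lower_x=2 and upper_x=4 the modifier interpolates linearly between low_score and high_score and clips outside that range.
clip_mod = ClippedScoreModifier(upper_x=4.0, lower_x=2.0)
assert clip_mod(3.0) == 0.5   # halfway between lower_x and upper_x
assert clip_mod(0.0) == 0.0   # clipped to low_score
assert clip_mod(10.0) == 1.0  # clipped to high_score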
def leet_clean(data):
def __convert_leet(word):
word = re.sub('0', 'o', word)
word = re.sub('1', 'i', word)
word = re.sub('3', 'e', word)
word = re.sub('\\$', 's', word)
word = re.sub('@', 'a', word)
return word
if verbose:
print(('#' * 10), 'Step - L33T (with vocab check):')
local_vocab = {}
temp_vocab = _check_vocab(data, local_vocab, response='unknown_list')
temp_vocab = [k for k in temp_vocab if _check_replace(k)]
temp_dict = {}
for word in temp_vocab:
new_word = __convert_leet(word)
if (new_word != word):
if ((len(word) > 2) and (new_word in local_vocab)):
temp_dict[word] = new_word
data = list(map((lambda x: ' '.join([_make_dict_cleaning(i, temp_dict) for i in x.split()])), data))
if verbose:
_print_dict(temp_dict)
return data |
@TRAINER_REGISTRY.register()
class DDAIG(TrainerX):
def __init__(self, cfg):
super().__init__(cfg)
self.lmda = cfg.TRAINER.DDAIG.LMDA
self.clamp = cfg.TRAINER.DDAIG.CLAMP
self.clamp_min = cfg.TRAINER.DDAIG.CLAMP_MIN
self.clamp_max = cfg.TRAINER.DDAIG.CLAMP_MAX
self.warmup = cfg.TRAINER.DDAIG.WARMUP
self.alpha = cfg.TRAINER.DDAIG.ALPHA
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, self.num_classes)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
print('Building D')
self.D = SimpleNet(cfg, cfg.MODEL, self.dm.num_source_domains)
self.D.to(self.device)
print('# params: {:,}'.format(count_num_param(self.D)))
self.optim_D = build_optimizer(self.D, cfg.OPTIM)
self.sched_D = build_lr_scheduler(self.optim_D, cfg.OPTIM)
self.register_model('D', self.D, self.optim_D, self.sched_D)
print('Building G')
self.G = build_network(cfg.TRAINER.DDAIG.G_ARCH, verbose=cfg.VERBOSE)
self.G.to(self.device)
print('# params: {:,}'.format(count_num_param(self.G)))
self.optim_G = build_optimizer(self.G, cfg.OPTIM)
self.sched_G = build_lr_scheduler(self.optim_G, cfg.OPTIM)
self.register_model('G', self.G, self.optim_G, self.sched_G)
def forward_backward(self, batch):
(input, label, domain) = self.parse_batch_train(batch)
input_p = self.G(input, lmda=self.lmda)
if self.clamp:
input_p = torch.clamp(input_p, min=self.clamp_min, max=self.clamp_max)
loss_g = 0
loss_g += F.cross_entropy(self.F(input_p), label)
loss_g -= F.cross_entropy(self.D(input_p), domain)
self.model_backward_and_update(loss_g, 'G')
with torch.no_grad():
input_p = self.G(input, lmda=self.lmda)
if self.clamp:
input_p = torch.clamp(input_p, min=self.clamp_min, max=self.clamp_max)
loss_f = F.cross_entropy(self.F(input), label)
if ((self.epoch + 1) > self.warmup):
loss_fp = F.cross_entropy(self.F(input_p), label)
loss_f = (((1.0 - self.alpha) * loss_f) + (self.alpha * loss_fp))
self.model_backward_and_update(loss_f, 'F')
loss_d = F.cross_entropy(self.D(input), domain)
self.model_backward_and_update(loss_d, 'D')
loss_summary = {'loss_g': loss_g.item(), 'loss_f': loss_f.item(), 'loss_d': loss_d.item()}
if ((self.batch_idx + 1) == self.num_batches):
self.update_lr()
return loss_summary
def model_inference(self, input):
return self.F(input) |
class TestMagicEncode():
class TestInit():
def test_disabled_requires_encoding(self, driver: printer.Dummy) -> None:
with pytest.raises(Error):
MagicEncode(driver, disabled=True)
class TestWriteWithEncoding():
def test_init_from_none(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver, encoding=None)
encode.write_with_encoding('CP858', '€ ist teuro.')
assert (driver.output == b'\x1bt\x13\xd5 ist teuro.')
def test_change_from_another(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver, encoding='CP437')
encode.write_with_encoding('CP858', '€ ist teuro.')
assert (driver.output == b'\x1bt\x13\xd5 ist teuro.')
def test_no_change(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver, encoding='CP858')
encode.write_with_encoding('CP858', '€ ist teuro.')
assert (driver.output == b'\xd5 ist teuro.')
class TestWrite():
def test_write(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver)
encode.write('€ ist teuro.')
assert (driver.output == b'\x1bt\x0f\xa4 ist teuro.')
def test_write_disabled(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver, encoding='CP437', disabled=True)
encode.write('€ ist teuro.')
assert (driver.output == b'? ist teuro.')
def test_write_no_codepage(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver, defaultsymbol='_', encoder=Encoder({'CP437': 1}), encoding='CP437')
encode.write('€ ist teuro.')
assert (driver.output == b'_ ist teuro.')
class TestForceEncoding():
def test(self, driver: printer.Dummy) -> None:
encode = MagicEncode(driver)
encode.force_encoding('CP437')
assert (driver.output == b'\x1bt\x00')
encode.write('€ ist teuro.')
assert (driver.output == b'\x1bt\x00? ist teuro.') |
@pytest.mark.skipif((pytensor.config.floatX == 'float32'), reason='Test is designed for 64bit precision')
def test_log_exp_m1():
check_transform(tr.log_exp_m1, Rplusbig)
check_jacobian_det(tr.log_exp_m1, Rplusbig, elemwise=True)
check_jacobian_det(tr.log_exp_m1, Vector(Rplusbig, 2), pt.vector, [0, 0], elemwise=True)
vals = get_values(tr.log_exp_m1)
assert_array_equal((vals > 0), True) |
def _write_splits(splits_path: Path, splits: Splits) -> None:
logging.warning(f'Creating dataset splits file at {splits_path}')
with splits_path.open('w') as splits_file:
writer = csv.DictWriter(splits_file, fieldnames=FIELD_NAMES)
writer.writeheader()
for split_key in splits:
split = splits[split_key]
for video_name in split:
writer.writerow({FIELD_VIDEO_NAME: video_name, FIELD_SPLIT_KEY: split_key}) |
class CustomizedEpitranTokenizer(BaseTokenizer):
def __init__(self, lang_id, writing_system=None, lexicon=None):
super().__init__(lang_id, None)
self.lang_id = lang_id
self.writing_system = writing_system
if writing_system:
lang_id = ((lang_id + '-') + writing_system)
if (not lexicon):
lexicon = {}
self.lexicon = lexicon
self.g2p = read_epitran_g2p(lang_id)
self.inv = read_inventory(self.lang_id)
self.ipa = read_ipa()
self.regexp = self._construct_regex(self.g2p.keys())
self.nils = defaultdict(int)
self.cache = defaultdict(list)
self.preprocessor = PrePostProcessor(lang_id, 'pre', False)
def _construct_regex(self, g2p_keys):
graphemes = sorted(g2p_keys, key=len, reverse=True)
return re.compile(f"({'|'.join(graphemes)})", re.I)
def match_word(self, text, verbose=False):
ipa_lst = []
while text:
logger.debug('text=', repr(list(text)))
if verbose:
print('text=', repr(list(text)))
m = self.regexp.match(text)
if m:
source = m.group(0)
try:
targets = self.g2p[source]
if verbose:
print(source, ' -> ', targets)
except KeyError:
logger.debug("source = '%s''", source)
logger.debug("self.g2p[source] = %s'", self.g2p[source])
targets = []
except IndexError:
logger.debug('self.g2p[source]= %s', self.g2p[source])
targets = []
ipa_lst.extend(targets)
text = text[len(source):]
else:
self.nils[text[0]] += 2
text = text[1:]
ipa_lst = self.inv.remap(ipa_lst)
return ipa_lst
def tokenize(self, text: str, verbose: bool=False):
text = text.lower()
ipa_lst = []
for word in text.split():
if (word in self.cache):
ipa_lst.extend(self.cache[word])
elif (word in self.lexicon):
phonemes = self.lexicon[word]
ipa_lst.extend(phonemes)
self.cache[word] = phonemes
log = f'lexicon {word} -> {phonemes}'
self.logger.info(log)
if verbose:
print(log)
else:
norm_word = unicodedata.normalize('NFC', word)
norm_word = self.preprocessor.process(norm_word)
word_ipa_lst = self.match_word(norm_word, verbose)
log = f'rule raw: {word} -> norm: {norm_word} -> {word_ipa_lst}'
self.logger.info(log)
if verbose:
print(log)
self.cache[word] = word_ipa_lst
ipa_lst.extend(self.cache[word])
return ipa_lst |
def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(initial_learning_rate=init_lr, decay_steps=num_train_steps, end_learning_rate=0.0)
if num_warmup_steps:
learning_rate_fn = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=learning_rate_fn, warmup_steps=num_warmup_steps)
optimizer = AdamWeightDecay(learning_rate=learning_rate_fn, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['layer_norm', 'bias'])
return optimizer |
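Hypothetical usage (WarmUp and AdamWeightDecay are helpers assumed to be defined alongside this function; 'model' is an illustrative tf.keras model): the learning rate warms up linearly and then decays polynomially to zero.
optimizer = create_optimizer(init_lr=5e-05, num_train_steps=10000, num_warmup_steps=1000)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy')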
class CallReceiver(Receiver):
_e_factors = ('callable',)
protocol = PROTOCOL_CHUNKS
def __init__(self, callable):
self.callable = callable
self.lines = None
super().__init__()
def transmit(self):
if (self.lines is not None):
self.callable(self.lines)
self.lines = None
def accept(self, lines):
self.lines = lines |
class _CppLintState(object):
def __init__(self):
self.verbose_level = 1
self.error_count = 0
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total'
self.errors_by_category = {}
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
self.output_format = output_format
def SetVerboseLevel(self, level):
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
self.counting = counting_style
def SetFilters(self, filters):
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if (not (filt.startswith('+') or filt.startswith('-'))):
raise ValueError(('Every filter in --filters must start with + or - (%s does not)' % filt))
def ResetErrorCounts(self):
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
self.error_count += 1
if (self.counting in ('toplevel', 'detailed')):
if (self.counting != 'detailed'):
category = category.split('/')[0]
if (category not in self.errors_by_category):
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
for (category, count) in self.errors_by_category.iteritems():
sys.stderr.write(("Category '%s' errors found: %d\n" % (category, count)))
sys.stderr.write(('Total errors found: %d\n' % self.error_count)) |
class TestSVD():
def op_numpy(self, A):
return scipy.linalg.svd(A)
def _gen_dm(self, N, rank, dtype):
return qutip.rand_dm(N, rank=rank, dtype=dtype).data
def _gen_non_square(self, N):
mat = np.random.randn(N, (N // 2))
for i in range((N // 2)):
mat[(i, i)] += 5
return _data.Dense(mat)
@pytest.mark.parametrize('shape', ['square', 'non-square'])
def test_mathematically_correct_svd(self, shape):
if (shape == 'square'):
matrix = self._gen_dm(10, 6, Dense)
else:
matrix = self._gen_non_square(12)
(u, s, v) = self.op_numpy(matrix.to_array())
(test_U, test_S, test_V) = _data.svd(matrix, True)
only_S = _data.svd(matrix, False)
assert (sum((test_S > 1e-10)) == 6)
np.testing.assert_allclose(np.abs(test_U.to_array()), np.abs(u), atol=1e-07, rtol=1e-07)
np.testing.assert_allclose(np.abs(test_V.to_array()), np.abs(v), atol=1e-07, rtol=1e-07)
np.testing.assert_allclose(test_S, s, atol=1e-07, rtol=1e-07)
np.testing.assert_allclose(only_S, s, atol=1e-07, rtol=1e-07)
s_as_matrix = _data.diag(test_S, 0, (test_U.shape[1], test_V.shape[0]))
np.testing.assert_allclose(matrix.to_array(), ((test_U @ s_as_matrix) @ test_V).to_array(), atol=1e-07, rtol=1e-07)
def test_mathematically_correct_svd_csr(self):
rank = 5
matrix = self._gen_dm(100, rank, CSR)
(test_U, test_S1, test_V) = _data.svd_csr(matrix, True, k=rank)
test_S2 = _data.svd_csr(matrix, False, k=rank)
assert (len(test_S1) == rank)
assert (len(test_S2) == rank)
np.testing.assert_allclose(matrix.to_array(), ((test_U @ _data.diag(test_S1, 0)) @ test_V).to_array(), atol=1e-07, rtol=1e-07) |
class MappingType(BaseType):
MAPPING: DictType[(str, Tuple[(Any, Optional[str])])] = {}
def __init__(self, *, none_ok: bool=False, completions: _Completions=None) -> None:
super().__init__(none_ok=none_ok, completions=completions)
self.valid_values = ValidValues(*[(key, doc) for (key, (_val, doc)) in self.MAPPING.items()])
def to_py(self, value: Any) -> Any:
self._basic_py_validation(value, str)
if isinstance(value, usertypes.Unset):
return value
elif (not value):
return None
self._validate_valid_values(value.lower())
(mapped, _doc) = self.MAPPING[value.lower()]
return mapped
def __repr__(self) -> str:
return utils.get_repr(self, none_ok=self.none_ok, valid_values=self.valid_values) |
class TestInputMediaVideoWithoutRequest(TestInputMediaVideoBase):
def test_slot_behaviour(self, input_media_video):
inst = input_media_video
for attr in inst.__slots__:
assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'
def test_expected_values(self, input_media_video):
assert (input_media_video.type == self.type_)
assert (input_media_video.media == self.media)
assert (input_media_video.caption == self.caption)
assert (input_media_video.width == self.width)
assert (input_media_video.height == self.height)
assert (input_media_video.duration == self.duration)
assert (input_media_video.parse_mode == self.parse_mode)
assert (input_media_video.caption_entities == tuple(self.caption_entities))
assert (input_media_video.supports_streaming == self.supports_streaming)
assert isinstance(input_media_video.thumbnail, InputFile)
assert (input_media_video.has_spoiler == self.has_spoiler)
def test_caption_entities_always_tuple(self):
input_media_video = InputMediaVideo(self.media)
assert (input_media_video.caption_entities == ())
def test_to_dict(self, input_media_video):
input_media_video_dict = input_media_video.to_dict()
assert (input_media_video_dict['type'] == input_media_video.type)
assert (input_media_video_dict['media'] == input_media_video.media)
assert (input_media_video_dict['caption'] == input_media_video.caption)
assert (input_media_video_dict['width'] == input_media_video.width)
assert (input_media_video_dict['height'] == input_media_video.height)
assert (input_media_video_dict['duration'] == input_media_video.duration)
assert (input_media_video_dict['parse_mode'] == input_media_video.parse_mode)
assert (input_media_video_dict['caption_entities'] == [ce.to_dict() for ce in input_media_video.caption_entities])
assert (input_media_video_dict['supports_streaming'] == input_media_video.supports_streaming)
assert (input_media_video_dict['has_spoiler'] == input_media_video.has_spoiler)
def test_with_video(self, video):
input_media_video = InputMediaVideo(video, caption='test 3')
assert (input_media_video.type == self.type_)
assert (input_media_video.media == video.file_id)
assert (input_media_video.width == video.width)
assert (input_media_video.height == video.height)
assert (input_media_video.duration == video.duration)
assert (input_media_video.caption == 'test 3')
def test_with_video_file(self, video_file):
input_media_video = InputMediaVideo(video_file, caption='test 3')
assert (input_media_video.type == self.type_)
assert isinstance(input_media_video.media, InputFile)
assert (input_media_video.caption == 'test 3')
def test_with_local_files(self):
input_media_video = InputMediaVideo(data_file('telegram.mp4'), thumbnail=data_file('telegram.jpg'))
assert (input_media_video.media == data_file('telegram.mp4').as_uri())
assert (input_media_video.thumbnail == data_file('telegram.jpg').as_uri()) |
def convert_cached_name(file_name, batch_size):
prefix = (((CACHE_DIR + 'batch_size_') + str(batch_size)) + '_')
prefix += file_name.strip().split('/')[(- 1)]
train_cache_name = prefix.replace('.txt', '.tfrecord').replace('.csv', '.tfrecord').replace('.libsvm', '.tfrecord')
return train_cache_name |
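A worked example of the name mangling (CACHE_DIR is a module-level constant, assumed here to be './cache/' purely for illustration):
# convert_cached_name('data/train.csv', 32)
# -> prefix './cache/batch_size_32_' + basename 'train.csv'
# -> './cache/batch_size_32_train.tfrecord' after the extension rewrite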
class ResumeStream(Scaffold):
async def resume_stream(self, chat_id: Union[(int, str)]):
if (self._app is None):
raise NoMTProtoClientSet()
if (not self._is_running):
raise ClientNotStarted()
chat_id = (await self._resolve_chat_id(chat_id))
try:
status = (await ToAsync(self._binding.resume, chat_id))
return status
except ConnectionError:
raise NotInGroupCallError() |
class _TopLevelFinder():
def __init__(self, dist: Distribution, name: str):
self.dist = dist
self.name = name
def __call__(self, wheel: 'WheelFile', files: List[str], mapping: Dict[(str, str)]):
src_root = (self.dist.src_root or os.curdir)
top_level = chain(_find_packages(self.dist), _find_top_level_modules(self.dist))
package_dir = (self.dist.package_dir or {})
roots = _find_package_roots(top_level, package_dir, src_root)
namespaces_: Dict[(str, List[str])] = dict(chain(_find_namespaces((self.dist.packages or []), roots), ((ns, []) for ns in _find_virtual_namespaces(roots))))
legacy_namespaces = {pkg: find_package_path(pkg, roots, (self.dist.src_root or '')) for pkg in (self.dist.namespace_packages or [])}
mapping = {**roots, **legacy_namespaces}
name = f'__editable__.{self.name}.finder'
finder = _normalization.safe_identifier(name)
content = bytes(_finder_template(name, mapping, namespaces_), 'utf-8')
wheel.writestr(f'{finder}.py', content)
content = _encode_pth(f'import {finder}; {finder}.install()')
wheel.writestr(f'__editable__.{self.name}.pth', content)
def __enter__(self):
msg = 'Editable install will be performed using a meta path finder.\n'
_logger.warning((msg + _LENIENT_WARNING))
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
msg = '\n\n Please be careful with folders in your working directory with the same\n name as your package as they may take precedence during imports.\n '
InformationOnly.emit('Editable installation.', msg) |
def draw_tsp_solution(G, order, colors, pos):
G2 = nx.DiGraph()
G2.add_nodes_from(G)
n = len(order)
for i in range(n):
j = ((i + 1) % n)
G2.add_edge(order[i], order[j], weight=G[order[i]][order[j]]['weight'])
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G2, node_color=colors, edge_color='b', node_size=600, alpha=0.8, ax=default_axes, pos=pos)
edge_labels = nx.get_edge_attributes(G2, 'weight')
nx.draw_networkx_edge_labels(G2, pos, font_color='b', edge_labels=edge_labels) |
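Hypothetical usage on a small complete graph (the graph, weights, colors and node order here are illustrative):
G = nx.complete_graph(4)
for (u, v) in G.edges():
    G[u][v]['weight'] = abs(u - v)
pos = nx.spring_layout(G, seed=1)
draw_tsp_solution(G, order=[0, 1, 2, 3], colors=['r', 'g', 'b', 'y'], pos=pos)
plt.show()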
class DataclassTransformSpec():
__slots__ = ('eq_default', 'order_default', 'kw_only_default', 'frozen_default', 'field_specifiers')
def __init__(self, *, eq_default: (bool | None)=None, order_default: (bool | None)=None, kw_only_default: (bool | None)=None, field_specifiers: (tuple[(str, ...)] | None)=None, frozen_default: (bool | None)=None) -> None:
self.eq_default = (eq_default if (eq_default is not None) else True)
self.order_default = (order_default if (order_default is not None) else False)
self.kw_only_default = (kw_only_default if (kw_only_default is not None) else False)
self.frozen_default = (frozen_default if (frozen_default is not None) else False)
self.field_specifiers = (field_specifiers if (field_specifiers is not None) else ())
def serialize(self) -> JsonDict:
return {'eq_default': self.eq_default, 'order_default': self.order_default, 'kw_only_default': self.kw_only_default, 'frozen_default': self.frozen_default, 'field_specifiers': list(self.field_specifiers)}
@classmethod
def deserialize(cls, data: JsonDict) -> DataclassTransformSpec:
return DataclassTransformSpec(eq_default=data.get('eq_default'), order_default=data.get('order_default'), kw_only_default=data.get('kw_only_default'), frozen_default=data.get('frozen_default'), field_specifiers=tuple(data.get('field_specifiers', []))) |
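Round-trip sketch (the field specifier names are illustrative): serialize emits a JSON-compatible dict and deserialize restores an equivalent spec, with field_specifiers coming back as a tuple.
spec = DataclassTransformSpec(eq_default=True, field_specifiers=('field', 'Field'))
restored = DataclassTransformSpec.deserialize(spec.serialize())
assert restored.field_specifiers == ('field', 'Field')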
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--classes', help='The number of classes of dataset.')
parser.add_argument('--size', default=224, help='The image size of train sample.')
parser.add_argument('--batch', default=32, help='The number of train samples per batch.')
parser.add_argument('--epochs', default=300, help='The number of train iterations.')
parser.add_argument('--weights', default=False, help='Fine tune with other weights.')
parser.add_argument('--tclasses', default=0, help='The number of classes of pre-trained model.')
args = parser.parse_args()
train(int(args.batch), int(args.epochs), int(args.classes), int(args.size), args.weights, int(args.tclasses)) |
def delete_user(user=None, email=None, password=None):
if ((user is None) or (email is None)):
log.debug('Deletion failed because either User or email is None')
return False
username = user.username
database_user = get_user_from_db_or_none(username, email)
if (database_user is None):
log.info('Deletion of user "%s" failed, get_user_from_db_or_none returned None.', username)
return False
if (user != database_user):
log.info('Deletion of user "%s" failed, the user from request (pk=%s) and database (pk=%s) differ.', username, user.pk, database_user.pk)
return False
if (user.has_usable_password() and (password is not None)):
authenticated = authenticate(username=username, password=password)
if (authenticated is None):
log.info('Deletion of user with usable password "%s" failed, authenticate returned None.', username)
return False
try:
user.delete()
log.info('Deletion of user with usable password "%s" succeeded.', username)
return True
except Exception as e:
log.error('Deletion of user with usable password "%s" failed, an exception (%s) occured', str(e), username)
return False
elif ((not user.has_usable_password()) and (password is None)):
try:
user.delete()
log.info('Deletion of user without usable password "%s" succeeded.', username)
return True
except Exception as e:
log.error('Deletion of user without usable password "%s" failed, an exception (%s) occured', str(e), username)
return False
else:
log.info('Deletion of user "%s" failed, probably wrong value for password given', username)
return False |
class BoW(nn.Module):
def __init__(self, vocab: List[str], word_weights: Dict[(str, float)]={}, unknown_word_weight: float=1, cumulative_term_frequency: bool=True):
super(BoW, self).__init__()
vocab = list(set(vocab))
self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight', 'cumulative_term_frequency']
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if (word in word_weights):
weight = word_weights[word]
elif (word.lower() in word_weights):
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info('{} out of {} words without a weighting value. Set weight to {}'.format(num_unknown_words, len(vocab), unknown_word_weight))
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[(str, Tensor)]):
return features
def tokenize(self, texts: List[str]) -> List[int]:
tokenized = [self.tokenizer.tokenize(text) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int=0):
vectors = []
for tokens in tokenized_texts:
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {'sentence_embedding': torch.tensor(vectors, dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return BoW(**config) |
def test_multiple_workspaces_from_initialize(pylsp_w_workspace_folders):
(pylsp, workspace_folders) = pylsp_w_workspace_folders
assert (len(pylsp.workspaces) == 2)
folders_uris = [uris.from_fs_path(str(folder)) for folder in workspace_folders]
for folder_uri in folders_uris:
assert (folder_uri in pylsp.workspaces)
assert (folders_uris[0] == pylsp.root_uri)
file1 = workspace_folders[0].join('file1.py')
file1.write('import os')
msg1 = {'uri': path_as_uri(str(file1)), 'version': 1, 'text': 'import os'}
pylsp.m_text_document__did_open(textDocument=msg1)
assert (msg1['uri'] in pylsp.workspace._docs)
assert (msg1['uri'] in pylsp.workspaces[folders_uris[0]]._docs)
file2 = workspace_folders[1].join('file2.py')
file2.write('import sys')
msg2 = {'uri': path_as_uri(str(file2)), 'version': 1, 'text': 'import sys'}
pylsp.m_text_document__did_open(textDocument=msg2)
assert (msg2['uri'] not in pylsp.workspace._docs)
assert (msg2['uri'] in pylsp.workspaces[folders_uris[1]]._docs) |
def get_module_cache(dirname: str, init_args=None) -> ModuleCache:
global _module_cache
if (init_args is None):
init_args = {}
if (_module_cache is None):
_module_cache = ModuleCache(dirname, **init_args)
atexit.register(_module_cache._on_atexit)
elif init_args:
warnings.warn('Ignoring init arguments for module cache because it was created prior to this call')
if (_module_cache.dirname != dirname):
warnings.warn(f'Returning module cache instance with different dirname ({_module_cache.dirname}) than requested ({dirname})')
return _module_cache |
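Usage sketch (ModuleCache is defined elsewhere in the source; the path is illustrative): repeated calls return the same singleton, and mismatched arguments only trigger warnings.
cache = get_module_cache('/tmp/module_cache')
same_cache = get_module_cache('/tmp/module_cache')
assert cache is same_cache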
def test_merge_sauce_options(monkeypatch, testdir):
version = {'seleniumVersion': '3.8.1'}
capabilities = {'browserName': 'chrome', 'sauce:options': version}
expected = {'name': 'test_merge_sauce_options.test_sauce_capabilities'}
expected.update(version)
run_w3c_sauce_test(capabilities, expected, monkeypatch, testdir) |
def sv_l(d_embeddings, l_eval_trial, args):
d_embeddings_all = d_embeddings[0]
d_embeddings_1 = d_embeddings[1]
d_embeddings_2 = d_embeddings[2]
d_embeddings_5 = d_embeddings[3]
(y, y_score_org, y_score_1, y_score_2, y_score_5) = ([], [], [], [], [])
l_trial_split = split_list(l_in=l_eval_trial, nb_split=args.nb_proc_eval, d_embeddings=[d_embeddings_all, d_embeddings_1, d_embeddings_2, d_embeddings_5])
p = Pool(args.nb_proc_eval)
res = p.map(_sp_process_trial_l, l_trial_split)
for (_y, _y_s_org, _y_s_1, _y_s_2, _y_s_5) in res:
y.extend(_y)
y_score_org.extend(_y_s_org)
y_score_1.extend(_y_s_1)
y_score_2.extend(_y_s_2)
y_score_5.extend(_y_s_5)
ys = [y_score_org, y_score_1, y_score_2, y_score_5]
(l_eer, l_min_dcf) = ([], [])
for y_s in ys:
(fpr, tpr, thresholds) = roc_curve(y, y_s, pos_label=1)
fnr = (1 - tpr)
p.close()
p.join()
l_eer.append(get_eer(fnr, fpr))
l_min_dcf.append(get_min_dcf(fpr, fnr))
return (l_eer, l_min_dcf) |
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = (len(train_loader) // max(total_epochs, 1))
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if (train_sampler is not None):
train_sampler.set_epoch(cur_epoch)
if ((lr_warmup_scheduler is not None) and (cur_epoch < optim_cfg.WARMUP_EPOCH)):
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler=cur_scheduler, accumulated_iter=accumulated_iter, optim_cfg=optim_cfg, rank=rank, tbar=tbar, tb_log=tb_log, leave_pbar=((cur_epoch + 1) == total_epochs), total_it_each_epoch=total_it_each_epoch, dataloader_iter=dataloader_iter)
trained_epoch = (cur_epoch + 1)
if (((trained_epoch % ckpt_save_interval) == 0) and (rank == 0)):
ckpt_list = glob.glob(str((ckpt_save_dir / 'checkpoint_epoch_*.pth')))
ckpt_list.sort(key=os.path.getmtime)
if (ckpt_list.__len__() >= max_ckpt_save_num):
for cur_file_idx in range(0, ((len(ckpt_list) - max_ckpt_save_num) + 1)):
os.remove(ckpt_list[cur_file_idx])
ckpt_name = (ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch))
save_checkpoint(checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name) |
class BeaverSshTunnel(BeaverSubprocess):
def __init__(self, beaver_config, logger=None):
super(BeaverSshTunnel, self).__init__(beaver_config, logger=logger)
self._log_template = '[BeaverSshTunnel] - {0}'
key_file = beaver_config.get('ssh_key_file')
tunnel = beaver_config.get('ssh_tunnel')
tunnel_port = beaver_config.get('ssh_tunnel_port')
remote_host = beaver_config.get('ssh_remote_host')
remote_port = beaver_config.get('ssh_remote_port')
ssh_opts = []
if self.get_port(tunnel):
ssh_opts.append('-p {0}'.format(self.get_port(tunnel)))
tunnel = self.get_host(tunnel)
ssh_opts.append('-n')
ssh_opts.append('-N')
ssh_opts.append('-o BatchMode=yes')
ssh_opts = (ssh_opts + beaver_config.get('ssh_options'))
command = 'while true; do ssh {0} -i "{4}" "{5}" -L "{1}:{2}:{3}"; sleep 10; done'
self._command = command.format(' '.join(ssh_opts), tunnel_port, remote_host, remote_port, key_file, tunnel)
self.run()
def get_host(self, tunnel=None):
port = self.get_port(tunnel)
if (not port):
return tunnel
return tunnel[0:(- (len(port) + 1))]
def get_port(self, tunnel=None):
host_port = None
port = None
if tunnel:
host_port = tunnel.split('@')[(- 1)]
if (host_port and (len(host_port.split(':')) == 2)):
port = host_port.split(':')[(- 1)]
return port |
def safe_walk(path: str) -> Iterable[tuple[(str, list[str], list[str])]]:
seen = set()
for (root, dirs, files) in os.walk(path, followlinks=True):
stat = os.stat(root)
identifier = (stat.st_dev, stat.st_ino)
if (identifier in seen):
del dirs[:]
continue
seen.add(identifier)
(yield (root, dirs, files)) |
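Minimal usage sketch: safe_walk behaves like os.walk with followlinks=True, but skips any directory whose (device, inode) pair was already seen, so symlink cycles terminate.
for (root, dirs, files) in safe_walk('.'):
    print(root, len(dirs), len(files))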
class MMD_NCA_loss(nn.Module):
def __init__(self):
super().__init__()
def kernel_function(self, x1, x2):
k1 = torch.exp(((- torch.pow((x1 - x2), 2)) / 2))
k2 = torch.exp(((- torch.pow((x1 - x2), 2)) / 8))
k4 = torch.exp(((- torch.pow((x1 - x2), 2)) / 32))
k8 = torch.exp(((- torch.pow((x1 - x2), 2)) / 128))
k16 = torch.exp(((- torch.pow((x1 - x2), 2)) / 512))
k_sum = ((((k1 + k2) + k4) + k8) + k16)
return k_sum
def MMD(self, x, x_IID, y, y_IID):
m = x.size()[0]
n = y.size()[0]
x = x.view(m, 1, (- 1))
x_square = x.repeat(1, m, 1)
x_IID = x_IID.view((- 1), m, 1)
x_IID_square = x_IID.repeat(m, 1, 1)
value_1 = (torch.sum(self.kernel_function(x_square, x_IID_square)) / (m ** 2))
y = y.view(1, n, (- 1))
y_square = y.repeat(n, 1, 1)
value_2 = (torch.sum(self.kernel_function(x_square, y_square)) / (m * n))
y_IID = y_IID.view(n, 1, (- 1))
y_IID_square = y_IID.repeat(1, n, 1)
value_3 = (torch.sum(self.kernel_function(y_IID_square, y_square)) / (n ** 2))
return ((value_1 - (2 * value_2)) + value_3)
def forward(self, x):
x = x.view(7, 25)
numerator = torch.exp((- self.MMD(x[0], x[0], x[1], x[1])))
value_1 = torch.exp((- self.MMD(x[0], x[0], x[2], x[2])))
value_2 = torch.exp((- self.MMD(x[0], x[0], x[3], x[3])))
value_3 = torch.exp((- self.MMD(x[0], x[0], x[4], x[4])))
value_4 = torch.exp((- self.MMD(x[0], x[0], x[5], x[5])))
value_5 = torch.exp((- self.MMD(x[0], x[0], x[6], x[6])))
denominator = ((((value_1 + value_2) + value_3) + value_4) + value_5)
loss = torch.exp(((- numerator) / denominator))
return loss |
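Usage sketch: forward reshapes its input to (7, 25), i.e. seven 25-dimensional embeddings with the first row acting as the anchor distribution.
criterion = MMD_NCA_loss()
embeddings = torch.randn(7, 25, requires_grad=True)
loss = criterion(embeddings)
loss.backward()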
class SegNet(nn.Module):
def __init__(self, input_nbr=3, label_nbr=19):
super(SegNet, self).__init__()
batchNorm_momentum = 0.1
self.conv11 = nn.Conv2d(input_nbr, 64, kernel_size=3, padding=1)
self.bn11 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn43d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)
self.bn41d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn33d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)
self.bn31d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.bn21d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
self.conv11d = nn.Conv2d(64, label_nbr, kernel_size=3, padding=1)
def forward(self, x):
x11 = F.relu(self.bn11(self.conv11(x)))
x12 = F.relu(self.bn12(self.conv12(x11)))
(x1p, id1) = F.max_pool2d(x12, kernel_size=2, stride=2, return_indices=True)
x21 = F.relu(self.bn21(self.conv21(x1p)))
x22 = F.relu(self.bn22(self.conv22(x21)))
(x2p, id2) = F.max_pool2d(x22, kernel_size=2, stride=2, return_indices=True)
x31 = F.relu(self.bn31(self.conv31(x2p)))
x32 = F.relu(self.bn32(self.conv32(x31)))
x33 = F.relu(self.bn33(self.conv33(x32)))
(x3p, id3) = F.max_pool2d(x33, kernel_size=2, stride=2, return_indices=True)
x41 = F.relu(self.bn41(self.conv41(x3p)))
x42 = F.relu(self.bn42(self.conv42(x41)))
x43 = F.relu(self.bn43(self.conv43(x42)))
(x4p, id4) = F.max_pool2d(x43, kernel_size=2, stride=2, return_indices=True)
x51 = F.relu(self.bn51(self.conv51(x4p)))
x52 = F.relu(self.bn52(self.conv52(x51)))
x53 = F.relu(self.bn53(self.conv53(x52)))
(x5p, id5) = F.max_pool2d(x53, kernel_size=2, stride=2, return_indices=True)
x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)
x53d = F.relu(self.bn53d(self.conv53d(x5d)))
x52d = F.relu(self.bn52d(self.conv52d(x53d)))
x51d = F.relu(self.bn51d(self.conv51d(x52d)))
x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
x43d = F.relu(self.bn43d(self.conv43d(x4d)))
x42d = F.relu(self.bn42d(self.conv42d(x43d)))
x41d = F.relu(self.bn41d(self.conv41d(x42d)))
x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
x33d = F.relu(self.bn33d(self.conv33d(x3d)))
x32d = F.relu(self.bn32d(self.conv32d(x33d)))
x31d = F.relu(self.bn31d(self.conv31d(x32d)))
x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
x22d = F.relu(self.bn22d(self.conv22d(x2d)))
x21d = F.relu(self.bn21d(self.conv21d(x22d)))
x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
x12d = F.relu(self.bn12d(self.conv12d(x1d)))
x11d = self.conv11d(x12d)
return x11d
def load_from_segnet(self, model_path):
s_dict = self.state_dict()
th = torch.load(model_path).state_dict()
self.load_state_dict(th) |
@register_env('DR-PickCube-v1', max_episode_steps=100, override=True)
class DomainRandomizationPickCubeEnvV1(PickCubeEnv):
def reset(self, seed=None, reconfigure=True):
return super().reset(seed, reconfigure)
def _load_actors(self):
self.cube_half_size = self._episode_rng.uniform(0.01, 0.03, size=3)
super()._load_actors() |
class LabelAccuracyEvaluator(SentenceEvaluator):
def __init__(self, dataloader: DataLoader, name: str='', softmax_model=None):
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = ('_' + name)
self.csv_file = (('accuracy_evaluation' + name) + '_results.csv')
self.csv_headers = ['epoch', 'steps', 'accuracy']
def __call__(self, model, output_path: str=None, epoch: int=(- 1), steps: int=(- 1)) -> float:
model.eval()
total = 0
correct = 0
if (epoch != (- 1)):
if (steps == (- 1)):
out_txt = ' after epoch {}:'.format(epoch)
else:
out_txt = ' in epoch {} after {} steps:'.format(epoch, steps)
else:
out_txt = ':'
logging.info(((('Evaluation on the ' + self.name) + ' dataset') + out_txt))
self.dataloader.collate_fn = model.smart_batching_collate
for (step, batch) in enumerate(tqdm(self.dataloader, desc='Evaluating')):
(features, label_ids) = batch_to_device(batch, model.device)
with torch.no_grad():
(_, prediction) = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = (correct / total)
logging.info('Accuracy: {:.4f} ({}/{})\n'.format(accuracy, correct, total))
if (output_path is not None):
csv_path = os.path.join(output_path, self.csv_file)
if (not os.path.isfile(csv_path)):
with open(csv_path, mode='w', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, mode='a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
return accuracy |
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if (args.gpu is not None):
print('Use GPU: {} for training'.format(args.gpu))
if args.distributed:
if ((args.dist_url == 'env://') and (args.rank == (- 1))):
args.rank = int(os.environ['RANK'])
if args.multiprocessing_distributed:
args.rank = ((args.rank * ngpus_per_node) + gpu)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
if (args.arch in models.__dict__):
model = models.__dict__[args.arch]()
elif (args.arch == 'resnet18d'):
from models.resnet_imagenet import resnet18d
model = resnet18d(deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'resnet34d'):
from models.resnet_imagenet import resnet34d
model = resnet34d(deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'resnet50d'):
from models.resnet_imagenet import resnet50d
model = resnet50d(deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'resnet101d'):
from models.resnet_imagenet import resnet101d
model = resnet101d(deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'vgg11d'):
from models.vgg_imagenet import vgg11d
model = vgg11d('VGG11d', deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'vgg16d'):
from models.vgg_imagenet import vgg16d
model = vgg16d('VGG16d', deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
elif (args.arch == 'densenet121d'):
from models.densenet_imagenet import densenet121d
model = densenet121d(deconv=args.deconv, delinear=args.delinear, channel_deconv=args.channel_deconv)
if args.distributed:
if (args.gpu is not None):
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
args.batch_size = int((args.batch_size / ngpus_per_node))
args.workers = int((args.workers / ngpus_per_node))
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
elif (args.gpu is not None):
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
print(args)
parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in parameters])
print(params, 'trainable parameters in the network.')
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if (args.gpu is not None):
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if args.evaluate:
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
validate(val_loader, model, criterion, 0, args)
return
train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
if (args.lr_scheduler == 'multistep'):
milestones = [int((args.milestone * args.epochs))]
while ((milestones[(- 1)] + milestones[0]) < args.epochs):
milestones.append((milestones[(- 1)] + milestones[0]))
args.current_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=args.multistep_gamma)
if (args.lr_scheduler == 'step'):
args.current_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.scheduler_step_size, gamma=args.multistep_gamma)
if (args.lr_scheduler == 'cosine'):
total_steps = (math.ceil((len(train_dataset) / args.batch_size)) * args.epochs)
args.current_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_steps, eta_min=0, last_epoch=(- 1))
if args.resume:
lr = args.lr
if ((args.lr_scheduler == 'multistep') or (args.lr_scheduler == 'step')):
for i in range(args.start_epoch):
args.current_scheduler.step()
if (args.lr_scheduler == 'cosine'):
total_steps = (math.ceil((len(train_dataset) / args.batch_size)) * args.start_epoch)
global n_iter
for i in range(total_steps):
n_iter = (n_iter + 1)
args.current_scheduler.step()
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
if ((args.lr_scheduler == 'multistep') or (args.lr_scheduler == 'step')):
args.current_scheduler.step()
if ((args.lr_scheduler == 'multistep') or (args.lr_scheduler == 'step') or (args.lr_scheduler == 'cosine')):
print('Current learning rate:', args.current_scheduler.get_lr()[0])
train(train_loader, model, criterion, optimizer, epoch, args)
acc1 = validate(val_loader, model, criterion, epoch, args)
if args.save_plot:
plt.subplot(1, 3, 1)
plt.title('Loss Plot', fontsize=10)
plt.xlabel('Epochs', fontsize=10)
plt.ylabel('Loss', fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.plot(args.train_losses, 'b')
plt.plot(args.eval_losses, 'r')
plt.subplot(1, 3, 2)
plt.title('Top 1 Accuracy Plot', fontsize=10)
plt.xlabel('Epochs', fontsize=10)
plt.ylabel('Top 1 Acc', fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.plot(args.train_top1, 'b')
plt.plot(args.eval_top1, 'r')
plt.subplot(1, 3, 3)
plt.title('Top 5 Accuracy Plot', fontsize=10)
plt.xlabel('Epochs', fontsize=10)
plt.ylabel('Top 5 Acc', fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.plot(args.train_top5, 'b')
plt.plot(args.eval_top5, 'r')
plt.savefig(os.path.join(args.log_dir, 'TrainingPlots'))
plt.clf()
is_best = (acc1 > best_acc1)
best_acc1 = max(acc1, best_acc1)
if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best, path=args.log_dir)
args.writer.close() |
@pytest.mark.functions
def test_groupby_agg_multi():
df = pd.DataFrame({'date': ['', '', '', '', '', ''], 'user_id': [1, 2, 1, 2, 1, 2], 'values': [1, 2, 3, 4, 5, 6]})
df_new = df.groupby_agg(by=['date', 'user_id'], new_column_name='date_average', agg_column_name='values', agg=np.count_nonzero)
expected_agg = np.array([1, 1, 1, 1, 1, 1])
np.testing.assert_equal(df_new['date_average'], expected_agg) |
class AngleCalFitter(BaseGateFitter):
def __init__(self, backend_result, xdata, qubits, fit_p0, fit_bounds):
circuit_names = []
for (cind, _) in enumerate(xdata):
circuit_names.append(('anglecal1Qcircuit_%d_' % cind))
BaseGateFitter.__init__(self, '$AngleCal1Q$', backend_result, xdata, qubits, self._angle_cal_fit, fit_p0, fit_bounds, circuit_names, expected_state='1')
@staticmethod
def _angle_cal_fit(x, thetaerr, c):
return AngleCalFitter._cal_fit_fun(x, (- 0.5), thetaerr, thetaerr, (np.pi / 2), (np.pi / 2), c)
def angle_err(self, qind=(- 1)):
fitparam = self._get_param(0, qind, series='0', err=False)
return (np.array(fitparam) / 2)
def plot(self, qind, series='0', ax=None, show_plot=False):
ax = BaseGateFitter.plot(self, qind, series, ax, show_plot)
return ax |
class SawyerPlateSlideBackSideEnvV2(SawyerXYZEnv):
def __init__(self):
goal_low = ((- 0.05), 0.6, 0.015)
goal_high = (0.15, 0.6, 0.015)
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.25), 0.6, 0.0)
obj_high = ((- 0.25), 0.6, 0.0)
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([(- 0.25), 0.6, 0.02], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
self.goal = np.array([0.0, 0.6, 0.015])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_plate_slide_sideway.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place) = self.compute_reward(action, obs)
success = float((obj_to_target <= 0.07))
near_object = float((tcp_to_obj <= 0.03))
info = {'success': success, 'near_object': near_object, 'grasp_success': 0.0, 'grasp_reward': object_grasped, 'in_place_reward': in_place, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
return (reward, info)
def _get_pos_objects(self):
return self.data.get_geom_xpos('puck')
def _get_quat_objects(self):
return Rotation.from_matrix(self.data.get_geom_xmat('puck')).as_quat()
def _get_obs_dict(self):
return dict(state_observation=self._get_obs(), state_desired_goal=self._target_pos, state_achieved_goal=self._get_pos_objects())
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self.obj_init_pos = self.init_config['obj_init_pos']
self._target_pos = self.goal.copy()
if self.random_init:
rand_vec = self._get_state_rand_vec()
self.obj_init_pos = rand_vec[:3]
self._target_pos = rand_vec[3:]
self.sim.model.body_pos[self.model.body_name2id('puck_goal')] = self.obj_init_pos
self._set_obj_xyz(np.array([(- 0.15), 0.0]))
return self._get_obs()
def compute_reward(self, actions, obs):
_TARGET_RADIUS = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
obj_to_target = np.linalg.norm((obj - target))
in_place_margin = np.linalg.norm((self.obj_init_pos - target))
in_place = reward_utils.tolerance(obj_to_target, bounds=(0, _TARGET_RADIUS), margin=(in_place_margin - _TARGET_RADIUS), sigmoid='long_tail')
tcp_to_obj = np.linalg.norm((tcp - obj))
obj_grasped_margin = np.linalg.norm((self.init_tcp - self.obj_init_pos))
object_grasped = reward_utils.tolerance(tcp_to_obj, bounds=(0, _TARGET_RADIUS), margin=(obj_grasped_margin - _TARGET_RADIUS), sigmoid='long_tail')
reward = (1.5 * object_grasped)
if ((tcp[2] <= 0.03) and (tcp_to_obj < 0.07)):
reward = (2 + (7 * in_place))
if (obj_to_target < _TARGET_RADIUS):
reward = 10.0
return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place] |
def run(config):
if (config['wandb_entity'] is not None):
init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')
if (config['G_path'] is None):
download_G()
config['G_path'] = 'checkpoints/138k'
(G, state_dict, device, experiment_name) = load_G(config)
if config['parallel']:
G = nn.DataParallel(DataParallelLoss(G))
if config['cross_replica']:
patch_replication_callback(G)
num_gpus = torch.cuda.device_count()
print(f'Using {num_gpus} GPUs')
pad = get_direction_padding_fn(config)
direction_size = (config['dim_z'] if (config['search_space'] == 'all') else config['ndirs'])
if (config['load_A'] == 'coords'):
print('Initializing with standard basis directions')
A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)
elif (config['load_A'] == 'random'):
print('Initializing with random directions')
A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)
torch.nn.init.kaiming_normal_(A)
else:
raise NotImplementedError
optim = torch.optim.Adam(params=[A], lr=config['A_lr'])
G_batch_size = max(config['G_batch_size'], config['batch_size'])
(z_, y_) = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
(fixed_z, fixed_y) = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
(interp_z, interp_y) = utils.prepare_z_y(config['n_samples'], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
interp_z.sample_()
interp_y.sample_()
if (config['fix_class'] is not None):
y_ = y_.new_full(y_.size(), config['fix_class'])
fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])
interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])
print(('Beginning training at epoch %d...' % state_dict['epoch']))
iters_per_epoch = 1000
dummy_loader = ([None] * iters_per_epoch)
path_size = config['path_size']
direction_indicators = torch.eye(config['ndirs']).to(device)
G.eval()
G.module.optim = optim
writer = SummaryWriter(('%s/%s' % (config['logs_root'], experiment_name)))
sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name)
writer.add_image('samples', sample_sheet, 0)
interp_y_ = G.module.G.shared(interp_y)
Q = (pad(fast_gram_schmidt(A)) if (not config['no_ortho']) else pad(A))
if config['vis_during_training']:
print('Generating initial visualizations...')
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video(('G_ema/w%03d' % w_ix), interp_vis[w_ix], 0, fps=24)
for epoch in range(state_dict['epoch'], config['num_epochs']):
if (config['pbar'] == 'mine'):
pbar = utils.progress(dummy_loader, displaytype=('s1k' if config['use_multiepoch_sampler'] else 'eta'))
else:
pbar = tqdm(dummy_loader)
for (i, _) in enumerate(pbar):
state_dict['itr'] += 1
z_.sample_()
if (config['fix_class'] is None):
y_.sample_()
y = G.module.G.shared(y_)
sampled_directions = torch.randint(low=0, high=config['ndirs'], size=(G_batch_size,), device=device)
distances = torch.rand(G_batch_size, 1, device=device).mul((2 * path_size)).add((- path_size))
w_sampled = (direction_indicators[sampled_directions] * distances)
penalty = G(z_, y, w=w_sampled, Q=Q.repeat(num_gpus, 1)).mean()
optim.zero_grad()
penalty.backward()
optim.step()
Q = (pad(fast_gram_schmidt(A)) if (not config['no_ortho']) else pad(A))
cur_training_iter = ((epoch * iters_per_epoch) + i)
writer.add_scalar('Metrics/hessian_penalty', penalty.item(), cur_training_iter)
writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)
if (not (state_dict['itr'] % config['save_every'])):
torch.save(A.cpu().detach(), ('%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)))
if config['vis_during_training']:
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video(('G_ema/w%03d' % w_ix), interp_vis[w_ix], cur_training_iter, fps=24)
state_dict['epoch'] += 1 |
def detect_traits(name=None, alias=None, filetype=None):
result = []
if filetype:
filetype = filetype.lstrip('.')
theme = config.traits_by_alias.get(alias)
if (alias and theme):
result = [theme, (filetype or 'other')]
elif (filetype in KIND_AUDIO):
result = ['audio', filetype]
elif (filetype in KIND_VIDEO):
result = ['video', filetype]
contents = name_trait(name)
if contents:
result = [contents, filetype]
elif (filetype in KIND_IMAGE):
result = ['img', filetype]
elif (filetype in KIND_DOCS):
result = ['docs', filetype]
elif (filetype in KIND_ARCHIVE):
result = ['misc', filetype]
contents = name_trait(name)
if contents:
result = [contents, filetype]
return result |
class SmtLibScript(object):
def __init__(self):
self.annotations = None
self.commands = []
def add(self, name, args):
self.add_command(SmtLibCommand(name=name, args=args))
def add_command(self, command):
self.commands.append(command)
def evaluate(self, solver):
log = []
for cmd in self.commands:
r = evaluate_command(cmd, solver)
log.append((cmd.name, r))
return log
def contains_command(self, command_name):
return any(((x.name == command_name) for x in self.commands))
def count_command_occurrences(self, command_name):
return sum((1 for cmd in self.commands if (cmd.name == command_name)))
def filter_by_command_name(self, command_name_set):
return (cmd for cmd in self.commands if (cmd.name in command_name_set))
def get_strict_formula(self, mgr=None):
if (self.contains_command(smtcmd.PUSH) or self.contains_command(smtcmd.POP)):
raise PysmtValueError('Was not expecting push-pop commands')
if (self.count_command_occurrences(smtcmd.CHECK_SAT) != 1):
raise PysmtValueError('Was expecting exactly one check-sat command')
_And = (mgr.And if mgr else get_env().formula_manager.And)
assertions = [cmd.args[0] for cmd in self.filter_by_command_name([smtcmd.ASSERT])]
return _And(assertions)
def get_declared_symbols(self):
return {cmd.args[0] for cmd in self.filter_by_command_name([smtcmd.DECLARE_CONST, smtcmd.DECLARE_FUN])}
def get_define_fun_parameter_symbols(self):
res = set()
for cmd in self.filter_by_command_name([smtcmd.DEFINE_FUN]):
for s in cmd.args[1]:
res.add(s)
return res
def get_last_formula(self, mgr=None):
stack = []
backtrack = []
_And = (mgr.And if mgr else get_env().formula_manager.And)
for cmd in self.commands:
if (cmd.name == smtcmd.ASSERT):
stack.append(cmd.args[0])
if (cmd.name == smtcmd.RESET_ASSERTIONS):
stack = []
backtrack = []
elif (cmd.name == smtcmd.PUSH):
for _ in range(cmd.args[0]):
backtrack.append(len(stack))
elif (cmd.name == smtcmd.POP):
for _ in range(cmd.args[0]):
l = backtrack.pop()
stack = stack[:l]
return _And(stack)
def to_file(self, fname, daggify=True):
with open(fname, 'w') as outstream:
self.serialize(outstream, daggify=daggify)
def serialize(self, outstream, daggify=True):
if daggify:
printer = SmtDagPrinter(outstream, annotations=self.annotations)
else:
printer = SmtPrinter(outstream, annotations=self.annotations)
for cmd in self.commands:
cmd.serialize(printer=printer)
outstream.write('\n')
def __len__(self):
return len(self.commands)
def __iter__(self):
return iter(self.commands)
def __str__(self):
return '\n'.join((str(cmd) for cmd in self.commands)) |
@wp.kernel
def compute_particle_residual(particle_qd_0: wp.array(dtype=wp.vec3), particle_qd_1: wp.array(dtype=wp.vec3), particle_f: wp.array(dtype=wp.vec3), particle_m: wp.array(dtype=float), gravity: wp.vec3, dt: float, residual: wp.array(dtype=wp.vec3)):
tid = wp.tid()
m = particle_m[tid]
v1 = particle_qd_1[tid]
v0 = particle_qd_0[tid]
f = particle_f[tid]
err = wp.vec3()
if (m > 0.0):
err = ((((v1 - v0) * m) - (f * dt)) - ((gravity * dt) * m))
residual[tid] = err |
def process_docstring(app, what, name, obj, options, lines):
if ((what == 'class') and issubclass(obj, pyunity.Component)):
indexes = []
for (i, line) in enumerate(lines):
if line.startswith('.. attribute:: '):
indexes.append(i)
for index in reversed(indexes):
name = lines[index].split('::', 1)[1][1:]
if (name in obj._saved):
val = str(obj._saved[name].default)
lines.insert((index + 1), (' :value: ' + val)) |
class InputInjection(nn.Module):
def __init__(self, downsamplingRatio):
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, downsamplingRatio):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, input):
for pool in self.pool:
input = pool(input)
return input |
class BeatGUI(AObject):
def AOBJECT_TYPE(self):
return 'BeatGUI'
def __init__(self, media=None, path=None, clear_temp=None):
AObject.__init__(self, path=path)
if (media is not None):
self.media = media
def initializeBlank(self):
AObject.initializeBlank(self)
self._widget = None
self._media = None
def getJSONName(self):
return (self.AOBJECT_TYPE() + '.json')
@property
def media(self):
return self._getMedia()
def _getMedia(self):
return self._media
@media.setter
def media(self, value):
self._setMedia(value)
def _setMedia(self, value):
self._media = value
@property
def media_type(self):
return self._getMediaType()
def _getMediaType(self):
if (self.media is None):
return None
else:
return self.media.AObjectType()
@property
def widget(self):
return self._getWidget()
def _getWidget(self):
if (self._widget is None):
self._widget = Viewer.VBVSignal()
return self._widget
@widget.setter
def widget(self, value):
self._setWidget(value)
def _setWidget(self, value):
self._widget = value
@property
def frame_rate(self):
return self._getFrameRate()
def _getFrameRate(self):
gfr = self.widget.frame_rate
if (gfr is None):
media = self.media
if (media is not None):
gfr = media.getFrameRate()
return gfr
@frame_rate.setter
def frame_rate(self, value):
self._setFrameRate(value)
def _setFrameRate(self, value):
self.widget.frame_rate = float(value)
@property
def frame_offset(self):
return self._getFrameOffset()
def _getFrameOffset(self):
return self.widget.frame_offset
@frame_offset.setter
def frame_offset(self, value):
self._setFrameOffset(value)
def _setFrameOffset(self, value):
self.widget.frame_offset = value
def run(self, local_saliency=None, frame_rate=None, eventlist='default', frame_offset=None):
if (frame_rate is None):
self.frame_rate = self.media._getFrameRate()
else:
self.frame_rate = frame_rate
if (local_saliency is None):
self.widget.signal = self.media.getLocalRhythmicSaliency().tolist()
else:
self.widget.signal = local_saliency.tolist()
if (frame_offset is None):
self.frame_offset = 0
elif (frame_offset == 'guess'):
self.frame_offset = self.guessFrameOffset()
else:
self.frame_offset = frame_offset
if (eventlist is None):
self.widget.events = []
elif (eventlist == 'default'):
self.widget.events = EventList._toGUIDicts(self.media.getEventList())
else:
self.widget.events = EventList._toGUIDicts(eventlist)
self.widget.data_string = self.media.getStringForHTMLStreamingBase64()
return self.widget
def guessFrameOffset(self):
if isinstance(self.media, Video):
return (self.media.reader.get_length() - self.media.n_frames())
else:
return 0
def deactivateAllEvents(self):
newes = []
gevents = self.getEventDicts()
for e in gevents:
newe = e
newe['is_active'] = 0
newes.append(newe)
self.widget.events = []
self.widget.events = newes
def activateAllEvents(self):
newes = []
gevents = self.getEventDicts()
for e in gevents:
newe = e
newe['is_active'] = 1
newes.append(newe)
self.widget.events = []
self.widget.events = newes
def activatePattern(self, pattern=None, prefix=None, apply_to_active=None):
assert pattern, 'must provide pattern to activate'
newes = []
gevents = self.getGUIEventDicts()
counter = 0
prefix_length = 0
if (prefix is not None):
prefix_length = len(prefix)
for (i, e) in enumerate(gevents):
if apply_to_active:
if e.get('is_active'):
if (counter < prefix_length):
e['is_active'] = prefix[counter]
else:
e['is_active'] = pattern[((counter - prefix_length) % len(pattern))]
counter = (counter + 1)
else:
print('Skipping beat {}, inactive'.format(i))
elif (i < prefix_length):
e['is_active'] = prefix[i]
else:
e['is_active'] = pattern[((i - prefix_length) % len(pattern))]
newes.append(e)
self.widget.events = []
self.widget.events = newes
def shiftEventsByNFrames(self, n_frames=None):
assert n_frames, 'must provide number of frames to shift by'
newes = []
gevents = self.getEventDicts()
sample_step = np.true_divide(1.0, self.getFrameRate())
for e in gevents:
newe = e
newe['start'] = (newe['start'] + (n_frames * sample_step))
newes.append(newe)
self.widget.events = []
self.widget.events = newes
def getActiveEventTimes(self):
gevents = self.getEventDicts(active_only=True)
revents = []
for e in gevents:
revents.append(e.get('time'))
return np.asarray(revents)
def getEventTimes(self):
gevents = self.getEventDicts()
revents = []
for e in gevents:
revents.append(e.t)
return np.asarray(revents)
def getEvents(self, active_only=None):
return Event._FromGUIDicts(self.getEventDicts(active_only=active_only))
def getEventList(self, active_only=None):
elist = EventList._FromGUIDicts(self.getEventDicts(active_only=active_only))
elist.setInfo(label='html_frame_offset', value=self.getFrameOffset())
return elist
def getActiveEvents(self):
return self.getEvents(active_only=True)
def getEventDicts(self, active_only=None):
gevents = self.widget.events[:]
if (not active_only):
return gevents
else:
nevents = []
for e in gevents:
if e.get('is_active'):
nevents.append(e)
return nevents
def saveEvents(self, save_path=None):
elist = self.getEventList(active_only=False)
if (save_path is not None):
elist.writeToJSON(json_path=save_path)
self.widget.last_save_path = save_path
else:
save_path = self.widget.last_save_path
if (save_path is None):
save_path = uiGetSaveFilePath(file_extension='.json')
if (save_path is not None):
elist.writeToJSON(json_path=save_path)
self.widget.last_save_path = save_path
def saveEventsAs(self, save_path=None):
elist = self.getEventList(active_only=False)
if (save_path is not None):
elist.writeToJSON(json_path=save_path)
self.widget.last_save_path = save_path
print('savepath not none {}'.format(save_path))
else:
save_path = uiGetSaveFilePath(file_extension='.json')
print('savepath from ui {}'.format(save_path))
if (save_path is not None):
print('save path from ui {}'.format(save_path))
elist.writeToJSON(json_path=save_path)
self.widget.last_save_path = save_path
print(save_path)
def setEvents(self, events):
self.widget.events = Event._ToGUIDicts(events)
def setEventList(self, event_list):
if (event_list.getInfo('html_frame_offset') is not None):
self.widget.frame_offset = event_list.getInfo('html_frame_offset')
self.widget.events = event_list._toGUIDicts()
def loadEvents(self, load_path=None):
if (load_path is None):
load_path = uiGetFilePath()
elist = EventList()
elist.loadFromJSON(json_path=load_path)
self.setEventList(event_list=elist)
def getEventListWithSelectedSegments(self):
eventlist = self.getEventList()
events = eventlist.events
segments = []
for (i, e) in enumerate(events):
if (e.direction > (- 1)):
newseg = []
for si in range(i, len(events)):
newseg.append(si)
if (events[si].direction < 0):
break
segments.append(newseg)
eventlist.setInfo(label='selected_segments', value=segments)
return eventlist |
class Timer():
def __init__(self):
self.label = pyglet.text.Label('00:00', font_size=360, x=(window.width // 2), y=(window.height // 2), anchor_x='center', anchor_y='center')
self.reset()
def reset(self):
self.time = 0
self.running = False
self.label.text = '00:00'
self.label.color = (255, 255, 255, 255)
def update(self, dt):
if self.running:
self.time += dt
(m, s) = divmod(self.time, 60)
self.label.text = f'{round(m):02}:{round(s):02}'
if (m >= 5):
self.label.color = (180, 0, 0, 255) |