code stringlengths 281 23.7M |
|---|
def inline_small_list(sizemax=11, sizemin=0, immutable=False, unbox_num=False, nonull=False, attrname='list', factoryname='make', listgettername='_get_full_list', listsizename='_get_size_list', gettername='_get_list', settername='_set_list'):
    """Class decorator that stores a small list unboxed in instance fields.

    For every size in [sizemin, sizemax) a specialized subclass is generated
    whose elements live in individual attributes (_<attrname>_0, ...); sizes
    outside that range fall back to an "Arbitrary" subclass holding a real
    Python list in a single attribute.  Accessor methods (get/set/size/full
    list, under the given names) and static factories (<factoryname>,
    <factoryname>0/1/2/_n) are installed on the decorated class.
    """
    if (not config.type_size_specialization):
        # Specialization disabled: generate no fixed-size classes at all.
        sizemin = sizemax = 0
        unbox_num = False
    def wrapper(cls):
        _immutable_ = getattr(cls, '_immutable_', False)
        def make_class(size):
            # Build the fixed-size specialization holding `size` elements
            # in individual attributes.
            attrs = [('_%s_%s' % (attrname, i)) for i in range(size)]
            unrolling_enumerate_attrs = unrolling_iterable(enumerate(attrs))
            def _get_size_list(self):
                return size
            def _get_list(self, i):
                # Unrolled linear scan over the fields; for a constant `i`
                # this collapses to a single attribute read.
                for (j, attr) in unrolling_enumerate_attrs:
                    if (j == i):
                        result = getattr(self, attr)
                        if nonull:
                            debug.check_annotation(result, _not_null)
                        return result
                raise IndexError
            def _get_full_list(self):
                # Materialize the inlined fields as a regular list.
                res = ([None] * size)
                for (i, attr) in unrolling_enumerate_attrs:
                    elem = getattr(self, attr)
                    if nonull:
                        debug.check_annotation(elem, _not_null)
                    res[i] = getattr(self, attr)
                return res
            def _set_list(self, i, val):
                if nonull:
                    assert (val is not None)
                for (j, attr) in unrolling_enumerate_attrs:
                    if (j == i):
                        setattr(self, attr, val)
                        return
                raise IndexError
            def _init(self, elems, *args):
                assert (len(elems) == size)
                for (i, attr) in unrolling_enumerate_attrs:
                    val = elems[i]
                    if nonull:
                        assert (val is not None)
                    setattr(self, attr, elems[i])
                cls.__init__(self, *args)
            methods = {gettername: _get_list, listsizename: _get_size_list, listgettername: _get_full_list, settername: _set_list, '__init__': _init}
            newcls = type(cls)(('%sSize%s' % (cls.__name__, size)), (cls,), methods)
            if _immutable_:
                setattr(newcls, '_immutable_', True)
                newcls = add_clone_method(newcls)
            if immutable:
                setattr(newcls, '_immutable_fields_', attrs)
                newcls = add_clone_method(newcls)
            if ('_attrs_' in cls.__dict__):
                setattr(newcls, '_attrs_', attrs)
            return newcls
        # BUG FIX: was `map(make_class, ...)`.  On Python 3 `map` returns a
        # lazy iterator which is always truthy and cannot be indexed, so the
        # `if classes:` / `classes[0]` logic below would misbehave.
        classes = [make_class(i) for i in range(sizemin, sizemax)]
        def _get_arbitrary(self, i):
            return getattr(self, attrname)[i]
        def _get_size_list_arbitrary(self):
            return len(getattr(self, attrname))
        def _get_list_arbitrary(self):
            return getattr(self, attrname)
        def _set_arbitrary(self, i, val):
            if nonull:
                assert (val is not None)
            getattr(self, attrname)[i] = val
        def _init(self, elems, *args):
            debug.make_sure_not_resized(elems)
            setattr(self, attrname, elems)
            cls.__init__(self, *args)
        methods = {gettername: _get_arbitrary, listsizename: _get_size_list_arbitrary, listgettername: _get_list_arbitrary, settername: _set_arbitrary, '__init__': _init}
        cls_arbitrary = type(cls)(('%sArbitrary' % cls.__name__), (cls,), methods)
        if _immutable_:
            setattr(cls_arbitrary, '_immutable_', True)
            cls_arbitrary = add_clone_method(cls_arbitrary)
        if immutable:
            setattr(cls_arbitrary, '_immutable_fields_', [('%s[*]' % (attrname,))])
            cls_arbitrary = add_clone_method(cls_arbitrary)
        if ('_attrs_' in cls.__dict__):
            setattr(cls_arbitrary, '_attrs_', attrname)
        def make(elems, *args):
            # General factory: pick the specialized class for len(elems)
            # when available, otherwise the arbitrary-size fallback.
            if classes:
                if ((elems is None) or (len(elems) == 0)):
                    return make0(*args)
            elif (elems is None):
                elems = []
            if (sizemin <= len(elems) < sizemax):
                cls = classes[(len(elems) - sizemin)]
            else:
                cls = cls_arbitrary
            return cls(elems, *args)
        def make0(*args):
            if (not classes):
                return make([], *args)
            result = objectmodel.instantiate(classes[0])
            cls.__init__(result, *args)
            return result
        def make1(elem, *args):
            if (not classes):
                return make([elem], *args)
            # NOTE(review): indexes classes[1] directly — assumes sizemin == 0
            # so that classes[1] is the one-element class; confirm if sizemin
            # can ever be nonzero here.
            result = objectmodel.instantiate(classes[1])
            result._set_list(0, elem)
            cls.__init__(result, *args)
            return result
        def make2(elem1, elem2, *args):
            if (not classes):
                return make([elem1, elem2], *args)
            result = objectmodel.instantiate(classes[2])
            result._set_list(0, elem1)
            result._set_list(1, elem2)
            cls.__init__(result, *args)
            return result
        def make_n(size, *args):
            # Allocate an instance for `size` elements without initializing
            # the element fields (callers fill them in afterwards).
            if (sizemin <= size < sizemax):
                subcls = classes[(size - sizemin)]
            else:
                subcls = cls_arbitrary
            result = objectmodel.instantiate(subcls)
            if (subcls is cls_arbitrary):
                assert isinstance(result, subcls)
                setattr(result, attrname, ([None] * size))
            cls.__init__(result, *args)
            return result
        if unbox_num:
            assert (_immutable_ or immutable), 'unboxing is only supported for immutable objects'
            (make, make1, make2) = _add_num_classes(cls, make, make0, make1, make2, immut=_immutable_)
        setattr(cls, factoryname, staticmethod(make))
        setattr(cls, (factoryname + '0'), staticmethod(make0))
        setattr(cls, (factoryname + '1'), staticmethod(make1))
        setattr(cls, (factoryname + '2'), staticmethod(make2))
        setattr(cls, (factoryname + '_n'), staticmethod(make_n))
        return cls
    return wrapper
def find_singledispatch_register_impls(modules: list[MypyFile], errors: Errors) -> SingledispatchInfo:
    """Visit each module and collect its singledispatch register() implementations."""
    visitor = SingledispatchVisitor(errors)
    for module_file in modules:
        # Record the path first so the visitor can attribute findings to this file.
        visitor.current_path = module_file.path
        module_file.accept(visitor)
    return SingledispatchInfo(visitor.singledispatch_impls, visitor.decorators_to_remove)
@pytest.mark.parametrize('bitsize', [3, 4, 5])
def test_hamming_weight_compute(bitsize: int):
    """Check HammingWeightCompute decomposition, T-complexity and simulation.

    For every classical input the circuit must write popcount(input) into the
    output register, and the gate followed by its inverse must act as identity.

    BUG FIX: the parametrize decorator had lost its `@pytest.mark.` prefix
    (it read `.parametrize(...)`), which is a syntax error.
    """
    gate = HammingWeightCompute(bitsize=bitsize)
    gate_inv = (gate ** (- 1))
    assert_decompose_is_consistent_with_t_complexity(gate)
    assert_decompose_is_consistent_with_t_complexity(gate_inv)
    assert_valid_bloq_decomposition(gate)
    assert_valid_bloq_decomposition(gate_inv)
    # Junk register holds bitsize - popcount(bitsize) ancilla qubits.
    junk_bitsize = (bitsize - bitsize.bit_count())
    out_bitsize = bitsize.bit_length()
    sim = cirq.Simulator()
    op = GateHelper(gate).operation
    circuit = cirq.Circuit(cirq.decompose_once(op))
    circuit_with_inv = (circuit + cirq.Circuit(cirq.decompose_once((op ** (- 1)))))
    qubit_order = sorted(circuit_with_inv.all_qubits())
    for inp in range((2 ** bitsize)):
        input_state = (([0] * (junk_bitsize + out_bitsize)) + list(iter_bits(inp, bitsize)))
        result = sim.simulate(circuit, initial_state=input_state).dirac_notation()
        actual_bits = result[(1 + junk_bitsize):((1 + junk_bitsize) + out_bitsize)]
        # The output register must hold popcount(inp) in binary.
        assert (actual_bits == f'{inp.bit_count():0{out_bitsize}b}')
        # Gate followed by its inverse must restore the initial state.
        assert_circuit_inp_out_cirqsim(circuit_with_inv, qubit_order, input_state, input_state)
class FiduceoMviriFullFcdrFileHandler(FiduceoMviriBase):
    """Full-FCDR variant of the MVIRI file handler.

    Reads uncalibrated VIS counts and carries the extra calibration
    coefficients required to calibrate them.
    """

    nc_keys = FiduceoMviriBase.nc_keys.copy()
    nc_keys['VIS'] = 'count_vis'

    def _get_calib_coefs(self):
        """Extend the base coefficients with the full-FCDR VIS terms."""
        coefs = super()._get_calib_coefs()
        vis_extra = {
            'years_since_launch': np.float32(self.nc['years_since_launch']),
            'a0': np.float32(self.nc['a0_vis']),
            'a1': np.float32(self.nc['a1_vis']),
            'a2': np.float32(self.nc['a2_vis']),
            'mean_count_space': np.float32(self.nc['mean_count_space_vis']),
        }
        coefs['VIS'].update(vis_extra)
        return coefs

    def _calibrate_vis(self, ds, channel, calibration):
        """Calibrate a VIS dataset; solar zenith angles are needed only for reflectance."""
        if (calibration == 'reflectance'):
            sza = self._get_angles('solar_zenith_angle', HIGH_RESOL)
        else:
            sza = None
        calibrator = VISCalibrator(self.calib_coefs[channel], sza)
        return calibrator.calibrate(ds, calibration)
class KazooTreeCacheTests(KazooAdaptiveHandlerTestCase):
    """Integration tests for TreeCache against a live Kazoo client.

    Events and faults from the cache are funneled into queues so the tests
    can wait for them deterministically instead of sleeping.
    """
    def setUp(self):
        super(KazooTreeCacheTests, self).setUp()
        # Queues collect TreeCache events/errors delivered from listener callbacks.
        self._event_queue = self.client.handler.queue_impl()
        self._error_queue = self.client.handler.queue_impl()
        self.path = None
        self.cache = None
    def tearDown(self):
        # Surface any error the cache reported, ignoring the FakeException
        # deliberately injected by some tests.
        if (not self._error_queue.empty()):
            try:
                raise self._error_queue.get()
            except FakeException:
                pass
        if (self.cache is not None):
            self.cache.close()
            self.cache = None
        super(KazooTreeCacheTests, self).tearDown()
    def make_cache(self):
        """Create and start a TreeCache rooted at a fresh random path."""
        if (self.cache is None):
            self.path = ('/' + uuid.uuid4().hex)
            self.cache = TreeCache(self.client, self.path)
            self.cache.listen((lambda event: self._event_queue.put(event)))
            self.cache.listen_fault((lambda error: self._error_queue.put(error)))
            self.cache.start()
        return self.cache
    def wait_cache(self, expect=None, since=None, timeout=10):
        """Block until an event arrives; optionally skip until `since` is seen,
        then assert the next event type equals `expect` (if given)."""
        started = (since is None)
        while True:
            event = self._event_queue.get(timeout=timeout)
            if started:
                if (expect is not None):
                    assert (event.event_type == expect)
                return event
            if (event.event_type == since):
                started = True
                if (expect is None):
                    return
    def spy_client(self, method_name):
        """Patch a client method with a pass-through Mock so calls can be asserted."""
        method = getattr(self.client, method_name)
        return patch.object(self.client, method_name, wraps=method)
    def _wait_gc(self):
        # Let pending handler work drain, then force a full garbage collection
        # so refcount-based checks are stable.
        self.client.handler.sleep_func(0.1)
        completion_queue = getattr(self.handler, 'completion_queue', None)
        if (completion_queue is not None):
            while (not self.client.handler.completion_queue.empty()):
                self.client.handler.sleep_func(0.1)
        for gen in range(3):
            gc.collect(gen)
    def count_tree_node(self):
        """Count live TreeNode objects, retrying until the count is stable."""
        for retry in range(10):
            result = set()
            for _ in range(5):
                self._wait_gc()
                result.add(count_refs_by_type('TreeNode'))
            if (len(result) == 1):
                return list(result)[0]
        raise RuntimeError('could not count refs exactly')
    def test_start(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        stat = self.client.exists(self.path)
        assert (stat.version == 0)
        assert (self.cache._state == TreeCache.STATE_STARTED)
        assert (self.cache._root._state == TreeNode.STATE_LIVE)
    def test_start_started(self):
        # Starting an already-started cache must fail.
        self.make_cache()
        with pytest.raises(KazooException):
            self.cache.start()
    def test_start_closed(self):
        # A closed cache cannot be restarted.
        self.make_cache()
        self.cache.close()
        with pytest.raises(KazooException):
            self.cache.start()
    def test_close(self):
        """Closing the cache must drop its watchers and free all TreeNodes."""
        assert (self.count_tree_node() == 0)
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/foo/bar/baz'), makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)
        # Register extra user watchers that must survive the cache close.
        stub_data_watcher = Mock(spec=(lambda event: None))
        stub_child_watcher = Mock(spec=(lambda event: None))
        self.client.get((self.path + '/foo'), stub_data_watcher)
        self.client.get_children((self.path + '/foo'), stub_child_watcher)
        root_path = (self.client.chroot + self.path)
        assert (len(self.client._data_watchers[(root_path + '/foo')]) == 2)
        assert (len(self.client._data_watchers[(root_path + '/foo/bar')]) == 1)
        assert (len(self.client._data_watchers[(root_path + '/foo/bar/baz')]) == 1)
        assert (len(self.client._child_watchers[(root_path + '/foo')]) == 2)
        assert (len(self.client._child_watchers[(root_path + '/foo/bar')]) == 1)
        assert (len(self.client._child_watchers[(root_path + '/foo/bar/baz')]) == 1)
        self.cache.close()
        # No more events should be published after close.
        assert self._event_queue.empty()
        assert (self.cache._root._children == {})
        assert (self.cache._root._data is None)
        assert (self.cache._state == TreeCache.STATE_CLOSED)
        assert (self.cache._root._state != TreeNode.STATE_DEAD)
        # Only the stub watchers registered outside the cache remain.
        assert (len(self.client._data_watchers[(root_path + '/foo')]) == 1)
        assert (len(self.client._data_watchers[(root_path + '/foo/bar')]) == 0)
        assert (len(self.client._data_watchers[(root_path + '/foo/bar/baz')]) == 0)
        assert (len(self.client._child_watchers[(root_path + '/foo')]) == 1)
        assert (len(self.client._child_watchers[(root_path + '/foo/bar')]) == 0)
        assert (len(self.client._child_watchers[(root_path + '/foo/bar/baz')]) == 0)
        assert (list(self.client._data_watchers[(root_path + '/foo')])[0] == stub_data_watcher)
        assert (list(self.client._child_watchers[(root_path + '/foo')])[0] == stub_child_watcher)
        self.cache = None
        assert (self.count_tree_node() == 0)
    def test_delete_operation(self):
        """Recursive delete must remove child nodes and their watchers."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        assert (self.count_tree_node() == 1)
        self.client.create((self.path + '/foo/bar/baz'), makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)
        self.client.delete((self.path + '/foo'), recursive=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_REMOVED)
        assert (self.cache._root._children == {})
        root_path = (self.client.chroot + self.path)
        assert (self.client._data_watchers[(root_path + '/foo')] == set())
        assert (self.client._data_watchers[(root_path + '/foo/bar')] == set())
        assert (self.client._data_watchers[(root_path + '/foo/bar/baz')] == set())
        assert (self.client._child_watchers[(root_path + '/foo')] == set())
        assert (self.client._child_watchers[(root_path + '/foo/bar')] == set())
        assert (self.client._child_watchers[(root_path + '/foo/bar/baz')] == set())
        # Only the root TreeNode survives.
        assert (self.count_tree_node() == 1)
    def test_children_operation(self):
        """Create/set/delete of a child must emit ADDED/UPDATED/REMOVED in order."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/test_children'), b'test_children_1')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        assert (event.event_type == TreeEvent.NODE_ADDED)
        assert (event.event_data.path == (self.path + '/test_children'))
        assert (event.event_data.data == b'test_children_1')
        assert (event.event_data.stat.version == 0)
        self.client.set((self.path + '/test_children'), b'test_children_2')
        event = self.wait_cache(TreeEvent.NODE_UPDATED)
        assert (event.event_type == TreeEvent.NODE_UPDATED)
        assert (event.event_data.path == (self.path + '/test_children'))
        assert (event.event_data.data == b'test_children_2')
        assert (event.event_data.stat.version == 1)
        self.client.delete((self.path + '/test_children'))
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        assert (event.event_type == TreeEvent.NODE_REMOVED)
        assert (event.event_data.path == (self.path + '/test_children'))
        # The removal event carries the node's last-known data and stat.
        assert (event.event_data.data == b'test_children_2')
        assert (event.event_data.stat.version == 1)
    def test_subtree_operation(self):
        """Creation events arrive top-down, removal events bottom-up."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/foo/bar/baz'), makepath=True)
        for relative_path in ('/foo', '/foo/bar', '/foo/bar/baz'):
            event = self.wait_cache(TreeEvent.NODE_ADDED)
            assert (event.event_type == TreeEvent.NODE_ADDED)
            assert (event.event_data.path == (self.path + relative_path))
            assert (event.event_data.data == b'')
            assert (event.event_data.stat.version == 0)
        self.client.delete((self.path + '/foo'), recursive=True)
        for relative_path in ('/foo/bar/baz', '/foo/bar', '/foo'):
            event = self.wait_cache(TreeEvent.NODE_REMOVED)
            assert (event.event_type == TreeEvent.NODE_REMOVED)
            assert (event.event_data.path == (self.path + relative_path))
    def test_get_data(self):
        """get_data must be served from the cache without touching the client."""
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/foo/bar/baz'), b'', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        # Patching _client proves the lookups never hit ZooKeeper.
        with patch.object(cache, '_client'):
            assert (cache.get_data(self.path).data == b'')
            assert (cache.get_data(self.path).stat.version == 0)
            assert (cache.get_data((self.path + '/foo')).data == b'')
            assert (cache.get_data((self.path + '/foo')).stat.version == 0)
            assert (cache.get_data((self.path + '/foo/bar')).data == b'')
            assert (cache.get_data((self.path + '/foo/bar')).stat.version == 0)
            assert (cache.get_data((self.path + '/foo/bar/baz')).data == b'')
            assert (cache.get_data((self.path + '/foo/bar/baz')).stat.version == 0)
    def test_get_children(self):
        """get_children must be served from the cache without touching the client."""
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/foo/bar/baz'), b'', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        with patch.object(cache, '_client'):
            assert (cache.get_children((self.path + '/foo/bar/baz')) == frozenset())
            assert (cache.get_children((self.path + '/foo/bar')) == frozenset(['baz']))
            assert (cache.get_children((self.path + '/foo')) == frozenset(['bar']))
            assert (cache.get_children(self.path) == frozenset(['foo']))
    def test_get_data_out_of_tree(self):
        # Paths outside the cached subtree are rejected.
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        with pytest.raises(ValueError):
            self.cache.get_data('/out_of_tree')
    def test_get_children_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        with pytest.raises(ValueError):
            self.cache.get_children('/out_of_tree')
    def test_get_data_no_node(self):
        # Missing nodes inside the subtree yield None, not an error.
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        with patch.object(cache, '_client'):
            assert (cache.get_data((self.path + '/non_exists')) is None)
    def test_get_children_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        with patch.object(cache, '_client'):
            assert (cache.get_children((self.path + '/non_exists')) is None)
    def test_session_reconnected(self):
        """After a reconnect the cache must re-fetch every known node."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create((self.path + '/foo'))
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        assert (event.event_data.path == (self.path + '/foo'))
        with self.spy_client('get_async') as get_data:
            with self.spy_client('get_children_async') as get_children:
                self.lose_connection(self.client.handler.event_object)
                self.wait_cache(TreeEvent.CONNECTION_SUSPENDED)
                self.wait_cache(TreeEvent.CONNECTION_RECONNECTED)
                # Wait for the refresh triggered by the reconnect to drain.
                while (self.cache._outstanding_ops > 0):
                    self.client.handler.sleep_func(0.1)
                _node_root = self.cache._root
                _node_foo = self.cache._root._children['foo']
                get_data.assert_has_calls([call(self.path, watch=_node_root._process_watch), call((self.path + '/foo'), watch=_node_foo._process_watch)], any_order=True)
                get_children.assert_has_calls([call(self.path, watch=_node_root._process_watch), call((self.path + '/foo'), watch=_node_foo._process_watch)], any_order=True)
    def test_root_recreated(self):
        """Deleting and recreating the root must emit REMOVED then ADDED."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.delete(self.path)
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        assert (event.event_type == TreeEvent.NODE_REMOVED)
        assert (event.event_data.data == b'')
        assert (event.event_data.path == self.path)
        assert (event.event_data.stat.version == 0)
        self.client.ensure_path(self.path)
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        assert (event.event_type == TreeEvent.NODE_ADDED)
        assert (event.event_data.data == b'')
        assert (event.event_data.path == self.path)
        assert (event.event_data.stat.version == 0)
        # The op counter must never go negative.
        assert (self.cache._outstanding_ops >= 0), ('unexpected outstanding ops %r' % self.cache._outstanding_ops)
    def test_exception_handler(self):
        """Errors raised inside node callbacks are routed to the fault listener."""
        error_value = FakeException()
        error_handler = Mock()
        with patch.object(TreeNode, 'on_deleted') as on_deleted:
            on_deleted.side_effect = [error_value]
            self.make_cache()
            self.cache.listen_fault(error_handler)
            self.cache.close()
            error_handler.assert_called_once_with(error_value)
    def test_exception_suppressed(self):
        """Client calls after a session loss must not invoke node callbacks."""
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        # Simulate a lost session while the cache is running.
        self.client.stop()
        self.client.close()
        self.client.handler.start()
        self.wait_cache(since=TreeEvent.CONNECTION_LOST)
        with patch.object(TreeNode, 'on_created') as on_created:
            self.cache._root._call_client('exists', '/')
            self.cache._root._call_client('get', '/')
            self.cache._root._call_client('get_children', '/')
            self.wait_cache(since=TreeEvent.INITIALIZED)
            on_created.assert_not_called()
            assert (self.cache._outstanding_ops == 0)
class Core(object):
    """Holds parsed features and hands out unique, thread-safe feature/scenario ids."""

    def __init__(self):
        self.features = []
        self._features_to_run = OrderedDict()
        self._feature_id_lock = Lock()
        self._feature_id = 0
        self._scenario_id_lock = Lock()
        self._scenario_id = 0

    def features_to_run(self):
        """Return the features scheduled to run, in insertion order."""
        return list(self._features_to_run.values())

    def next_feature_id(self):
        """Return the next unique feature id (thread-safe)."""
        with self._feature_id_lock:
            self._feature_id += 1
            return self._feature_id

    def next_scenario_id(self):
        """Return the next unique scenario id (thread-safe)."""
        with self._scenario_id_lock:
            self._scenario_id += 1
            return self._scenario_id

    def parse_features(self, feature_files, tag_expr):
        """Parse all feature files and register the runnable ones.

        BUG FIX: the id generators were passed as bound methods
        (`self.next_feature_id` / `self.next_scenario_id`) instead of being
        called, so the ids ended up as method objects rather than integers.
        """
        for featurefile in feature_files:
            feature = self.parse_feature(featurefile, tag_expr, featureid=self.next_feature_id())
            if (feature is not None):
                for scenario in feature.scenarios:
                    scenario.absolute_id = self.next_scenario_id()
                self._features_to_run[featurefile] = feature

    def parse_feature(self, featurefile, tag_expr, inherited_tags=None, featureid=0):
        """Parse one feature file; return the Feature or None if it was filtered out."""
        featureparser = FeatureParser(self, featurefile, featureid, tag_expr, inherited_tags=inherited_tags)
        feature = featureparser.parse()
        if (feature is None):
            return None
        self.features.append(feature)
        return feature
class _StochasticFactory():
    """Factory that parses the stochastic-distribution child of an XML element."""

    @staticmethod
    def parse_distribution(element):
        """Parse the first recognized distribution child of `element`.

        Raises NotImplementedError for UserDefinedDistribution and
        NotAValidElement when no known distribution child is present.

        Note: declared @staticmethod (the function takes no `self`); calls
        via the class keep working unchanged.
        """
        if (element.find('NormalDistribution') is not None):
            return NormalDistribution.parse(element.find('NormalDistribution'))
        elif (element.find('UniformDistribution') is not None):
            return UniformDistribution.parse(element.find('UniformDistribution'))
        elif (element.find('PoissonDistribution') is not None):
            return PoissonDistribution.parse(element.find('PoissonDistribution'))
        elif (element.find('Histogram') is not None):
            return Histogram.parse(element.find('Histogram'))
        elif (element.find('ProbabilityDistributionSet') is not None):
            return ProbabilityDistributionSet.parse(element.find('ProbabilityDistributionSet'))
        elif (element.find('UserDefinedDistribution') is not None):
            # BUG FIX: the exception was previously *returned*, not raised,
            # so callers silently received an exception instance.
            raise NotImplementedError('UserDefinedDistribution is not implemented yet.')
        else:
            raise NotAValidElement('element ', element, 'is not a valid Stochastic distribution')
def do_setup():
    """Install versioneer support into the project rooted at get_root().

    Writes the generated _version.py, wires the package __init__.py to use
    it, and ensures MANIFEST.in ships both versioneer.py and the version
    file.  Returns 0 on success, 1 if setup.cfg lacked the needed config.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e:
        # Missing file or missing [versioneer] section: append a sample
        # config so the user has a template to fill in, then bail out.
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print('Adding sample versioneer config to setup.cfg', file=sys.stderr)
            with open(os.path.join(root, 'setup.cfg'), 'a') as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Generate the long-form _version.py from the template for this VCS,
    # substituting the project's configuration values.
    print((' creating %s' % cfg.versionfile_source))
    with open(cfg.versionfile_source, 'w') as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
    # Add the version-lookup snippet to the package __init__.py, if present
    # and not already there.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), '__init__.py')
    if os.path.exists(ipy):
        try:
            with open(ipy, 'r') as f:
                old = f.read()
        except EnvironmentError:
            old = ''
        if (INIT_PY_SNIPPET not in old):
            print((' appending to %s' % ipy))
            with open(ipy, 'a') as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print((' %s unmodified' % ipy))
    else:
        print((" %s doesn't exist, ok" % ipy))
        ipy = None
    # Collect the simple `include` entries already in MANIFEST.in so we
    # do not duplicate them below.
    manifest_in = os.path.join(root, 'MANIFEST.in')
    simple_includes = set()
    try:
        with open(manifest_in, 'r') as f:
            for line in f:
                if line.startswith('include '):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    if ('versioneer.py' not in simple_includes):
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, 'a') as f:
            f.write('include versioneer.py\n')
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if (cfg.versionfile_source not in simple_includes):
        print((" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source))
        with open(manifest_in, 'a') as f:
            f.write(('include %s\n' % cfg.versionfile_source))
    else:
        print(' versionfile_source already in MANIFEST.in')
    # Make sure the VCS keeps these files under version control.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def get_repository_from_config(config_file: str, repository: str, repository_url: Optional[str]=None) -> RepositoryConfig:
    """Resolve repository settings from an explicit URL or a config file.

    An explicit `repository_url` wins (credentials left unset); otherwise the
    named section is read from `config_file`.  Raises
    exceptions.InvalidConfiguration when the file is unreadable or the
    section is missing.

    Improvement: exceptions are now chained (`raise ... from exc`) so the
    original OSError/KeyError stays visible in tracebacks.
    """
    if repository_url:
        _validate_repository_url(repository_url)
        return {'repository': repository_url, 'username': None, 'password': None}
    try:
        return get_config(config_file)[repository]
    except OSError as exc:
        raise exceptions.InvalidConfiguration(str(exc)) from exc
    except KeyError as exc:
        raise exceptions.InvalidConfiguration(f'''Missing '{repository}' section from {config_file}.
More info: ''') from exc
@pytest.mark.asyncio(scope='class')
class TestClassScopedLoop():
    """Verify pytest-asyncio runs all tests of this class on one event loop.

    BUG FIX: the marker had lost its `@pytest.mark.` prefix (it read
    `.asyncio(scope='class')`), which is a syntax error.
    """
    loop: asyncio.AbstractEventLoop

    async def test_remember_loop(self):
        # Record the loop the first test runs on.
        TestClassScopedLoop.loop = asyncio.get_running_loop()

    async def test_this_runs_in_same_loop(self):
        assert (asyncio.get_running_loop() is TestClassScopedLoop.loop)
class Effect6688(BaseEffect):
    """Projected active effect that registers remote shield repair on the fit."""

    type = ('projected', 'active')

    def handler(fit, container, context, projectionRange, **kwargs):
        """Append (repair amount, cycle time in seconds) to fit._shieldRr.

        Bails out unless the effect is projected, the target allows
        assistance, and the module is within range.
        """
        if 'projected' not in context:
            return
        if fit.ship.getModifiedItemAttr('disallowAssistance'):
            return
        # Out of range: no repair applied.  projectionRange may be None.
        if container.getModifiedItemAttr('maxRange', 0) < (projectionRange or 0):
            return
        repair_amount = container.getModifiedItemAttr('shieldBonus')
        cycle_time_s = container.getModifiedItemAttr('duration') / 1000.0
        fit._shieldRr.append((repair_amount, cycle_time_s))
class AppInformation(EventPlugin):
    """Plugin showing information about the application and its environment."""

    PLUGIN_ID = 'AppInformation'
    PLUGIN_NAME = _('Application Information')
    PLUGIN_DESC = _('Various information about the application and its environment.')
    PLUGIN_CAN_ENABLE = False
    PLUGIN_ICON = Icons.PREFERENCES_SYSTEM

    def PluginPreferences(self, *args):
        """Build and return the preferences widget: a two-column info grid."""
        vb = Gtk.VBox()
        row = 0
        grid = Gtk.Grid(column_spacing=12, row_spacing=6)

        def label_title(text):
            # Right-aligned, dimmed row title.
            l = Gtk.Label(label=text, xalign=1, yalign=0, wrap=True, justify=Gtk.Justification.RIGHT, selectable=True)
            l.get_style_context().add_class(Gtk.STYLE_CLASS_DIM_LABEL)
            return l

        def label_value(text):
            # Left-aligned, wrapping row value.
            return Gtk.Label(label=text, wrap=True, xalign=0, yalign=0, width_chars=25, selectable=True)

        def label_path(path):
            # Clickable link label pointing at a filesystem path.
            text = escape(fsn2text(unexpand(path)))
            l = Gtk.Label(label=f"<a href='{fsn2uri(path)}'>{text}</a>", use_markup=True, ellipsize=Pango.EllipsizeMode.MIDDLE, xalign=0, selectable=True)
            l.connect('activate-link', show_uri)
            return l

        def add_row(title, value):
            # Append one (title, value) pair to the grid.  Replaces the
            # insert_row/attach/attach/increment block that was copy-pasted
            # once per row in the previous version.
            nonlocal row
            grid.insert_row(row)
            grid.attach(title, 0, row, 1, 1)
            grid.attach(value, 1, row, 1, 1)
            row += 1

        format_names = sorted([t.format for t in formats.types])
        add_row(label_title(_('Supported Formats')), label_value(', '.join(format_names)))
        add_row(label_title(_('Configuration Directory')), label_path(get_user_dir()))
        add_row(label_title(_('Cache Directory')), label_path(get_cache_dir()))
        add_row(label_title(_('Audio Backend')), label_value(f'{app.player.name}\n{app.player.version_info}'))
        add_row(label_title('Python'), label_value(platform.python_version()))
        add_row(label_title('Mutagen'), label_value(fver(mutagen.version)))
        add_row(label_title('Gtk+'), label_value('{} ({}, {})'.format(fver(gtk_version), get_backend_name(), get_font_backend_name())))
        add_row(label_title('PyGObject'), label_value(fver(pygobject_version)))

        vb.pack_start(grid, True, True, 0)
        vb.show_all()
        return vb
def save_cache():
    """Persist the module-level `cache` dict to <cache_dir>/cache.pickle.

    Stamps today's date (midnight) under 'last_run' before pickling.
    Failures to write are silently ignored (best-effort persistence).
    Returns 0.
    """
    print('Saving cache')
    # NOTE(review): local time is formatted with a literal 'Z' suffix —
    # presumably meant as UTC; confirm before relying on the timestamp.
    cache['last_run'] = datetime.strftime(datetime.now().replace(hour=0, minute=0, second=0), '%Y-%m-%dT%H:%M:%SZ')
    try:
        # FIX: the write handle was misleadingly named `input_file`; it is an
        # output file.  The needless `global cache` declaration is dropped —
        # only an item assignment is performed, no rebinding.
        with open(os.path.join(cache_dir, 'cache.pickle'), 'wb') as cache_file:
            pickle.dump(cache, cache_file)
    except EnvironmentError:
        # Best-effort: failing to persist the cache is not fatal.
        pass
    return 0
def extract_current_step(current_status_string):
    """Return the step number parsed from a docker build status line.

    Understands both 'Step N/M :' and the older 'Step N :' formats; returns
    None when the line matches neither.
    """
    for pattern in ('Step ([0-9]+)/([0-9]+) :', 'Step ([0-9]+) :'):
        match = re.search(pattern, current_status_string)
        if match:
            return int(match.group(1))
    return None
def get_relational_data(user_id, item_id, data):
    """Relate `item_id` to the user's other positively-rated items.

    For each other item in the user's positive list, classify the pair by the
    attributes it shares with `item_id`: r0 collects items sharing nothing,
    while (r1, e1), (r2, e2), (r3, e3) pair items with each shared genre,
    director and actor respectively (one entry per shared value).

    Returns (r0, r1, r2, r3, e1, e2, e3, len(r0), len(r1), len(r2), len(r3)).

    Cleanup: removed dead code — an unused `all_items` materialization and
    leftover `time()` benchmarking variables (t1/t2) that were never read.
    """
    (r0, r1, r2, r3) = ([], [], [], [])
    (e1, e2, e3) = ([], [], [])
    movie1 = data.movie_dict[data.items_traverse[item_id]]
    # The user's other positive items, excluding the query item itself.
    ru_list = list(data.user_positive_list[user_id])
    if (item_id in ru_list):
        ru_list.remove(item_id)
    for another_item in ru_list:
        movie2 = data.movie_dict[data.items_traverse[another_item]]
        (shared_genre, shared_director, shared_actor) = get_share_attributes(movie1, movie2)
        if not (shared_genre or shared_director or shared_actor):
            r0.append(another_item)
        # Iterating an empty collection is a no-op, so the previous
        # `if len(...) != 0` guards were redundant.
        for value in shared_genre:
            r1.append(another_item)
            e1.append(value)
        for value in shared_director:
            r2.append(another_item)
            e2.append(value)
        for value in shared_actor:
            r3.append(another_item)
            e3.append(value)
    return (r0, r1, r2, r3, e1, e2, e3, len(r0), len(r1), len(r2), len(r3))
def ql_syscall_terminate_with_payload(ql, pid, reason_namespace, reason_code, payload, payload_size, reason_string):
    """Handle terminate_with_payload: log the arguments and stop emulation."""
    message = ('terminate_with_payload(pid: %d, reason_namespace: 0x%x, reason_code: 0x%x, payload: 0x%x payload_size: 0x%x, reason_string: 0x%x)'
               % (pid, reason_namespace, reason_code, payload, payload_size, reason_string))
    ql.log.debug(message)
    ql.emu_stop()
    return KERN_SUCCESS
def get_similar_cids(base, MaxRecords):
    """Run a PubChem similarity search and keep hits with CCDC structures.

    Parameters
    ----------
    base : int or str
        Identifier to search from; ints are converted to strings.
    MaxRecords : int
        Maximum number of similarity hits to request.

    Returns a list of dicts with keys 'cid', 'smiles', 'name', 'csd_codes'
    for every hit that has at least one CSD refcode.
    """
    # FIX: `type(base) == int` replaced with the idiomatic isinstance check.
    if isinstance(base, int):
        base = str(base)
    cids = pcp.get_compounds(base, searchtype='similarity', MaxRecords=MaxRecords)
    results = []
    for compound in cids:
        print(compound.cid)
        csd_codes = check_for_ccdc_structures(compound.cid)
        if csd_codes:
            d = {'cid': compound.cid, 'smiles': compound.canonical_smiles, 'name': compound.iupac_name, 'csd_codes': csd_codes}
            results.append(d)
            pprint(d)
    return results
class SvgRenderer(Renderer):
    """Renderer that serializes a scene to a static SVG file."""
    def __init__(self, width, height, filename):
        # Output dimensions in SVG user units.
        self._width = width
        self._height = height
        # Expand '~' so the output lands under the user's home directory.
        if filename.startswith('~'):
            filename = os.path.expanduser(filename)
        self._filename = filename
    def render(self, scene: WorldObject, camera: Camera):
        """Render `scene` as seen by `camera` and write the SVG file."""
        camera.update_projection_matrix()
        # Renderable objects, sorted by (render_order, projected depth).
        q = self.get_render_list(scene, camera.camera_matrix)
        f = io.StringIO()
        # NOTE(review): the xmlns value below looks truncated — the string
        # ends right after "xmlns='" with no namespace URL.  Presumably it
        # should carry the SVG namespace; confirm against upstream before
        # changing, since the emitted file content depends on it.
        f.write(f'''<svg width='{self._width}' height='{self._height}' xmlns='
''')
        for wobject in q:
            # Each object is converted by its registered render function,
            # which may return a single SVG fragment or a list of them.
            renderfunc = registry.get_render_function(wobject)
            if (renderfunc is not None):
                res = renderfunc(wobject)
                if isinstance(res, str):
                    f.write(res)
                elif isinstance(res, list):
                    for line in res:
                        f.write(line)
        f.write('\n</svg>\n')
        with open(self._filename, 'wb') as f2:
            f2.write(f.getvalue().encode())
    def get_render_list(self, scene: WorldObject, proj_screen_matrix):
        """Collect visible objects with a material, sorted for painting order."""
        q = []
        def visit(wobject):
            nonlocal q
            if (wobject.visible and hasattr(wobject, 'material')):
                q.append(wobject)
        scene.traverse(visit)
        def sort_func(wobject: WorldObject):
            # Depth of the object's world position in clip space.
            z = la.vec_transform(wobject.world.position, proj_screen_matrix)[2]
            return (wobject.render_order, z)
        return list(sorted(q, key=sort_func))
class OptimizationConfig(FairseqDataclass):
    """Optimization-related training options (stopping, clipping, LR, BMUF)."""
    # Force stop training at the given epoch (0 = no limit).
    max_epoch: int = field(default=0, metadata={'help': 'force stop training at specified epoch'})
    # Force stop training at the given update count (0 = no limit).
    max_update: int = field(default=0, metadata={'help': 'force stop training at specified update'})
    # Stop after this many cumulative hours of training (<= 0 disables).
    stop_time_hours: float = field(default=0, metadata={'help': 'force stop training after specified cumulative time (if >0)'})
    # Gradient-norm clipping threshold.
    clip_norm: float = field(default=0.0, metadata={'help': 'clip threshold of gradients'})
    # Normalize gradients per sentence instead of per token.
    sentence_avg: bool = field(default=False, metadata={'help': 'normalize gradients by the number of sentences in a batch (default is to normalize by number of tokens)'})
    # Gradient accumulation: update every N_i batches during epoch i.
    update_freq: List[int] = field(default_factory=(lambda : [1]), metadata={'help': 'update parameters every N_i batches, when in epoch i'})
    # Per-epoch learning rates; interpretation depends on the LR scheduler.
    lr: List[float] = field(default_factory=(lambda : [0.25]), metadata={'help': 'learning rate for the first N epochs; all epochs >N using LR_N (note: this may be interpreted differently depending on --lr-scheduler)'})
    # Stop once the learning rate drops to this value.
    stop_min_lr: float = field(default=(- 1.0), metadata={'help': 'stop training when the learning rate reaches this minimum'})
    # Use block-momentum updates to sync models across GPUs/shards.
    use_bmuf: bool = field(default=False, metadata={'help': 'specify global optimizer for syncing models on different GPUs/shards'})
class DocStringParser():
    def __init__(self, function_name: str) -> None:
        """Initialize parser state for extracting signatures of `function_name`."""
        # Name of the function whose docstring signature is being parsed.
        self.function_name = function_name
        # Parser state stack; STATE_INIT is presumably a module-level constant.
        self.state = [STATE_INIT]
        # Text accumulated for the token currently being built.
        self.accumulator = ''
        # Components of the argument currently being parsed.
        self.arg_type: (str | None) = None
        self.arg_name = ''
        self.arg_default: (str | None) = None
        # Return type defaults to Any until one is parsed.
        self.ret_type = 'Any'
        # Whether a signature for function_name has been found yet.
        self.found = False
        # Arguments of the signature currently being parsed.
        self.args: list[ArgSig] = []
        # All complete signatures collected so far.
        self.signatures: list[FunctionSig] = []
def add_token(self, token: tokenize.TokenInfo) -> None:
if ((token.type == tokenize.NAME) and (token.string == self.function_name) and (self.state[(- 1)] == STATE_INIT)):
self.state.append(STATE_FUNCTION_NAME)
elif ((token.type == tokenize.OP) and (token.string == '(') and (self.state[(- 1)] == STATE_FUNCTION_NAME)):
self.state.pop()
self.accumulator = ''
self.found = True
self.state.append(STATE_ARGUMENT_LIST)
elif (self.state[(- 1)] == STATE_FUNCTION_NAME):
self.state.pop()
elif ((token.type == tokenize.OP) and (token.string in ('[', '(', '{')) and (self.state[(- 1)] != STATE_INIT)):
self.accumulator += token.string
self.state.append(STATE_OPEN_BRACKET)
elif ((token.type == tokenize.OP) and (token.string in (']', ')', '}')) and (self.state[(- 1)] == STATE_OPEN_BRACKET)):
self.accumulator += token.string
self.state.pop()
elif ((token.type == tokenize.OP) and (token.string == ':') and (self.state[(- 1)] == STATE_ARGUMENT_LIST)):
self.arg_name = self.accumulator
self.accumulator = ''
self.state.append(STATE_ARGUMENT_TYPE)
elif ((token.type == tokenize.OP) and (token.string == '=') and (self.state[(- 1)] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_TYPE))):
if (self.state[(- 1)] == STATE_ARGUMENT_TYPE):
self.arg_type = self.accumulator
self.state.pop()
else:
self.arg_name = self.accumulator
self.accumulator = ''
self.state.append(STATE_ARGUMENT_DEFAULT)
elif ((token.type == tokenize.OP) and (token.string in (',', ')')) and (self.state[(- 1)] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_DEFAULT, STATE_ARGUMENT_TYPE))):
if (self.state[(- 1)] == STATE_ARGUMENT_DEFAULT):
self.arg_default = self.accumulator
self.state.pop()
elif (self.state[(- 1)] == STATE_ARGUMENT_TYPE):
self.arg_type = self.accumulator
self.state.pop()
elif (self.state[(- 1)] == STATE_ARGUMENT_LIST):
self.arg_name = self.accumulator
if ((not ((token.string == ')') and (self.accumulator.strip() == ''))) and (not _ARG_NAME_RE.match(self.arg_name))):
self.reset()
return
if (token.string == ')'):
self.state.pop()
if self.arg_name:
if (self.arg_type and (not is_valid_type(self.arg_type))):
self.args.append(ArgSig(name=self.arg_name, type=None, default=bool(self.arg_default)))
else:
self.args.append(ArgSig(name=self.arg_name, type=self.arg_type, default=bool(self.arg_default)))
self.arg_name = ''
self.arg_type = None
self.arg_default = None
self.accumulator = ''
elif ((token.type == tokenize.OP) and (token.string == '->') and (self.state[(- 1)] == STATE_INIT)):
self.accumulator = ''
self.state.append(STATE_RETURN_VALUE)
elif ((token.type in (tokenize.NEWLINE, tokenize.ENDMARKER)) and (self.state[(- 1)] in (STATE_INIT, STATE_RETURN_VALUE))):
if (self.state[(- 1)] == STATE_RETURN_VALUE):
if (not is_valid_type(self.accumulator)):
self.reset()
return
self.ret_type = self.accumulator
self.accumulator = ''
self.state.pop()
if self.found:
self.signatures.append(FunctionSig(name=self.function_name, args=self.args, ret_type=self.ret_type))
self.found = False
self.args = []
self.ret_type = 'Any'
else:
self.accumulator += token.string
def reset(self) -> None:
self.state = [STATE_INIT]
self.args = []
self.found = False
self.accumulator = ''
def get_signatures(self) -> list[FunctionSig]:
def has_arg(name: str, signature: FunctionSig) -> bool:
return any(((x.name == name) for x in signature.args))
def args_kwargs(signature: FunctionSig) -> bool:
return (has_arg('*args', signature) and has_arg('**kwargs', signature))
return sorted(self.signatures, key=(lambda x: (1 if args_kwargs(x) else 0))) |
def setup_scene(env, traj_data):
    """Reset *env* to the scene described by *traj_data* and replay its init action."""
    scene_info = traj_data['scene']
    # Reset to the target floor plan first, then restore object state.
    env.reset('FloorPlan%d' % scene_info['scene_num'])
    env.restore_scene(
        scene_info['object_poses'],
        scene_info['object_toggles'],
        scene_info['dirty_and_empty'],
    )
    # Replay the recorded initial agent action (e.g. teleport/rotation).
    env.step(dict(scene_info['init_action']))
def use_optimizer(network, params):
    """Build a torch optimizer for *network* from a config dict.

    Args:
        network: module whose ``parameters()`` are optimized.
        params: dict with key 'optimizer' in {'sgd', 'adam', 'rmsprop'} plus
            the hyper-parameters that optimizer needs ('lr',
            'l2_regularization', and for adam 'betas'/'amsgrad', for rmsprop
            'alpha'/'momentum').

    Returns:
        The configured torch.optim optimizer.

    Raises:
        ValueError: if params['optimizer'] names an unknown optimizer
            (previously this fell through to an UnboundLocalError).
    """
    name = params['optimizer']
    if name == 'sgd':
        optimizer = torch.optim.SGD(
            network.parameters(),
            lr=params['lr'],
            weight_decay=params['l2_regularization'])
    elif name == 'adam':
        optimizer = torch.optim.Adam(
            network.parameters(),
            lr=params['lr'],
            betas=params['betas'],
            weight_decay=params['l2_regularization'],
            amsgrad=params['amsgrad'])
    elif name == 'rmsprop':
        optimizer = torch.optim.RMSprop(
            network.parameters(),
            lr=params['lr'],
            alpha=params['alpha'],
            momentum=params['momentum'],
            weight_decay=params['l2_regularization'])
    else:
        raise ValueError('unknown optimizer: %r' % name)
    return optimizer
def register_train_step(name):
    """Return a decorator registering a train-step function under *name*.

    Registration fails if either the registry key or the function's own
    name has already been used.
    """

    def decorator(func):
        # Reject duplicates by registry key and by function name.
        if name in TRAIN_STEP_REGISTRY:
            raise ValueError('Cannot register duplicate train step ({})'.format(name))
        if func.__name__ in TRAIN_STEP_NAMES:
            raise ValueError('Cannot register task with duplicate train step name ({})'.format(func.__name__))
        TRAIN_STEP_REGISTRY[name] = func
        TRAIN_STEP_NAMES.add(func.__name__)
        return func

    return decorator
def train(hparams, scope=None, target_session=''):
    """Train the model selected by ``hparams.model_type``.

    Caches the train/eval/test/infer files, builds the requested model
    graph, then runs ``hparams.epochs`` epochs with periodic checkpointing
    and evaluation, early-stopping when eval AUC drops more than 0.003
    below the best value seen so far.  Runs inference at the end when an
    infer file is configured.

    Fixes vs. the original: the duplicate (unreachable) 'din' branch is
    gone, and the bare ``except:`` around checkpoint restore is narrowed.
    """
    params = hparams.values()
    for (key, val) in params.items():
        hparams.logger.info(str(key) + ':' + str(val))
    print('load and cache data...')
    if hparams.train_file is not None:
        cache_data(hparams, hparams.train_file, flag='train')
    if hparams.eval_file is not None:
        cache_data(hparams, hparams.eval_file, flag='eval')
    if hparams.test_file is not None:
        cache_data(hparams, hparams.test_file, flag='test')
    if hparams.infer_file is not None:
        cache_data(hparams, hparams.infer_file, flag='infer')
    # Dispatch table replaces the long if/elif chain; the original's second
    # 'din' branch was unreachable and has been dropped.
    model_creators = {
        'deepFM': (DeepfmModel, 'run deepfm model!'),
        'deepWide': (DeepWideModel, 'run deepWide model!'),
        'dnn': (DnnModel, 'run dnn model!'),
        'ipnn': (IpnnModel, 'run ipnn model!'),
        'opnn': (OpnnModel, 'run opnn model!'),
        'din': (DinModel, 'run din model!'),
        'fm': (FmModel, 'run fm model!'),
        'lr': (LrModel, 'run lr model!'),
        'cccfnet': (CCCFModel, 'run cccfnet model!'),
        'deepcross': (DeepCrossModel, 'run deepcross model!'),
        'exDeepFM': (ExtremeDeepFMModel, 'run extreme deepFM model!'),
        'cross': (CrossModel, 'run extreme cross model!'),
        'CIN': (CINModel, 'run extreme cin model!'),
    }
    if hparams.model_type not in model_creators:
        raise ValueError('model type should be cccfnet, deepFM, deepWide, dnn, fm, lr, ipnn, opnn, din')
    (model_creator, banner) = model_creators[hparams.model_type]
    print(banner)
    train_model = create_train_model(model_creator, hparams, scope)
    gpuconfig = tf.ConfigProto()
    gpuconfig.gpu_options.allow_growth = True
    tf.set_random_seed(1234)
    train_sess = tf.Session(target=target_session, graph=train_model.graph, config=gpuconfig)
    train_sess.run(train_model.model.init_op)
    if hparams.load_model_name is not None:
        checkpoint_path = hparams.load_model_name
        try:
            train_model.model.saver.restore(train_sess, checkpoint_path)
            print('load model', checkpoint_path)
        except Exception as exc:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; chain the original cause for debugging.
            raise IOError('Failed to find any matching files for {0}'.format(checkpoint_path)) from exc
    print('total_loss = data_loss+regularization_loss, data_loss = {rmse or logloss ..}')
    writer = tf.summary.FileWriter(util.SUMMARIES_DIR, train_sess.graph)
    last_eval = 0
    for epoch in range(hparams.epochs):
        step = 0
        train_sess.run(train_model.iterator.initializer, feed_dict={train_model.filenames: [hparams.train_file_cache]})
        epoch_loss = 0
        train_start = time.time()
        train_load_time = 0
        while True:
            try:
                t1 = time.time()
                step_result = train_model.model.train(train_sess)
                t3 = time.time()
                train_load_time += (t3 - t1)
                (_, step_loss, step_data_loss, summary) = step_result
                writer.add_summary(summary, step)
                epoch_loss += step_loss
                step += 1
                if (step % hparams.show_step) == 0:
                    print('step {0:d} , total_loss: {1:.4f}, data_loss: {2:.4f}'.format(step, step_loss, step_data_loss))
            except tf.errors.OutOfRangeError:
                # The iterator is exhausted: this epoch is done.
                print('finish one epoch!')
                break
        train_time = time.time() - train_start
        if (epoch % hparams.save_epoch) == 0:
            checkpoint_path = train_model.model.saver.save(sess=train_sess, save_path=(util.MODEL_DIR + 'epoch_' + str(epoch)))
        train_res = {'loss': epoch_loss / step}
        eval_start = time.time()
        eval_res = run_eval(train_model, train_sess, hparams.eval_file_cache, util.EVAL_NUM, hparams, flag='eval')
        train_info = ', '.join([str(item[0]) + ':' + str(item[1]) for item in sorted(train_res.items(), key=lambda x: x[0])])
        eval_info = ', '.join([str(item[0]) + ':' + str(item[1]) for item in sorted(eval_res.items(), key=lambda x: x[0])])
        if hparams.test_file is not None:
            test_res = run_eval(train_model, train_sess, hparams.test_file_cache, util.TEST_NUM, hparams, flag='test')
            test_info = ', '.join([str(item[0]) + ':' + str(item[1]) for item in sorted(test_res.items(), key=lambda x: x[0])])
        eval_time = time.time() - eval_start
        if hparams.test_file is not None:
            epoch_msg = ('at epoch {0:d}'.format(epoch) + ' train info: ' + train_info + ' eval info: ' + eval_info + ' test info: ' + test_info)
        else:
            epoch_msg = ('at epoch {0:d}'.format(epoch) + ' train info: ' + train_info + ' eval info: ' + eval_info)
        print(epoch_msg)
        hparams.logger.info(epoch_msg)
        timing_msg = 'at epoch {0:d} , train time: {1:.1f} eval time: {2:.1f}'.format(epoch, train_time, eval_time)
        print(timing_msg)
        hparams.logger.info(timing_msg)
        hparams.logger.info('\n')
        # Early stopping: quit when eval AUC falls noticeably below the best.
        if (eval_res['auc'] - last_eval) < -0.003:
            break
        if eval_res['auc'] > last_eval:
            last_eval = eval_res['auc']
    writer.close()
    if hparams.infer_file is not None:
        run_infer(train_model, train_sess, hparams.infer_file_cache, hparams, util.INFER_NUM)
class StubClass():
    """Class decorator verifying that a stub class matches an original class.

    Instantiated with the original class; calling the instance on a stub
    class checks every public attribute of the stub against the original
    (existence, callable-ness and, for functions, full argspecs).
    """

    def __init__(self, orig, check_attributes_also=False):
        # orig: the real class the stub must stay compatible with.
        # check_attributes_also: when True, plain (non-callable) stub
        # attributes must also exist on the original.
        self.orig = orig
        self.check_attributes_also = check_attributes_also

    def __call__(self, stub):
        """Validate *stub* and return it (decorator protocol)."""
        for attribute_name in dir(stub):
            self.check_compliance(stub, attribute_name)
        # Record the original class on the stub for later introspection.
        stub._stubbed_class = self.orig
        return stub

    def find_in_dict(self, stub, attribute_name):
        """Return the raw class-dict entry for *attribute_name* along the MRO.

        Unlike getattr(), this bypasses the descriptor protocol so raw
        descriptor objects can be inspected.  Returns None when absent.
        """
        for class_ in stub.__mro__:
            try:
                return class_.__dict__[attribute_name]
            except KeyError:
                pass
        return None

    def check_compliance(self, stub, attribute_name):
        """Check a single stub attribute against the original class.

        Raises AssertionError on a mismatch.
        """
        # Private/dunder attributes are not checked.
        if attribute_name.startswith('_'):
            return
        attribute = getattr(stub, attribute_name)
        possible_descriptor = self.find_in_dict(stub, attribute_name)
        if isinstance(possible_descriptor, StubbleDescriptor):
            # A StubbleDescriptor performs its own compliance check.
            possible_descriptor.name = attribute_name
            possible_descriptor.stubble_check(None, self.orig, stub)
            if possible_descriptor.is_instance_only():
                delattr(stub, attribute_name)
            return
        try:
            orig_attribute = getattr(self.orig, attribute_name)
        except AttributeError:
            if (not self.check_attributes_also):
                # Non-callable extras are tolerated unless attribute
                # checking was explicitly requested.
                if ((not isinstance(attribute, Callable)) and (not isinstance(attribute, property))):
                    return
            message = ('attribute mismatch: %s.%s does not exist on %s' % (stub, attribute_name, self.orig))
            raise AssertionError(message)
        if (orig_attribute is attribute):
            return
        self.types_match(stub, orig_attribute, attribute)
        if (inspect.ismethod(orig_attribute) or inspect.isfunction(orig_attribute)):
            self.signatures_match(orig_attribute, attribute)

    def types_match(self, stub, orig, stubbed):
        """Assert that stub and original agree on being callable or not."""
        assert (isinstance(orig, Callable) == isinstance(stubbed, Callable)), ('attribute mismatch: %s.%s is not compatible with the original type %s on %s' % (stub, stubbed, type(orig), self.orig))

    def signatures_match(cls, orig, stubbed, ignore_self=False, compare_in_signature=['args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults']):
        """Assert that *orig* and *stubbed* have matching argspecs.

        NOTE(review): the first parameter is named `cls` but there is no
        @classmethod decorator, and it is invoked as an instance method
        above — presumably a stripped decorator; confirm upstream.  The
        mutable default `compare_in_signature` is only read, never mutated,
        so the shared-default pitfall does not bite here.
        """
        orig_arguments = inspect.getfullargspec(cls.get_real_func_or_method(orig))
        stub_arguments = inspect.getfullargspec(cls.get_real_func_or_method(stubbed))
        if ignore_self:
            if ('self' in orig_arguments.args):
                orig_arguments.args.remove('self')
            if ('self' in stub_arguments.args):
                stub_arguments.args.remove('self')
        orig_arg_dict = orig_arguments._asdict()
        stub_arg_dict = stub_arguments._asdict()

        def assert_same(key):
            orig_declaration = orig_arg_dict[key]
            stub_declaration = stub_arg_dict[key]
            assert (orig_declaration == stub_declaration), ('signature mismatch on %s for %s: orig(%s) != stub(%s)' % (getattr(stubbed, '__name__', repr(stubbed)), key, orig_declaration, stub_declaration))
        for key in compare_in_signature:
            assert_same(key)
        return False

    def get_real_func_or_method(cls, func_or_method):
        """Unwrap functools-wrapped callables to the underlying function."""
        if hasattr(func_or_method, '__wrapped__'):
            return func_or_method.__wrapped__
        return func_or_method
def pin_memory(data_queue, pinned_data_queue, sema):
    """Worker loop: pin each batch's tensors and forward it until *sema* fires.

    Pulls dicts holding 'xs'/'ys' tensor lists from *data_queue*, pins them
    into page-locked memory and pushes them onto *pinned_data_queue*.  After
    each batch it tries a non-blocking acquire of *sema*; success means the
    controller asked the worker to stop.
    """
    while True:
        batch = data_queue.get()
        batch['xs'] = [tensor.pin_memory() for tensor in batch['xs']]
        batch['ys'] = [tensor.pin_memory() for tensor in batch['ys']]
        pinned_data_queue.put(batch)
        # Shutdown signal: the semaphore was released by the controller.
        if sema.acquire(blocking=False):
            return
def test_prepare_nu_t_counts():
    """Check PrepareNuState T-counts against the closed-form expected cost."""
    n_p = 6
    m_param = 2 ** (2 * n_p + 3)
    n_m = (m_param - 1).bit_length()
    # Closed-form expected cost assembled in two pieces, then cross-checked
    # against the paper's Eq. 90 expression.
    expected_cost = 3 * n_p ** 2 + n_p + 4 * n_m * (n_p + 1) + 4
    expected_cost += 2 * 4 * (n_p - 1) + 6 * n_p + 2
    eq_90 = 3 * n_p ** 2 + 15 * n_p - 7 + 4 * n_m * (n_p + 1)
    assert expected_cost == eq_90 + 5
    # Average the forward and adjoint T-gate counts, in Toffoli units (/4).
    prep_forward = PrepareNuState(n_p, m_param)
    (_, fwd_counts) = prep_forward.call_graph()
    prep_adjoint = PrepareNuState(n_p, m_param, adjoint=True)
    (_, adj_counts) = prep_adjoint.call_graph()
    qual_cost = (fwd_counts[TGate()] + adj_counts[TGate()]) // 4
    # Known constant discrepancy of 1 against the closed form.
    assert qual_cost == expected_cost - 1
class AddGaussianLoss(layers.Layer):
    """Keras layer computing a scaled KL divergence between a diagonal
    Gaussian (mu, std) and the standard normal prior.

    The scale `lamb_kl` is a non-trainable scalar weight so it can be
    annealed externally (e.g. by a callback) during training.
    """

    def __init__(self, **kwargs):
        super(AddGaussianLoss, self).__init__(**kwargs)
        # Scalar KL weight; set from outside, never learned.
        self.lamb_kl = self.add_weight(shape=(), name='lamb_kl', trainable=False)

    def call(self, inputs):
        """Return lamb_kl * mean KL(N(mu, diag(std^2)) || N(0, I)).

        NOTE(review): `tfp.MultivariateNormalDiag` / `tfp.kl_divergence` are
        normally reached via `tfp.distributions`; this relies on how `tfp`
        is imported/aliased at module level — confirm there.
        """
        (mu, std) = inputs
        var_dist = tfp.MultivariateNormalDiag(loc=mu, scale_diag=std)
        pri_dist = tfp.MultivariateNormalDiag(loc=K.zeros_like(mu), scale_diag=K.ones_like(std))
        kl_loss = (self.lamb_kl * K.mean(tfp.kl_divergence(var_dist, pri_dist)))
        return kl_loss
def test_FilterGE():
    """FilterGE keeps only alternatives with ROE >= 2 and RI >= 28."""
    # Shared decision-matrix settings for input and expected output.
    common = dict(objectives=[max, max, min], weights=[2, 4, 1], criteria=['ROE', 'CAP', 'RI'])
    dm = skc.mkdm(
        matrix=[[7, 5, 35], [5, 4, 26], [5, 6, 28], [1, 7, 30], [5, 8, 30]],
        alternatives=['PE', 'JN', 'AA', 'MM', 'FN'],
        **common,
    )
    expected = skc.mkdm(
        matrix=[[7, 5, 35], [5, 6, 28], [5, 8, 30]],
        alternatives=['PE', 'AA', 'FN'],
        **common,
    )
    result = filters.FilterGE({'ROE': 2, 'RI': 28}).transform(dm)
    assert result.equals(expected)
# NOTE(review): the next line looks like a mangled decorator (probably
# `@register_lr_scheduler('triangular')`) — confirm against the original file.
_lr_scheduler('triangular')
class TriangularSchedule(FairseqLRScheduler):
    """Cyclical (triangular) learning-rate schedule: the LR ramps linearly
    between min_lr and max_lr over a cycle of 2 * stepsize updates,
    optionally shrinking both bounds by lr_shrink each completed cycle."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with triangular. Consider --lr-scheduler=fixed instead.')
        lr = args.lr[0]
        assert (args.max_lr > lr), 'max_lr must be more than lr'
        self.min_lr = lr
        self.max_lr = args.max_lr
        # Half a cycle, measured in optimizer updates.
        self.stepsize = (args.lr_period_updates // 2)
        self.lr_shrink = args.lr_shrink
        self.shrink_min = args.shrink_min
        # Start at the bottom of the triangle.
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    def add_args(parser):
        """Add scheduler-specific arguments to *parser*.

        NOTE(review): no @staticmethod decorator although there is no
        `self` parameter — presumably stripped; class-level calls still work.
        """
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', help='initial number of updates per period (cycle length)')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing')
        parser.add_argument('--shrink-min', action='store_true', help='if set, also shrinks min lr')

    def step(self, epoch, val_loss=None):
        """End-of-epoch hook; the LR itself is adjusted per update."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Recompute and apply the LR after each optimizer update."""
        cycle = math.floor((num_updates / (2 * self.stepsize)))
        # Shrink the triangle's bounds once per completed cycle.
        lr_shrink = (self.lr_shrink ** cycle)
        max_lr = (self.max_lr * lr_shrink)
        if self.shrink_min:
            min_lr = (self.min_lr * lr_shrink)
        else:
            min_lr = self.min_lr
        # x sweeps 1 -> 0 -> 1 across a cycle; the LR peaks where x == 0.
        x = abs((((num_updates / self.stepsize) - (2 * (cycle + 1))) + 1))
        self.lr = (min_lr + ((max_lr - min_lr) * max(0, (1 - x))))
        self.optimizer.set_lr(self.lr)
        return self.lr
class PublicKey(PublicKeyBase):
    """A public key on the secp256k1 curve.

    NOTE(review): the factory/verification methods below were missing
    @staticmethod decorators although they are invoked without an instance,
    and `compressed_bytes` was a plain method although __init__ reads it as
    an attribute (which would hash the bound method and fail).  The
    decorators are restored here — confirm against the original upstream.
    """

    TESTNET_VERSION = 111
    MAINNET_VERSION = 0

    @staticmethod
    def from_point(p):
        """Build a PublicKey from an elliptic-curve point with .x/.y."""
        return PublicKey(p.x, p.y)

    @staticmethod
    def from_int(i):
        """Build a PublicKey from an integer encoding both coordinates."""
        point = ECPointAffine.from_int(bitcoin_curve, i)
        return PublicKey.from_point(point)

    @staticmethod
    def from_base64(b64str, testnet=False):
        """Decode a base64 SEC-encoded key. *testnet* is accepted but unused here."""
        return PublicKey.from_bytes(base64.b64decode(b64str))

    @staticmethod
    def from_bytes(key_bytes):
        """Decode a SEC1 byte string (0x04 uncompressed, 0x02/0x03 compressed).

        Returns None when a compressed key has no y with matching parity.

        Raises:
            ValueError: on a wrong length or an unknown leading type byte
                (previously an unknown type byte caused a NameError).
        """
        b = get_bytes(key_bytes)
        key_bytes_len = len(b)
        key_type = b[0]
        if key_type == 4:
            if key_bytes_len != 65:
                raise ValueError('key_bytes must be exactly 65 bytes long when uncompressed.')
            x = int.from_bytes(b[1:33], 'big')
            y = int.from_bytes(b[33:65], 'big')
        elif key_type == 2 or key_type == 3:
            if key_bytes_len != 33:
                raise ValueError('key_bytes must be exactly 33 bytes long when compressed.')
            x = int.from_bytes(b[1:33], 'big')
            ys = bitcoin_curve.y_from_x(x)
            # Pick the curve solution whose parity matches the type byte.
            last_bit = key_type - 2
            for y in ys:
                if (y & 1) == last_bit:
                    break
            else:
                return None
        else:
            raise ValueError('key_bytes must start with 0x02, 0x03 or 0x04.')
        return PublicKey(x, y)

    @staticmethod
    def from_hex(h):
        """Build a PublicKey from a hex string (via get_bytes in from_bytes)."""
        return PublicKey.from_bytes(h)

    @staticmethod
    def from_signature(message, signature):
        """Recover the signing PublicKey from *message* and a signature
        carrying a recovery_id; returns None when recovery fails."""
        if signature.recovery_id is None:
            raise ValueError('The signature must have a recovery_id.')
        msg = get_bytes(message)
        pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id)
        for (k, recid) in pub_keys:
            if (signature.recovery_id is not None) and (recid == signature.recovery_id):
                return PublicKey(k.x, k.y)
        return None

    @staticmethod
    def verify_bitcoin(message, signature, address):
        """Verify a Bitcoin 'signed message' (base64 *signature*) for *address*."""
        magic_sig = base64.b64decode(signature)
        # The magic byte encodes the recovery id (low 2 bits) and the
        # compressed-key flag (bit 2), offset by 27.
        magic = magic_sig[0]
        sig = Signature.from_bytes(magic_sig[1:])
        sig.recovery_id = (magic - 27) & 3
        compressed = ((magic - 27) & 4) != 0
        msg = (b'\x18Bitcoin Signed Message:\n' + bytes([len(message)])) + message
        msg_hash = hashlib.sha256(msg).digest()
        derived_public_key = PublicKey.from_signature(msg_hash, sig)
        if derived_public_key is None:
            raise ValueError('Could not recover public key from the provided signature.')
        (ver, h160) = address_to_key_hash(address)
        hash160 = derived_public_key.hash160(compressed)
        if hash160 != h160:
            return False
        return derived_public_key.verify(msg_hash, sig)

    def __init__(self, x, y):
        """Validate (x, y) on secp256k1 and precompute hash identities."""
        p = ECPointAffine(bitcoin_curve, x, y)
        if not bitcoin_curve.is_on_curve(p):
            raise ValueError('The provided (x, y) are not on the secp256k1 curve.')
        self.point = p
        # RIPEMD160(SHA256(encoding)) for the uncompressed encoding...
        r = hashlib.new('ripemd160')
        r.update(hashlib.sha256(bytes(self)).digest())
        self.ripe = r.digest()
        # ...and the compressed one.
        r = hashlib.new('ripemd160')
        r.update(hashlib.sha256(self.compressed_bytes).digest())
        self.ripe_compressed = r.digest()
        # Keccak over the uncompressed key minus the 0x04 prefix
        # (Ethereum-style address material).
        self.keccak = sha3(bytes(self)[1:])

    def hash160(self, compressed=True):
        """Return RIPEMD160(SHA256(encoding)) for the chosen encoding."""
        return self.ripe_compressed if compressed else self.ripe

    def address(self, compressed=True, testnet=False):
        """Return an Ethereum-style address: '0x' + last 20 keccak bytes.

        NOTE(review): *compressed* and *testnet* are accepted but unused here.
        """
        version = '0x'
        return version + binascii.hexlify(self.keccak[12:]).decode('ascii')

    def verify(self, message, signature, do_hash=True):
        """Verify *signature* over *message* against this key."""
        msg = get_bytes(message)
        return bitcoin_curve.verify(msg, signature, self.point, do_hash)

    def to_base64(self):
        """Return the base64 of the uncompressed SEC encoding."""
        return base64.b64encode(bytes(self))

    def __int__(self):
        # Pack x above y, each masked to 256 bits.
        mask = (2 ** 256) - 1
        return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask)

    def __bytes__(self):
        return bytes(self.point)

    @property
    def compressed_bytes(self):
        """SEC1 compressed encoding of the point (restored as a property so
        attribute-style access in __init__ works)."""
        return self.point.compressed_bytes
def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False):
    """Append one CSV row of metrics for *epoch* to *filename*.

    Metric names are prefixed with 'train_' / 'eval_'; pass
    write_header=True on the first call so column names are emitted.
    """
    row = OrderedDict(epoch=epoch)
    for prefix, metrics in (('train_', train_metrics), ('eval_', eval_metrics)):
        for metric_name, metric_value in metrics.items():
            row[prefix + metric_name] = metric_value
    # Append mode so successive epochs accumulate in the same file.
    with open(filename, mode='a') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=row.keys())
        if write_header:
            writer.writeheader()
        writer.writerow(row)
def test_make_cf_dataarray_lonlat():
    """CF conversion sets degree units on the lon/lat (x/y) coordinates."""
    from pyresample import create_area_def
    from satpy.cf.data_array import make_cf_data_array
    from satpy.resample import add_crs_xy_coords
    # A tiny 5x5 EPSG:4326 (lon/lat) area centred on (0, 0).
    area = create_area_def('mavas', 4326, shape=(5, 5), center=(0, 0), resolution=(1, 1))
    data_arr = xr.DataArray(np.arange(25).reshape(5, 5), dims=('y', 'x'), attrs={'area': area})
    data_arr = add_crs_xy_coords(data_arr, area)
    cf_arr = make_cf_data_array(data_arr)
    assert cf_arr['x'].attrs['units'] == 'degrees_east'
    assert cf_arr['y'].attrs['units'] == 'degrees_north'
class FacebookOAuth2(BaseOAuth2):
    """Facebook OAuth2 authentication backend.

    NOTE(review): the four endpoint URL string literals were truncated to a
    dangling quote in this copy of the file (a syntax error as-is); they are
    restored here from the upstream social-core Facebook backend — confirm
    against the project's pinned version.
    """
    name = 'facebook'
    REDIRECT_STATE = False
    RESPONSE_TYPE = None
    SCOPE_SEPARATOR = ','
    AUTHORIZATION_URL = 'https://www.facebook.com/{version}/dialog/oauth'
    ACCESS_TOKEN_URL = 'https://graph.facebook.com/{version}/oauth/access_token'
    REVOKE_TOKEN_URL = 'https://graph.facebook.com/{version}/{uid}/permissions'
    REVOKE_TOKEN_METHOD = 'DELETE'
    USER_DATA_URL = 'https://graph.facebook.com/{version}/me'
    EXTRA_DATA = [('id', 'id'), ('expires', 'expires'), ('granted_scopes', 'granted_scopes'), ('denied_scopes', 'denied_scopes')]

    def auth_params(self, state=None):
        """Ask Facebook to report granted/denied scopes in the redirect."""
        params = super().auth_params(state)
        params['return_scopes'] = 'true'
        return params

    def authorization_url(self):
        version = self.setting('API_VERSION', API_VERSION)
        return self.AUTHORIZATION_URL.format(version=version)

    def access_token_url(self):
        version = self.setting('API_VERSION', API_VERSION)
        return self.ACCESS_TOKEN_URL.format(version=version)

    def get_user_details(self, response):
        """Map the Graph API profile payload onto the common user-detail keys."""
        (fullname, first_name, last_name) = self.get_user_names(response.get('name', ''), response.get('first_name', ''), response.get('last_name', ''))
        return {'username': response.get('username', response.get('name')), 'email': response.get('email', ''), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}

    def user_data(self, access_token, *args, **kwargs):
        """Load user data from the Graph API, signing with appsecret_proof."""
        params = self.setting('PROFILE_EXTRA_PARAMS', {}).copy()
        params['access_token'] = access_token
        if self.setting('APPSECRET_PROOF', True):
            (_, secret) = self.get_key_and_secret()
            # HMAC-SHA256 of the token keyed with the app secret, required
            # when 'Require App Secret' is enabled on the Facebook app.
            params['appsecret_proof'] = hmac.new(secret.encode('utf8'), msg=access_token.encode('utf8'), digestmod=hashlib.sha256).hexdigest()
        version = self.setting('API_VERSION', API_VERSION)
        return self.get_json(self.USER_DATA_URL.format(version=version), params=params)

    def process_error(self, data):
        super().process_error(data)
        if data.get('error_code'):
            # Facebook reports user cancellation through error_code.
            raise AuthCanceled(self, (data.get('error_message') or data.get('error_code')))

    # NOTE(review): a stray `_` statement stood here in the damaged copy
    # (NameError at class creation); upstream social-core decorates
    # auth_complete with @handle_http_errors — restore when confirmed.
    def auth_complete(self, *args, **kwargs):
        """Exchange the authorization code for an access token and log in."""
        self.process_error(self.data)
        if not self.data.get('code'):
            raise AuthMissingParameter(self, 'code')
        state = self.validate_state()
        (key, secret) = self.get_key_and_secret()
        response = self.request(self.access_token_url(), params={'client_id': key, 'redirect_uri': self.get_redirect_uri(state), 'client_secret': secret, 'code': self.data['code']})
        try:
            response = response.json()
        except ValueError:
            # Older Graph API versions returned querystring-encoded bodies.
            response = parse_qs(response.text)
        access_token = response['access_token']
        return self.do_auth(access_token, response, *args, **kwargs)

    def process_refresh_token_response(self, response, *args, **kwargs):
        try:
            return response.json()
        except ValueError:
            return parse_qs(response.content)

    def refresh_token_params(self, token, *args, **kwargs):
        """Facebook refreshes tokens via the fb_exchange_token grant."""
        (client_id, client_secret) = self.get_key_and_secret()
        return {'fb_exchange_token': token, 'grant_type': 'fb_exchange_token', 'client_id': client_id, 'client_secret': client_secret}

    # NOTE(review): another stray `_` stood here — likely a stripped
    # @handle_http_errors decorator on do_auth upstream.
    def do_auth(self, access_token, response=None, *args, **kwargs):
        """Fetch user data with *access_token* and authenticate the user."""
        response = response or {}
        data = self.user_data(access_token)
        if not isinstance(data, dict):
            raise AuthUnknownError(self, 'An error occurred while retrieving users Facebook data')
        data['access_token'] = access_token
        if 'expires_in' in response:
            data['expires'] = response['expires_in']
        if self.data.get('granted_scopes'):
            data['granted_scopes'] = self.data['granted_scopes'].split(',')
        if self.data.get('denied_scopes'):
            data['denied_scopes'] = self.data['denied_scopes'].split(',')
        kwargs.update({'backend': self, 'response': data})
        return self.strategy.authenticate(*args, **kwargs)

    def revoke_token_url(self, token, uid):
        version = self.setting('API_VERSION', API_VERSION)
        return self.REVOKE_TOKEN_URL.format(version=version, uid=uid)

    def revoke_token_params(self, token, uid):
        return {'access_token': token}

    def process_revoke_token_response(self, response):
        # NOTE(review): requests' `response.content` is bytes, so comparing
        # to the str 'true' is always False on Python 3 — confirm intent and
        # compare against b'true' if so.
        return (super().process_revoke_token_response(response) and (response.content == 'true'))
def main():
    """Generate .ltr (letter) and .wrd (word) label files for a
    LibriSpeech-style wav2vec manifest TSV.

    The TSV's first line is the data root; every following line is a path
    relative to that root.  Transcripts for each directory are loaded once
    from its '<speaker>-<chapter>.trans.txt' file and cached.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('tsv')
    parser.add_argument('--output-dir', required=True)
    parser.add_argument('--output-name', required=True)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    # Cache of directory -> {utterance id: transcript text}.
    transcriptions = {}
    with open(args.tsv, 'r') as tsv, open(os.path.join(args.output_dir, (args.output_name + '.ltr')), 'w') as ltr_out, open(os.path.join(args.output_dir, (args.output_name + '.wrd')), 'w') as wrd_out:
        root = next(tsv).strip()
        for line in tsv:
            line = line.strip()
            # Renamed from `dir`, which shadowed the builtin.
            utt_dir = os.path.dirname(line)
            if utt_dir not in transcriptions:
                # Transcript files are named '<speaker>-<chapter>.trans.txt'.
                parts = utt_dir.split(os.path.sep)
                trans_path = f'{parts[-2]}-{parts[-1]}.trans.txt'
                path = os.path.join(root, utt_dir, trans_path)
                assert os.path.exists(path)
                texts = {}
                with open(path, 'r') as trans_f:
                    for tline in trans_f:
                        # First token is the utterance id, the rest is text.
                        items = tline.strip().split()
                        texts[items[0]] = ' '.join(items[1:])
                transcriptions[utt_dir] = texts
            part = os.path.basename(line).split('.')[0]
            assert part in transcriptions[utt_dir]
            print(transcriptions[utt_dir][part], file=wrd_out)
            # Letter targets: spaces become '|', plus a trailing boundary.
            print(' '.join(list(transcriptions[utt_dir][part].replace(' ', '|'))) + ' |', file=ltr_out)
def update_sync_status_to_sync_now(mirror):
    """Transition *mirror* to SYNC_NOW unless it is already syncing.

    Uses sync_transaction_id as an optimistic lock: the update only lands
    if the row has not changed since *mirror* was read.  Returns the
    freshly loaded config row on success, or None otherwise.
    """
    if mirror.sync_status == RepoMirrorStatus.SYNCING:
        return None
    # Always leave at least one retry available.
    remaining_retries = max(mirror.sync_retries_remaining, 1)
    rows_changed = RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_status=RepoMirrorStatus.SYNC_NOW,
        sync_expiration_date=None,
        sync_retries_remaining=remaining_retries,
    ).where(
        (RepoMirrorConfig.id == mirror.id),
        (RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id),
    ).execute()
    if rows_changed:
        return RepoMirrorConfig.get_by_id(mirror.id)
    return None
def run_data_migration(apps, schema_editor):
    """Replace NULLs with blank strings on the Task text-like columns."""
    Task = apps.get_model('tasks', 'Task')
    # uri/key/comment fields plus the five numbered title/text variants.
    text_fields = ['uri', 'uri_prefix', 'key', 'comment']
    text_fields += ['title_lang%d' % i for i in range(1, 6)]
    text_fields += ['text_lang%d' % i for i in range(1, 6)]
    set_null_to_blank(Task.objects.all(), text_fields)
def read_csv(file_path, QI_INDEX, IS_CAT, IS_DATETIME, SA_INDEX, header=False, delimiter=',', encoding='utf-8', TIME_FORMAT_STR='%Y-%m-%d %H:%M:%S'):
    """Load a delimited file, numericising the quasi-identifier columns.

    For each QI column: datetime columns are parsed with TIME_FORMAT_STR,
    categorical columns are mapped to integer codes in first-seen order,
    and the rest are parsed as floats.  The sensitive attribute at
    SA_INDEX is appended verbatim to each row.  Rows containing '?' or any
    empty field are skipped.

    Returns:
        (data, intuitive_order): the numericised rows, plus per-QI lists of
        the original categorical values, indexed by their integer code.
    """
    qi_count = len(QI_INDEX)
    rows = []
    # Per-QI mapping from raw categorical value to its integer code.
    code_maps = [dict() for _ in range(qi_count)]
    # Per-QI list of raw values in encounter (= code) order.
    intuitive_order = [list() for _ in range(qi_count)]
    with open(file_path, 'r', encoding=encoding) as data_file:
        if header:
            data_file.readline()  # discard the header row
        for line in data_file:
            # Skip rows with missing values, marked by '?'.
            if len(line) == 0 or '?' in line:
                continue
            fields = [item.strip() for item in line.split(delimiter)]
            if not all(fields):
                continue  # drop rows containing any empty field
            row = []
            for qi, column in enumerate(QI_INDEX):
                raw = fields[column]
                if IS_DATETIME[qi]:
                    row.append(datetime.strptime(raw, TIME_FORMAT_STR))
                elif IS_CAT[qi]:
                    if raw not in code_maps[qi]:
                        # Assign the next integer code and remember order.
                        code_maps[qi][raw] = len(intuitive_order[qi])
                        intuitive_order[qi].append(raw)
                    row.append(code_maps[qi][raw])
                else:
                    row.append(float(raw))
            row.append(fields[SA_INDEX])
            rows.append(row)
    return (rows, intuitive_order)
class TestNonChrootClient(KazooTestCase):
    """Client behaviour when no chroot is configured."""

    def test_create(self):
        # A non-chrooted client reports an empty chroot and can round-trip
        # an ephemeral node create/delete at the root.
        client = self._get_nonchroot_client()
        assert client.chroot == ''
        client.start()
        created_path = client.create(uuid.uuid4().hex, ephemeral=True)
        client.delete(created_path)
        client.stop()

    def test_unchroot(self):
        client = self._get_nonchroot_client()
        client.chroot = '/a'
        # Paths under the chroot are stripped; others pass through as-is.
        for full_path, expected in (('/a', '/'), ('/a/b', '/b'), ('/b/c', '/b/c')):
            assert client.unchroot(full_path) == expected
def test_sorm():
    """SORM betas should match the reference values within tolerance."""
    (options, stochastic_model, limit_state) = setup()
    analysis = ra.Sorm(analysis_options=options, stochastic_model=stochastic_model, limit_state=limit_state)
    analysis.run()
    # Echo the Breitung estimates for easier debugging on failure.
    print(analysis.betag_breitung)
    print(analysis.betag_breitung_m)
    assert pytest.approx(analysis.betaHL, abs=0.0001) == 3.7347
    assert pytest.approx(analysis.betag_breitung, abs=0.0001) == 3.8537
    assert pytest.approx(analysis.betag_breitung_m, abs=0.0002) == 3.8582
class SphereMarginProduct(nn.Module):
    """SphereFace (A-Softmax) margin product layer.

    Computes cos(m*theta) between L2-normalised features and class weights,
    annealing between the plain softmax target and the margin target with
    lambda = max(lambda_min, base * (1 + gamma * iter) ** -power).

    Fixes vs. the original: `super().__init__()` was missing (Parameter
    registration on nn.Module fails without it), `cos_theta` was *called*
    instead of clamped, the formula list was *called* instead of indexed,
    and the `iter` constructor argument was silently ignored.
    """

    def __init__(self, in_feature, out_feature, m=4, base=1000.0, gamma=0.0001, power=2, lambda_min=5.0, iter=0):
        super(SphereMarginProduct, self).__init__()
        assert m in [1, 2, 3, 4], 'margin should be 1, 2, 3 or 4'
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.m = m
        self.base = base
        self.gamma = gamma
        self.power = power
        self.lambda_min = lambda_min
        self.iter = iter  # was hard-coded to 0, ignoring the argument
        self.weight = Parameter(torch.Tensor(out_feature, in_feature))
        nn.init.xavier_uniform_(self.weight)
        # Chebyshev-style expansions of cos(m*theta), indexed by m.
        self.margin_formula = [
            lambda x: x ** 0,
            lambda x: x ** 1,
            lambda x: 2 * x ** 2 - 1,
            lambda x: 4 * x ** 3 - 3 * x,
            lambda x: 8 * x ** 4 - 8 * x ** 2 + 1,
            lambda x: 16 * x ** 5 - 20 * x ** 3 + 5 * x,
        ]

    def forward(self, input, label):
        """Return margin-adjusted logits of shape (batch, out_feature)."""
        self.iter += 1
        # Annealed mixing coefficient; decays toward lambda_min over time.
        self.cur_lambda = max(self.lambda_min, self.base * (1 + self.gamma * self.iter) ** (-1 * self.power))
        cos_theta = F.linear(F.normalize(input), F.normalize(self.weight))
        # Fix: clamp into [-1, 1] (original called the tensor: cos_theta(-1, 1)).
        cos_theta = cos_theta.clamp(-1, 1)
        # Fix: index the formula table (original called the list).
        cos_m_theta = self.margin_formula[self.m](cos_theta)
        theta = cos_theta.data.acos()
        # k selects the monotonic branch so phi is decreasing in theta.
        k = ((self.m * theta) / math.pi).floor()
        phi_theta = ((-1.0) ** k) * cos_m_theta - 2 * k
        phi_theta_ = (self.cur_lambda * cos_theta + phi_theta) / (1 + self.cur_lambda)
        norm_of_feature = torch.norm(input, 2, 1)
        # Apply the margin target only at each sample's true class.
        one_hot = torch.zeros_like(cos_theta)
        one_hot.scatter_(1, label.view(-1, 1), 1)
        output = one_hot * phi_theta_ + (1 - one_hot) * cos_theta
        output *= norm_of_feature.view(-1, 1)
        return output
class DecisionMatrixDominanceAccessor(AccessorABC):
_default_kind = 'dominance'
def __init__(self, dm):
self._dm = dm
_cache(maxsize=None)
def _dominance_cache(self):
dm = self._dm
reverse = dm.minwhere
(dominance_cache, alts_numpy) = ({}, {})
for (a0, a1) in it.combinations(dm.alternatives, 2):
for aname in (a0, a1):
if (aname not in alts_numpy):
alts_numpy[aname] = dm.alternatives[aname]
dominance_cache[(a0, a1)] = rank.dominance(alts_numpy[a0], alts_numpy[a1], reverse=reverse)
return dominance_cache
def _cache_read(self, a0, a1):
key = (a0, a1)
cache = self._dominance_cache
(entry, key_reverted) = ((cache[key], False) if (key in cache) else (cache[key[::(- 1)]], True))
return (entry, key_reverted)
def _create_frame(self, compute_cell, iname, cname):
alternatives = self._dm.alternatives
rows = []
for a0 in alternatives:
row = OrderedDict()
for a1 in alternatives:
row[a1] = compute_cell(a0, a1)
rows.append(row)
df = pd.DataFrame(rows, index=alternatives)
df.index.name = iname
df.columns.name = cname
return df
def bt(self):
def compute_cell(a0, a1):
if (a0 == a1):
return 0
(centry, ckreverted) = self._cache_read(a0, a1)
return (centry.aDb if (not ckreverted) else centry.bDa)
return self._create_frame(compute_cell, iname='Better than', cname='Worse than')
def eq(self):
criteria_len = len(self._dm.criteria)
def compute_cell(a0, a1):
if (a0 == a1):
return criteria_len
(centry, _) = self._cache_read(a0, a1)
return centry.eq
return self._create_frame(compute_cell, iname='Equals to', cname='Equals to')
def dominance(self, *, strict=False):
def compute_cell(a0, a1):
if (a0 == a1):
return False
(centry, ckreverted) = self._cache_read(a0, a1)
(performance_a0, performance_a1) = ((centry.aDb, centry.bDa) if (not ckreverted) else (centry.bDa, centry.aDb))
if (strict and centry.eq):
return False
return ((performance_a0 > 0) and (performance_a1 == 0))
(iname, cname) = (('Strict dominators', 'Strictly dominated') if strict else ('Dominators', 'Dominated'))
dom = self._create_frame(compute_cell, iname=iname, cname=cname)
return dom
def compare(self, a0, a1):
    """Build a DataFrame comparing alternatives ``a0`` and ``a1`` criterion by criterion."""
    # Cached dominance entry; ckreverted tells us (a1, a0) was the stored order.
    (centry, ckreverted) = self._cache_read(a0, a1)
    # Re-orient the cached counts/masks so the *_a0 values always refer to a0.
    (performance_a0, performance_a1) = ((centry.aDb, centry.bDa) if (not ckreverted) else (centry.bDa, centry.aDb))
    (where_aDb, where_bDa) = ((centry.aDb_where, centry.bDa_where) if (not ckreverted) else (centry.bDa_where, centry.aDb_where))
    (eq, eq_where) = (centry.eq, centry.eq_where)
    criteria = self._dm.criteria
    # Rows: a0, a1 and an "Equals" pseudo-row; columns: one per criterion.
    alt_index = pd.MultiIndex.from_tuples([('Alternatives', a0), ('Alternatives', a1), ('Equals', '')])
    crit_index = pd.MultiIndex.from_product([['Criteria'], criteria])
    df = pd.DataFrame([pd.Series(where_aDb, name=alt_index[0], index=crit_index), pd.Series(where_bDa, name=alt_index[1], index=crit_index), pd.Series(eq_where, name=alt_index[2], index=crit_index)])
    # Aggregate counts appended as a trailing "Performance" column.
    df = df.assign(Performance=[performance_a0, performance_a1, eq])
    return df
def dominated(self, *, strict=False):
    """Series flagging, per alternative, whether anything dominates it."""
    result = self.dominance(strict=strict).any()
    # Promote the old axis name to the series name, then relabel the index.
    result.name, result.index.name = result.index.name, 'Alternatives'
    return result
_cache(maxsize=None)
def dominators_of(self, a, *, strict=False):
    """Return every alternative that (transitively) dominates ``a``.

    Recurses through the dominators' own dominators; the result may contain
    repeats if several chains reach the same alternative.
    """
    dominance_a = self.dominance(strict=strict)[a]
    # fix: use ``not`` instead of ``~``. Bitwise-not is only a logical negation
    # for numpy bools; on a plain Python bool ``~True`` is -2 (truthy) and the
    # condition would silently invert.
    if not dominance_a.any():
        return np.array([], dtype=str)
    dominators = dominance_a.index[dominance_a]
    for dominator in dominators:
        # Iteration is bound to the original index; reassigning ``dominators``
        # inside the loop does not affect it.
        dominators_dominators = self.dominators_of(dominator, strict=strict)
        dominators = np.concatenate((dominators, dominators_dominators))
    return dominators
def has_loops(self, *, strict=False):
    """Detect a dominance cycle.

    A cycle makes ``dominators_of`` recurse forever, so a RecursionError is
    taken as proof of a loop.
    """
    pending = list(self.dominated(strict=strict).sort_values().index)
    try:
        while pending:
            candidate = pending.pop()
            chain = self.dominators_of(candidate, strict=strict)
            # Anything dominated by the candidate's chain needs no re-check.
            pending = [name for name in pending if name not in chain]
    except RecursionError:
        return True
    else:
        return False
.parametrize('outformat', ['TEXT', 'JSON'])
def test_non_json_instance_mixed_with_valid_and_invalid_data(run_line, tmp_path, outformat):
    """Mixing a malformed, a valid and an invalid instance must fail with both
    a parse error and a validation error reported, in TEXT and JSON output."""
    contents = {
        'schema.json': json.dumps(TITLE_SCHEMA),
        'malformed_instance.json': '{',
        'good_instance.json': '{"title": "ohai"}',
        'bad_instance.json': '{"title": false}',
    }
    for fname, text in contents.items():
        (tmp_path / fname).write_text(text)
    schema = tmp_path / 'schema.json'
    malformed_instance = tmp_path / 'malformed_instance.json'
    good_instance = tmp_path / 'good_instance.json'
    bad_instance = tmp_path / 'bad_instance.json'
    res = run_line([
        'check-jsonschema', '-o', outformat,
        '--schemafile', str(schema),
        str(good_instance), str(malformed_instance), str(bad_instance),
    ])
    assert res.exit_code == 1
    if outformat == 'TEXT':
        assert f'Failed to parse {str(malformed_instance)}' in res.stdout
        assert f"{str(bad_instance)}::$.title: False is not of type 'string'" in res.stdout
    else:
        report = json.loads(res.stdout)
        assert report['status'] == 'fail'
        assert 'errors' in report
        assert len(report['errors']) == 1
        assert 'parse_errors' in report
        assert len(report['parse_errors']) == 1
        error_item = report['parse_errors'][0]
        assert error_item['filename'] == str(malformed_instance)
        assert f'Failed to parse {str(malformed_instance)}' in error_item['message']
def _applyfcn(obj, name, attrfcn, dictfcn, listfcn):
    """Dispatch an accessor based on the parameter name syntax.

    A ``name`` like ``['key']`` or ``[0]`` selects ``dictfcn``/``listfcn``
    with the literal-evaluated key; anything else is treated as an attribute
    and routed to ``attrfcn``. Raises TypeError for a bracketed name on an
    object that is neither dict nor list.
    """
    if name[0] != '[':
        return attrfcn(obj, name)
    key = ast.literal_eval(name[1:-1])
    if isinstance(obj, dict):
        return dictfcn(obj, key)
    if isinstance(obj, list):
        return listfcn(obj, key)
    raise TypeError(
        'The parameter with [] must be either a dictionary or a list. '
        + ('Got type: %s' % type(obj))
    )
class PyAnalogClock(QWidget):
    """Analog clock widget with an adjustable time-zone offset (in hours)."""

    # Emitted every second with the current local time.
    timeChanged = pyqtSignal(QTime)
    # Emitted whenever the time-zone offset property changes.
    timeZoneChanged = pyqtSignal(int)

    def __init__(self, parent=None):
        super(PyAnalogClock, self).__init__(parent)
        self.timeZoneOffset = 0  # hours added to the displayed time
        # Repaint and re-emit the time once per second.
        timer = QTimer(self)
        timer.timeout.connect(self.update)
        timer.timeout.connect(self.updateTime)
        timer.start(1000)
        self.setWindowTitle('Analog Clock')
        self.resize(200, 200)
        # Hand shapes defined in the 200x200 logical coordinate system
        # used by paintEvent (origin at the widget center, y pointing down).
        self.hourHand = QPolygon([QPoint(7, 8), QPoint((- 7), 8), QPoint(0, (- 40))])
        self.minuteHand = QPolygon([QPoint(7, 8), QPoint((- 7), 8), QPoint(0, (- 70))])
        self.hourColor = QColor(0, 127, 0)
        self.minuteColor = QColor(0, 127, 127, 191)

    def paintEvent(self, event):
        """Draw the hour/minute hands and tick marks, scaled to the widget size."""
        side = min(self.width(), self.height())
        time = QTime.currentTime()
        time = time.addSecs((self.timeZoneOffset * 3600))
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Center the origin and normalize drawing to a 200x200 logical canvas.
        painter.translate((self.width() / 2), (self.height() / 2))
        painter.scale((side / 200.0), (side / 200.0))
        painter.setPen(Qt.NoPen)
        painter.setBrush(QBrush(self.hourColor))
        painter.save()
        # 30 degrees per hour, plus a fraction for the elapsed minutes.
        painter.rotate((30.0 * (time.hour() + (time.minute() / 60.0))))
        painter.drawConvexPolygon(self.hourHand)
        painter.restore()
        painter.setPen(self.hourColor)
        # Hour tick marks every 30 degrees.
        for i in range(0, 12):
            painter.drawLine(88, 0, 96, 0)
            painter.rotate(30.0)
        painter.setPen(Qt.NoPen)
        painter.setBrush(QBrush(self.minuteColor))
        painter.save()
        # 6 degrees per minute, plus a fraction for the elapsed seconds.
        painter.rotate((6.0 * (time.minute() + (time.second() / 60.0))))
        painter.drawConvexPolygon(self.minuteHand)
        painter.restore()
        painter.setPen(QPen(self.minuteColor))
        # Minute tick marks, skipping positions already covered by hour ticks.
        for j in range(0, 60):
            if ((j % 5) != 0):
                painter.drawLine(92, 0, 96, 0)
            painter.rotate(6.0)
        painter.end()

    def minimumSizeHint(self):
        return QSize(50, 50)

    def sizeHint(self):
        return QSize(100, 100)

    def updateTime(self):
        """Re-emit the current time; wired to the 1 s timer."""
        self.timeChanged.emit(QTime.currentTime())

    def getTimeZone(self):
        return self.timeZoneOffset

    # NOTE(review): the bare "(int)" below looks like the argument of a
    # stripped decorator (e.g. @pyqtSlot(int)) — confirm against upstream.
    (int)
    def setTimeZone(self, value):
        self.timeZoneOffset = value
        self.timeZoneChanged.emit(value)
        self.update()

    def resetTimeZone(self):
        self.timeZoneOffset = 0
        self.timeZoneChanged.emit(0)
        self.update()

    # Qt property exposing the offset with getter/setter/reset accessors.
    timeZone = pyqtProperty(int, getTimeZone, setTimeZone, resetTimeZone)
class Ui_MainWindow(object):
    """UI definition (pyuic-generated style) for the editable tree-model window."""

    def setupUi(self, MainWindow):
        """Create the central tree view, menus, actions and status bar."""
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(573, 468)
        # Central tree view inside a zero-margin vertical layout.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        self.vboxlayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.vboxlayout.setContentsMargins(0, 0, 0, 0)
        self.vboxlayout.setSpacing(0)
        self.vboxlayout.setObjectName('vboxlayout')
        self.view = QtWidgets.QTreeView(self.centralwidget)
        self.view.setAlternatingRowColors(True)
        self.view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectItems)
        self.view.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        self.view.setAnimated(False)
        self.view.setAllColumnsShowFocus(True)
        self.view.setObjectName('view')
        self.vboxlayout.addWidget(self.view)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with File and Actions menus.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 573, 31))
        self.menubar.setObjectName('menubar')
        self.fileMenu = QtWidgets.QMenu(self.menubar)
        self.fileMenu.setObjectName('fileMenu')
        self.actionsMenu = QtWidgets.QMenu(self.menubar)
        self.actionsMenu.setObjectName('actionsMenu')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        # Actions (text and shortcuts are assigned in retranslateUi).
        self.exitAction = QtWidgets.QAction(MainWindow)
        self.exitAction.setObjectName('exitAction')
        self.insertRowAction = QtWidgets.QAction(MainWindow)
        self.insertRowAction.setObjectName('insertRowAction')
        self.removeRowAction = QtWidgets.QAction(MainWindow)
        self.removeRowAction.setObjectName('removeRowAction')
        self.insertColumnAction = QtWidgets.QAction(MainWindow)
        self.insertColumnAction.setObjectName('insertColumnAction')
        self.removeColumnAction = QtWidgets.QAction(MainWindow)
        self.removeColumnAction.setObjectName('removeColumnAction')
        self.insertChildAction = QtWidgets.QAction(MainWindow)
        self.insertChildAction.setObjectName('insertChildAction')
        self.fileMenu.addAction(self.exitAction)
        self.actionsMenu.addAction(self.insertRowAction)
        self.actionsMenu.addAction(self.insertColumnAction)
        self.actionsMenu.addSeparator()
        self.actionsMenu.addAction(self.removeRowAction)
        self.actionsMenu.addAction(self.removeColumnAction)
        self.actionsMenu.addSeparator()
        self.actionsMenu.addAction(self.insertChildAction)
        self.menubar.addAction(self.fileMenu.menuAction())
        self.menubar.addAction(self.actionsMenu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign translatable titles, action labels and keyboard shortcuts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'Editable Tree Model'))
        self.fileMenu.setTitle(_translate('MainWindow', '&File'))
        self.actionsMenu.setTitle(_translate('MainWindow', '&Actions'))
        self.exitAction.setText(_translate('MainWindow', 'E&xit'))
        self.exitAction.setShortcut(_translate('MainWindow', 'Ctrl+Q'))
        self.insertRowAction.setText(_translate('MainWindow', 'Insert Row'))
        self.insertRowAction.setShortcut(_translate('MainWindow', 'Ctrl+I, R'))
        self.removeRowAction.setText(_translate('MainWindow', 'Remove Row'))
        self.removeRowAction.setShortcut(_translate('MainWindow', 'Ctrl+R, R'))
        self.insertColumnAction.setText(_translate('MainWindow', 'Insert Column'))
        self.insertColumnAction.setShortcut(_translate('MainWindow', 'Ctrl+I, C'))
        self.removeColumnAction.setText(_translate('MainWindow', 'Remove Column'))
        self.removeColumnAction.setShortcut(_translate('MainWindow', 'Ctrl+R, C'))
        self.insertChildAction.setText(_translate('MainWindow', 'Insert Child'))
        self.insertChildAction.setShortcut(_translate('MainWindow', 'Ctrl+N'))
def best_matches(current: str, options: Collection[str], n: int) -> list[str]:
    """Return up to ``n`` options most similar to ``current``.

    Cheap `_real_quick_ratio` pre-filter first; with 50+ survivors, also drop
    options whose length differs by more than one before the expensive
    SequenceMatcher pass. Results are sorted by descending ratio, then name.
    """
    if not current:
        return []
    candidates = [opt for opt in options if _real_quick_ratio(current, opt) > 0.75]
    if len(candidates) >= 50:
        candidates = [opt for opt in candidates if abs(len(opt) - len(current)) <= 1]
    scores = {opt: difflib.SequenceMatcher(a=current, b=opt).ratio() for opt in candidates}
    close = [opt for opt, score in scores.items() if score > 0.75]
    close.sort(key=lambda opt: (-scores[opt], opt))
    return close[:n]
def find_compatible_wheel(wheels: Sequence[T], identifier: str) -> (T | None):
    """Return the first wheel usable on the build identified by ``identifier``
    (e.g. ``cp39-manylinux_x86_64``), or None if no wheel is compatible."""
    (interpreter, platform) = identifier.split('-')
    for wheel in wheels:
        (_, _, _, tags) = parse_wheel_filename(wheel.name)
        for tag in tags:
            if (tag.abi == 'abi3'):
                # abi3 wheels are forward-compatible, but only within CPython 3.
                if (not (interpreter.startswith('cp3') and tag.interpreter.startswith('cp3'))):
                    continue
            elif (tag.abi == 'none'):
                # Pure-python wheels must target py3.
                if (tag.interpreter[:3] != 'py3'):
                    continue
            else:
                # Version-specific ABIs never match a different build.
                continue
            # The wheel's minimum interpreter version must not exceed ours
            # (compare the numeric part after 'cp3'/'py3').
            if ((tag.interpreter != 'py3') and (int(tag.interpreter[3:]) > int(interpreter[3:]))):
                continue
            if platform.startswith(('manylinux', 'musllinux', 'macosx')):
                # Match OS family prefix and architecture suffix separately;
                # the tag may carry a different version component in between.
                (os_, arch) = platform.split('_', 1)
                if (not tag.platform.startswith(os_)):
                    continue
                if (not tag.platform.endswith(f'_{arch}')):
                    continue
            elif (tag.platform != platform):
                continue
            return wheel
    return None
class SmartStrip(SmartDevice):
    """TP-Link smart power strip: a parent device aggregating child plug sockets.

    NOTE(review): the bare ``_update`` statements below look like stripped
    decorators (upstream pairs @property with a requires-update guard) —
    confirm against the original library source.
    """

    def __init__(self, host: str, *, config: Optional[DeviceConfig]=None, protocol: Optional[TPLinkProtocol]=None) -> None:
        super().__init__(host=host, config=config, protocol=protocol)
        self.emeter_type = 'emeter'
        self._device_type = DeviceType.Strip
        # 'usage' intentionally shares the 'schedule' endpoint on this device.
        self.add_module('antitheft', Antitheft(self, 'anti_theft'))
        self.add_module('schedule', Schedule(self, 'schedule'))
        self.add_module('usage', Usage(self, 'schedule'))
        self.add_module('time', Time(self, 'time'))
        self.add_module('countdown', Countdown(self, 'countdown'))
        self.add_module('emeter', Emeter(self, 'emeter'))
    _update
    def is_on(self) -> bool:
        """Return True if any child plug is on."""
        return any((plug.is_on for plug in self.children))
    async def update(self, update_children: bool=True):
        """Refresh state; child plug objects are created lazily on first update."""
        (await super().update(update_children))
        if (not self.children):
            children = self.sys_info['children']
            _LOGGER.debug('Initializing %s child sockets', len(children))
            for child in children:
                self.children.append(SmartStripPlug(self.host, parent=self, child_id=child['id']))
        if (update_children and self.has_emeter):
            for plug in self.children:
                (await plug.update())
    async def turn_on(self, **kwargs):
        """Turn the whole strip (all sockets) on."""
        (await self._query_helper('system', 'set_relay_state', {'state': 1}))
    async def turn_off(self, **kwargs):
        """Turn the whole strip (all sockets) off."""
        (await self._query_helper('system', 'set_relay_state', {'state': 0}))
    _update
    def on_since(self) -> Optional[datetime]:
        """Most recent on-since time across children, or None when off."""
        if self.is_off:
            return None
        return max((plug.on_since for plug in self.children if (plug.on_since is not None)))
    _update
    def led(self) -> bool:
        """True when the status LED is enabled (sys_info stores 'led_off')."""
        sys_info = self.sys_info
        return bool((1 - sys_info['led_off']))
    async def set_led(self, state: bool):
        """Enable or disable the status LED."""
        (await self._query_helper('system', 'set_led_off', {'off': int((not state))}))
    _update
    def state_information(self) -> Dict[(str, Any)]:
        """Human-readable summary fields for display."""
        return {'LED state': self.led, 'Childs count': len(self.children), 'On since': self.on_since}
    async def current_consumption(self) -> float:
        """Total current consumption summed over all child plugs."""
        return sum([(await plug.current_consumption()) for plug in self.children])
    _update
    async def get_emeter_realtime(self) -> EmeterStatus:
        """Realtime energy stats summed across children; voltage is averaged."""
        emeter_rt = (await self._async_get_emeter_sum('get_emeter_realtime', {}))
        emeter_rt['voltage_mv'] = int((emeter_rt['voltage_mv'] / len(self.children)))
        return EmeterStatus(emeter_rt)
    _update
    async def get_emeter_daily(self, year: Optional[int]=None, month: Optional[int]=None, kwh: bool=True) -> Dict:
        """Daily energy stats summed across children."""
        return (await self._async_get_emeter_sum('get_emeter_daily', {'year': year, 'month': month, 'kwh': kwh}))
    _update
    async def get_emeter_monthly(self, year: Optional[int]=None, kwh: bool=True) -> Dict:
        """Monthly energy stats summed across children."""
        return (await self._async_get_emeter_sum('get_emeter_monthly', {'year': year, 'kwh': kwh}))
    async def _async_get_emeter_sum(self, func: str, kwargs: Dict[(str, Any)]) -> Dict:
        """Call ``func`` on every child plug and merge the result dicts by summing."""
        self._verify_emeter()
        return merge_sums([(await getattr(plug, func)(**kwargs)) for plug in self.children])
    _update
    async def erase_emeter_stats(self):
        """Erase the energy statistics on every child plug."""
        for plug in self.children:
            (await plug.erase_emeter_stats())
    _update
    def emeter_this_month(self) -> Optional[float]:
        """This month's consumption summed across children."""
        return sum((plug.emeter_this_month for plug in self.children))
    _update
    def emeter_today(self) -> Optional[float]:
        """Today's consumption summed across children."""
        return sum((plug.emeter_today for plug in self.children))
    _update
    def emeter_realtime(self) -> EmeterStatus:
        """Merged realtime stats from children; voltage averaged, not summed."""
        emeter = merge_sums([plug.emeter_realtime for plug in self.children])
        emeter['voltage_mv'] = int((emeter['voltage_mv'] / len(self.children)))
        return EmeterStatus(emeter)
def get_h36m_generator(loader, dynamic_length=True, opt=None):
    """Endlessly yield ``(pose_2d, pose_3d, camera_view)`` batches from ``loader``.

    Cycles over the DataLoader forever; pose tensors are moved to CUDA.
    ``opt`` is currently unused.
    """
    while True:
        for (i, data) in enumerate(loader):
            seq_len = loader.dataset.get_seq_len()
            # assumes data['pose_2d'] is (batch, seq, joints, coords), permuted
            # here so the sequence dimension comes first — TODO confirm against
            # the dataset implementation.
            pose_2d = data['pose_2d'].permute(1, 0, 2, 3).float().cuda()
            pose_3d = data['pose_3d'].permute(1, 0, 2, 3).float().cuda()
            camera_view = data['camera_view']
            # NOTE(review): speed/breakpoints are read but never yielded or
            # used — confirm whether they should be part of the yield tuple.
            speed = data['speed']
            breakpoints = data['breakpoints']
            if dynamic_length:
                # Trim padded frames down to the dataset's reported sequence length.
                pose_2d = pose_2d[:seq_len]
                pose_3d = pose_3d[:seq_len]
            (yield (pose_2d, pose_3d, camera_view))
class BadDestroyMap(DebugModeError):
    """Raised when an Op's perform modified an input not declared in its destroy_map."""

    def __init__(self, node, idx, old_val, new_val, perform):
        super().__init__()
        self.node = node        # the Apply node whose input changed
        self.idx = idx          # index of the modified input
        self.old_val = old_val  # value before execution
        self.new_val = new_val  # value after execution
        self.perform = perform  # which implementation ran (e.g. py/c)

    def __str__(self):
        """Render a detailed diagnostic report, with numpy-based value
        comparisons when both values are array-convertible."""
        sio = StringIO()
        print(' node:', self.node, file=sio)
        print(' perform:', self.perform, file=sio)
        print(' node.inputs:', [(str(i), id(i)) for i in self.node.inputs], file=sio)
        print(' destroy_map:', self.node.op.destroy_map, file=sio)
        print(' changed input idx:', self.idx, file=sio)
        print(' changed input type:', self.node.inputs[self.idx].type, file=sio)
        print(' repr (old val):', repr(self.old_val), file=sio)
        print(' repr (new val):', repr(self.new_val), file=sio)
        # Best-effort numeric diff; non-array values fall through to the
        # except branch with a short notice instead of failing the report.
        try:
            npy_old_val = np.asarray(self.old_val)
            npy_new_val = np.asarray(self.new_val)
            print(' value dtype (new <space> old):', npy_new_val.dtype, npy_old_val.dtype, file=sio)
            print(' value shape (new <space> old):', npy_new_val.shape, npy_old_val.shape, file=sio)
            print(' value min (new <space> old):', npy_new_val.min(), npy_old_val.min(), file=sio)
            print(' value max (new <space> old):', npy_new_val.max(), npy_old_val.max(), file=sio)
            delta = (npy_new_val - npy_old_val)
            print(' value min (new-old):', delta.min(), file=sio)
            print(' value max (new-old):', delta.max(), file=sio)
            print(' value argmin (new-old):', np.unravel_index(delta.argmin(), npy_new_val.shape), file=sio)
            print(' value argmax (new-old):', np.unravel_index(delta.argmax(), npy_new_val.shape), file=sio)
            print(' location of first 10 mismatches:', np.transpose(np.nonzero(delta))[:10], file=sio)
            print('', file=sio)
        except Exception as e:
            print(f'(Numpy-hints failed with: {e})', file=sio)
        print(' Hint: this can also be caused by a deficient values_eq_approx() or __eq__() implementation [which compared input values]', file=sio)
        return sio.getvalue()
def main():
    """Copy build artifacts into a destination directory and, optionally,
    add them to a tar / tar.gz archive.

    Paths under ``opts.build_root`` are re-rooted relative to it; files
    ending in ``.pkg.fake`` are copied but never archived.
    """
    (opts, args) = parse_args()
    assert opts.build_root
    assert opts.dest_dir
    dest_arch = None
    if opts.dest_arch:
        # dereference=True: archive the files symlinks point to, not the links.
        if opts.dest_arch.endswith('.tar'):
            dest_arch = tarfile.open(opts.dest_arch, 'w', dereference=True)
        elif (opts.dest_arch.endswith('.tar.gz') or opts.dest_arch.endswith('.tgz')):
            dest_arch = tarfile.open(opts.dest_arch, 'w:gz', dereference=True)
        else:
            # fix: corrected "Unsopported" typo in the user-facing message
            raise Exception('Unsupported archive type for {}. Use one of: tar, tar.gz, tgz.'.format(os.path.basename(opts.dest_arch)))
    for arg in args:
        dst = arg
        if dst.startswith(opts.build_root):
            # +1 strips the path separator; assumes build_root has no
            # trailing slash — TODO confirm with callers.
            dst = dst[(len(opts.build_root) + 1):]
        if (dest_arch and (not arg.endswith('.pkg.fake'))):
            dest_arch.add(arg, arcname=dst)
        copy_file(arg, os.path.join(opts.dest_dir, dst))
    if dest_arch:
        dest_arch.close()
class ContextualEmbedV2(nn.Module):
    """Two-layer bidirectional LSTM contextual embedder loaded from a
    pretrained checkpoint; all parameters are frozen after loading.

    ``padding_idx`` is accepted for interface compatibility but unused.
    """

    def __init__(self, model_path, padding_idx=0):
        super(ContextualEmbedV2, self).__init__()
        state_dict = torch.load(model_path)
        self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
        self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
        # The checkpoint stores both layers in one dict; split by the layer
        # index embedded in the parameter names ('0' vs '1') and rename the
        # second layer's keys back to layer-0 names for a standalone LSTM.
        state_dict1 = dict([((name, param.data) if isinstance(param, Parameter) else (name, param)) for (name, param) in state_dict.items() if ('0' in name)])
        state_dict2 = dict([((name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)) for (name, param) in state_dict.items() if ('1' in name)])
        self.rnn1.load_state_dict(state_dict1)
        self.rnn2.load_state_dict(state_dict2)
        # Pretrained embedder: freeze everything.
        for p in self.parameters():
            p.requires_grad = False
        # fix: this assignment was duplicated in the original; keep one.
        self.output_size = 600

    def setup_eval_embed(self, eval_embed, padding_idx=0):
        """No-op: kept for interface compatibility with other embedders."""
        pass

    def forward(self, x, x_mask):
        """Run both LSTM layers and return their outputs in input batch order.

        assumes ``x`` is a padded (batch, seq, 300) tensor and ``x_mask``
        marks padding positions with 1 — TODO confirm against callers.
        """
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        max_len = x_mask.size(1)
        # Packed sequences require descending lengths: sort, run, then unsort.
        (lens, indices) = torch.sort(lengths, 0, True)
        (output1, _) = self.rnn1(pack(x[indices], lens.tolist(), batch_first=True))
        (output2, _) = self.rnn2(output1)
        output1 = unpack(output1, batch_first=True, total_length=max_len)[0]
        output2 = unpack(output2, batch_first=True, total_length=max_len)[0]
        (_, _indices) = torch.sort(indices, 0)
        output1 = output1[_indices]
        output2 = output2[_indices]
        return (output1, output2)
def anonymize_ip_address(ip_address):
    """Return ``ip_address`` with its host portion zeroed, or None if invalid.

    The mask clears the last 16 bits: its low 32 bits are 0xFFFF0000, so an
    IPv4 address loses its final 16 bits too.
    """
    mask = int('0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000', 16)
    try:
        parsed = ipaddress.ip_address(force_str(ip_address))
    except ValueError:
        # Not a parseable IPv4/IPv6 address.
        return None
    return ipaddress.ip_address(int(parsed) & mask).compressed
def thc_objective_grad(xcur, norb, nthc, eri, verbose=False):
    """Gradient of the THC (tensor hypercontraction) least-squares objective.

    ``xcur`` packs the leaf tensor eta (nthc x norb) followed by the central
    tensor M (nthc x nthc); the gradient is returned packed the same way
    (d/d_eta first, then d/dM).
    """
    # Unpack the flat parameter vector into eta (leaf) and M (central) tensors.
    etaPp = numpy.array(xcur[:(norb * nthc)]).reshape(nthc, norb)
    MPQ = numpy.array(xcur[(norb * nthc):((norb * nthc) + (nthc * nthc))]).reshape(nthc, nthc)
    # CprP[p, r, P] = eta[P, p] * eta[P, r]
    CprP = numpy.einsum('Pp,Pr->prP', etaPp, etaPp)
    # THC approximation of the ERI tensor and its residual.
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = (eri - Iapprox)
    # Objective value, reported only when verbose.
    res = (0.5 * numpy.sum((deri ** 2)))
    if verbose:
        print('res, max, lambda = {}, {}'.format(res, numpy.max(numpy.abs(deri))))
    # Gradient with respect to the central tensor M.
    dL_dZab = (- numpy.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=['einsum_path', (0, 1), (0, 1)]))
    # Gradient with respect to eta: one term per occurrence of eta in Iapprox.
    dL_dX_GT = ((- 2) * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path', (0, 3), (1, 2), (0, 1)]))
    dL_dX_GT -= (2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)]))
    return numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
class GameModel(Model):
    """PynamoDB model for a game record, keyed by player id and creation time."""

    class Meta():
        read_capacity_units = 1
        write_capacity_units = 1
        table_name = 'GameModel'
        # NOTE(review): the host value was truncated to a bare quote in this
        # copy (syntactically invalid) — restore the endpoint URL from upstream.
        host = '
    player_id = UnicodeAttribute(hash_key=True)
    created_time = UTCDateTimeAttribute(range_key=True)
    winner_id = UnicodeAttribute()
    loser_id = UnicodeAttribute(null=True)
    # Global secondary indexes for opponent-based queries.
    player_opponent_index = GamePlayerOpponentIndex()
    opponent_time_index = GameOpponentTimeIndex()
class Migration(migrations.Migration):
    """Add default ordering plus ``description`` and ``order`` fields to AdType."""
    dependencies = [('adserver', '0015_publisher_unauthed_ads')]
    operations = [migrations.AlterModelOptions(name='adtype', options={'ordering': ('order', 'name')}), migrations.AddField(model_name='adtype', name='description', field=models.CharField(blank=True, default='', help_text='A short description of the ad type to guide advertisers.', max_length=255)), migrations.AddField(model_name='adtype', name='order', field=models.PositiveSmallIntegerField(default=0))]
def slugify(s, ok=SLUG_OK, lower=True, spaces=False, only_ascii=False, space_replacement='-'):
    """Build a slug from ``s``, keeping letters/numbers and chars in ``ok``.

    Unicode whitespace becomes spaces, optionally collapsed into
    ``space_replacement``; ``only_ascii`` transliterates via unidecode.
    """
    if only_ascii and ok != SLUG_OK and hasattr(ok, 'decode'):
        # NOTE(review): on Python 3 bytes.decode raises UnicodeDecodeError,
        # which this except would not catch (py2 unicode.decode could raise
        # UnicodeEncodeError) — confirm the intended Python version.
        try:
            ok.decode('ascii')
        except UnicodeEncodeError:
            raise ValueError('You can not use "only_ascii=True" with a non ascii available chars in "ok" ("%s" given)' % ok)
    kept = []
    for ch in unicodedata.normalize('NFKC', smart_text(s)):
        category = unicodedata.category(ch)[0]
        if category in 'LN' or ch in ok:
            kept.append(ch)
        elif category == 'Z':
            kept.append(' ')
    result = ''.join(kept).strip()
    if only_ascii:
        result = unidecode(result)
    if not spaces:
        # Fall back to the first allowed char when the replacement itself
        # is not allowed in the slug.
        if space_replacement and space_replacement not in ok:
            space_replacement = ok[0] if ok else ''
        result = re.sub('[%s\\s]+' % space_replacement, space_replacement, result)
    if lower:
        result = result.lower()
    return result
def get_normal_loss(input, label, num_output, lambda_value, m_value=4):
    """Angular-margin softmax loss over ``num_output`` classes (TF1 graph mode).

    ``l2_regularizer``, ``xavier`` and ``marginInnerProduct_module`` come from
    module scope.
    """
    feature_dim = input.get_shape()[1]
    # Per-class weight matrix.
    weight = tf.get_variable('weight', shape=[num_output, feature_dim], regularizer=l2_regularizer, initializer=xavier)
    prob_distribution = tf.one_hot(label, num_output)
    # Normalize class weights so the logits depend only on the angle.
    weight = tf.nn.l2_normalize(weight, dim=1)
    label_float = tf.cast(label, tf.float32)
    # Custom op computing the m-margin inner product (A-Softmax style).
    margin_out = marginInnerProduct_module.margin_inner_product(input, weight, tf.constant(m_value), lambda_value, label_float)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=margin_out, labels=prob_distribution))
    return loss
def test_service_browser_started_after_zeroconf_closed():
    """Starting a ServiceBrowser on an already-closed Zeroconf must raise RuntimeError."""
    zeroconf_instance = Zeroconf(interfaces=['127.0.0.1'])
    service_type = '_hap._tcp.local.'

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()
    zeroconf_instance.close()
    with pytest.raises(RuntimeError):
        r.ServiceBrowser(zeroconf_instance, service_type, None, listener)
class WeightSvdModuleSplitter():
    """Split a Conv2d or Linear layer into two lower-rank layers (weight SVD).

    NOTE(review): ``split_module``/``split_conv_module``/``split_fc_module``
    take ``cls`` and the bias helpers take neither self nor cls — the
    @classmethod / @staticmethod decorators appear stripped in this copy;
    confirm against the upstream source.
    """

    def split_module(cls, module, name, rank, svd_lib_ref):
        """Dispatch to the conv or fc splitter based on the module type."""
        if isinstance(module, Conv2d):
            split_modules = cls.split_conv_module(module, name, rank, svd_lib_ref)
        elif isinstance(module, Linear):
            split_modules = cls.split_fc_module(module, name, rank, svd_lib_ref)
        else:
            raise AssertionError('Weight SVD only supports Conv2d and FC modules currently')
        return split_modules

    def split_conv_module(cls, module, name, rank, svd_lib_ref):
        """Split a Conv2d into a 1x1 rank-reducing conv followed by the original kernel."""
        (split_weights, weight_sizes) = ([], [])
        # Zero placeholders sized for each half; the SVD library returns the
        # actual split values in the same layout.
        conv_a_weight_shape = (rank, module.in_channels, 1, 1)
        conv_a_weight = np.zeros(conv_a_weight_shape)
        split_weights.append(conv_a_weight.flatten().tolist())
        weight_sizes.append(conv_a_weight.size)
        conv_b_weight_shape = (module.out_channels, rank, *module.kernel_size)
        conv_b_weight = np.zeros(conv_b_weight_shape)
        split_weights.append(conv_b_weight.flatten().tolist())
        weight_sizes.append(conv_b_weight.size)
        split_weights = svd_lib_ref.SplitLayerWeights(str(name), split_weights, weight_sizes, [rank])
        logger.debug('Splitting conv module weight of shape %r into %r and %r', module.weight.shape, conv_a_weight.shape, conv_b_weight.shape)
        conv_a = torch.nn.Conv2d(module.in_channels, rank, kernel_size=(1, 1), stride=(1, 1), dilation=module.dilation)
        conv_b = torch.nn.Conv2d(rank, module.out_channels, kernel_size=module.kernel_size, stride=module.stride, padding=module.padding, dilation=module.dilation)
        conv_a.weight = torch.nn.Parameter(torch.from_numpy(np.array(split_weights[0], dtype=np.float32).reshape(conv_a_weight_shape)))
        conv_b.weight = torch.nn.Parameter(torch.from_numpy(np.array(split_weights[1], dtype=np.float32).reshape(conv_b_weight_shape)))
        # Keep the split layers on the same device as the original weights.
        if module.weight.is_cuda:
            conv_a.weight = torch.nn.Parameter(conv_a.weight.cuda())
            conv_b.weight = torch.nn.Parameter(conv_b.weight.cuda())
        cls._split_conv_bias(conv_a, conv_b, module, name, rank, svd_lib_ref)
        return (conv_a, conv_b)

    def _split_conv_bias(conv_a, conv_b, module, name, rank, svd_lib_ref):
        """Split the conv bias via the SVD library, or clear both biases."""
        if (module.bias is not None):
            (split_biases, bias_sizes) = ([], [])
            conv_a_bias = np.zeros(rank)
            split_biases.append(conv_a_bias.flatten().tolist())
            bias_sizes.append(conv_a_bias.size)
            conv_b_bias = np.zeros(module.out_channels)
            split_biases.append(conv_b_bias.flatten().tolist())
            bias_sizes.append(conv_b_bias.size)
            split_biases = svd_lib_ref.SplitLayerBiases(str(name), split_biases, bias_sizes, [rank])
            conv_a.bias = torch.nn.Parameter(torch.from_numpy(np.array(split_biases[0], dtype=np.float32)))
            conv_b.bias = torch.nn.Parameter(torch.from_numpy(np.array(split_biases[1], dtype=np.float32)))
            if module.bias.is_cuda:
                conv_a.bias = torch.nn.Parameter(conv_a.bias.cuda())
                conv_b.bias = torch.nn.Parameter(conv_b.bias.cuda())
        else:
            conv_a.bias = None
            conv_b.bias = None

    def split_fc_module(cls, module, name, rank, svd_lib_ref):
        """Split a Linear layer into (in_features -> rank) and (rank -> out_features)."""
        (split_weights, weight_sizes) = ([], [])
        fc_a_weight_shape = (rank, module.in_features)
        fc_a_weight = np.zeros(fc_a_weight_shape)
        split_weights.append(fc_a_weight.flatten().tolist())
        weight_sizes.append(fc_a_weight.size)
        fc_b_weight_shape = (module.out_features, rank)
        fc_b_weight = np.zeros(fc_b_weight_shape)
        split_weights.append(fc_b_weight.flatten().tolist())
        weight_sizes.append(fc_b_weight.size)
        split_weights = svd_lib_ref.SplitLayerWeights(str(name), split_weights, weight_sizes, [rank])
        fc_a = torch.nn.Linear(module.in_features, rank)
        fc_b = torch.nn.Linear(rank, module.out_features)
        fc_a.weight = torch.nn.Parameter(torch.from_numpy(np.array(split_weights[0], dtype=np.float32).reshape(fc_a_weight_shape)))
        fc_b.weight = torch.nn.Parameter(torch.from_numpy(np.array(split_weights[1], dtype=np.float32).reshape(fc_b_weight_shape)))
        if module.weight.is_cuda:
            fc_a.weight = torch.nn.Parameter(fc_a.weight.cuda())
            fc_b.weight = torch.nn.Parameter(fc_b.weight.cuda())
        cls._split_fc_bias(fc_a, fc_b, module, name, rank, svd_lib_ref)
        return (fc_a, fc_b)

    def _split_fc_bias(fc_a, fc_b, module, name, rank, svd_lib_ref):
        """Split the fc bias via the SVD library, or clear both biases."""
        if (module.bias is not None):
            (split_biases, bias_sizes) = ([], [])
            fc_a_bias = np.zeros(rank)
            split_biases.append(fc_a_bias.flatten().tolist())
            bias_sizes.append(fc_a_bias.size)
            fc_b_bias = np.zeros(module.out_features)
            split_biases.append(fc_b_bias.flatten().tolist())
            bias_sizes.append(fc_b_bias.size)
            split_biases = svd_lib_ref.SplitLayerBiases(str(name), split_biases, bias_sizes, [rank])
            fc_a.bias = torch.nn.Parameter(torch.from_numpy(np.array(split_biases[0], dtype=np.float32)))
            fc_b.bias = torch.nn.Parameter(torch.from_numpy(np.array(split_biases[1], dtype=np.float32)))
            if module.bias.is_cuda:
                fc_a.bias = torch.nn.Parameter(fc_a.bias.cuda())
                fc_b.bias = torch.nn.Parameter(fc_b.bias.cuda())
        else:
            fc_a.bias = None
            fc_b.bias = None
def key(w, probability=1.0):
    """Swap one letter of ``w`` with a randomly chosen neighbouring keyboard key.

    With probability ``1 - probability`` the word is returned unchanged.
    ``NN`` (module-level) maps a lowercase character to its neighbour list.
    """
    if random.random() > probability:
        return w
    if not w:
        # fix: guard the empty string, which previously crashed randint(0, -1)
        return w
    w = list(w)
    i = random.randint(0, (len(w) - 1))
    char = w[i]
    caps = char.isupper()
    # NOTE(review): if NN's keys are all lowercase, this test never matches an
    # uppercase char even though ``caps`` is tracked — possibly should be
    # ``char.lower() in NN``; confirm against NN's definition.
    if (char in NN):
        w[i] = NN[char.lower()][random.randint(0, (len(NN[char.lower()]) - 1))]
        if caps:
            # fix: str.upper() returns a new string; the original discarded it
            w[i] = w[i].upper()
    return ''.join(w)
.parametrize('username,password', users)
def test_delete(db, client, username, password):
    """Issue DELETE on every task as the given user; the expected status code
    per user comes from the module-level ``status_map``."""
    client.login(username=username, password=password)
    instances = Task.objects.all()
    for instance in instances:
        url = reverse(urlnames['detail'], args=[instance.pk])
        response = client.delete(url)
        # Attach the response body to the assertion message for easier triage.
        assert (response.status_code == status_map['delete'][username]), response.json()
class APIKeyBucket():
    """Rate-limited pool of API keys.

    ``get()`` spaces calls so the pool as a whole stays within ``kps``
    requests per second per key; subclasses implement the actual key
    selection in ``get_apikey``.
    """

    def __init__(self, apikeys: 'list[str]', kps: int):
        # fix: annotation was the invalid literal ``[str]``; use a string
        # annotation so it is never evaluated at runtime.
        self.apikeys = apikeys
        self.kps = kps  # allowed requests per second for each key
        self._last_get_time = 0
        # Minimum spacing between get() calls for the whole pool.
        self._get_interval = (1 / (len(self.apikeys) * kps))
        # NOTE(review): twisted's DeferredLock.acquire() returns a Deferred;
        # this synchronous usage may not actually serialize callers — confirm.
        self._lock = DeferredLock()

    def get(self) -> str:
        """Return the next api key, sleeping if needed to honour the rate limit."""
        self._lock.acquire()
        try:
            now = time.time()
            duration = (now - self._last_get_time)
            if (duration < self._get_interval):
                time.sleep((self._get_interval - duration))
            self._last_get_time = time.time()
            return self.get_apikey()
        finally:
            # fix: release in a finally block so the lock is not leaked when
            # get_apikey() raises (e.g. NotImplementedError).
            self._lock.release()

    def get_apikey(self) -> str:
        """Subclasses pick the concrete key (e.g. round-robin)."""
        raise NotImplementedError()
def apply_signature(value, sig, utf8_strings=False):
    """Coerce ``value`` into the dbus type described by signature ``sig``.

    ``utf8_strings`` is currently unused.
    """
    if (sig in TYPE_MAP):
        return TYPE_MAP[sig](value)
    elif sig.startswith('a{'):
        # Dictionary: strip the 'a{' ... '}' wrapper to get the entry signature.
        return dbus.Dictionary(value, signature=sig[2:(- 1)])
    elif sig.startswith('a('):
        # NOTE(review): 'a(' denotes an ARRAY of structs, yet this builds a
        # dbus.Struct — confirm against the dbus-python documentation.
        return dbus.Struct(value, signature=sig[2:(- 1)])
    elif sig.startswith('a'):
        return dbus.Array(value, signature=sig[1:])
    elif (sig == 's'):
        # NOTE(review): unreachable if 's' is a key of TYPE_MAP (the first
        # branch would already have handled it) — confirm TYPE_MAP's contents.
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        return dbus.String(value)
    else:
        return TYPE_MAP[sig](value)
    # NOTE(review): dead code — every branch above returns or raises.
    return value
class PolicyInformation():
    """X.509 certificate-policies entry: a policy OID plus optional qualifiers.

    NOTE(review): ``policy_identifier`` and ``policy_qualifiers`` are written
    as plain methods here but read as attributes in __repr__/__eq__/__hash__ —
    upstream they are @property accessors; the decorators appear stripped.
    """

    def __init__(self, policy_identifier: ObjectIdentifier, policy_qualifiers: (typing.Iterable[(str | UserNotice)] | None)) -> None:
        if (not isinstance(policy_identifier, ObjectIdentifier)):
            raise TypeError('policy_identifier must be an ObjectIdentifier')
        self._policy_identifier = policy_identifier
        if (policy_qualifiers is not None):
            # Materialize the iterable so it can be validated and reused.
            policy_qualifiers = list(policy_qualifiers)
            if (not all((isinstance(x, (str, UserNotice)) for x in policy_qualifiers))):
                raise TypeError('policy_qualifiers must be a list of strings and/or UserNotice objects or None')
        self._policy_qualifiers = policy_qualifiers

    def __repr__(self) -> str:
        return '<PolicyInformation(policy_identifier={0.policy_identifier}, policy_qualifiers={0.policy_qualifiers})>'.format(self)

    def __eq__(self, other: object) -> bool:
        if (not isinstance(other, PolicyInformation)):
            return NotImplemented
        return ((self.policy_identifier == other.policy_identifier) and (self.policy_qualifiers == other.policy_qualifiers))

    def __hash__(self) -> int:
        # Lists are unhashable, so hash a tuple copy of the qualifiers.
        if (self.policy_qualifiers is not None):
            pq: (tuple[((str | UserNotice), ...)] | None) = tuple(self.policy_qualifiers)
        else:
            pq = None
        return hash((self.policy_identifier, pq))

    def policy_identifier(self) -> ObjectIdentifier:
        return self._policy_identifier

    def policy_qualifiers(self) -> (list[(str | UserNotice)] | None):
        return self._policy_qualifiers
def test_SKCMethodABC_already_defined__skcriteria_parameters():
    """A subclass redefining __init__ with the same signature must keep the
    parent's ``_skcriteria_parameters`` (normalized to a set by the metaclass)."""
    class Base(methods.SKCMethodABC):
        _skcriteria_dm_type = 'foo'
        _skcriteria_parameters = ['x']
        def __init__(self, x):
            pass
    class Foo(Base):
        def __init__(self, x):
            pass
    # Declared as a list on Base, exposed as a set on the subclass.
    assert (Foo._skcriteria_parameters == {'x'})
class Solution():
    def isNumber(self, s: str) -> bool:
        """Validate whether ``s`` spells a decimal number, via a DFA.

        Accepts optional surrounding blanks, an optional sign, digits with an
        optional decimal point, and an optional exponent part.
        """
        # transitions[state][token] -> next state; state 0 is an unused sink.
        transitions = [
            {},
            {'blank': 1, 'sign': 2, 'digit': 3, '.': 4},
            {'digit': 3, '.': 4},
            {'digit': 3, '.': 5, 'e': 6, 'blank': 9},
            {'digit': 5},
            {'digit': 5, 'e': 6, 'blank': 9},
            {'sign': 7, 'digit': 8},
            {'digit': 8},
            {'digit': 8, 'blank': 9},
            {'blank': 9},
        ]
        accepting = (3, 5, 8, 9)
        node = 1
        for ch in s.lower():
            # Classify the character into a transition token.
            if '0' <= ch <= '9':
                token = 'digit'
            elif ch == ' ':
                token = 'blank'
            elif ch in ['+', '-']:
                token = 'sign'
            else:
                token = ch
            moves = transitions[node]
            if token not in moves:
                return False
            node = moves[token]
        return node in accepting
class Model2onnx(Callback):
    """Keras callback converting the saved .h5 model to ONNX after training.

    NOTE(review): ``model2onnx`` and ``include_metadata`` are defined without
    ``self`` yet invoked as ``self.model2onnx(...)`` — the @staticmethod
    decorators appear stripped in this copy; confirm against upstream.
    """

    def __init__(self, saved_model_path: str, metadata: dict=None, save_on_epoch_end: bool=False) -> None:
        super().__init__()
        self.saved_model_path = saved_model_path  # path to the trained .h5 weights
        self.metadata = metadata  # optional key/value pairs embedded in the ONNX file
        self.save_on_epoch_end = save_on_epoch_end  # convert after every epoch too
        # Fail fast if the optional conversion dependencies are missing.
        try:
            import tf2onnx
        except:
            raise ImportError("tf2onnx is not installed. Please install it using 'pip install tf2onnx'")
        try:
            import onnx
        except:
            raise ImportError("onnx is not installed. Please install it using 'pip install onnx'")

    def model2onnx(model: tf.keras.Model, onnx_model_path: str):
        """Convert ``model`` to ONNX on disk; errors are printed, not raised."""
        try:
            import tf2onnx
            tf2onnx.convert.from_keras(model, output_path=onnx_model_path)
        except Exception as e:
            print(e)

    def include_metadata(onnx_model_path: str, metadata: dict=None):
        """Embed ``metadata`` key/value pairs into the saved ONNX model."""
        try:
            if (metadata and isinstance(metadata, dict)):
                import onnx
                onnx_model = onnx.load(onnx_model_path)
                for (key, value) in metadata.items():
                    meta = onnx_model.metadata_props.add()
                    meta.key = key
                    meta.value = str(value)
                onnx.save(onnx_model, onnx_model_path)
        except Exception as e:
            print(e)

    def on_epoch_end(self, epoch: int, logs: dict=None):
        """Optionally run the full conversion at the end of each epoch."""
        if self.save_on_epoch_end:
            self.on_train_end(logs=logs)

    def on_train_end(self, logs=None):
        """Reload the best saved weights, convert to ONNX and attach metadata."""
        self.model.load_weights(self.saved_model_path)
        onnx_model_path = self.saved_model_path.replace('.h5', '.onnx')
        self.model2onnx(self.model, onnx_model_path)
        self.include_metadata(onnx_model_path, self.metadata)
def convert_heads_to_classy_model(state_dict, out_prefix, num_fc_layers, use_bn_head=False, use_bias_head_fc=True):
    """Repackage MLP-head weights from `state_dict` into the Classy layout.

    Source keys follow '<idx>.clf.<sub>.<param>'; destination keys are
    'mlp.layers.<idx>.*' for hidden layers and '<out_prefix>_fc.*' for the
    final projection, nested under {'block3-2': {'default_head': ...}}.
    """
    logger.info('Converting head...')
    if num_fc_layers <= 1:
        # Single-layer head: only the output projection exists.
        return {'block3-2': {'default_head': {
            f'{out_prefix}_fc.weight': state_dict['0.clf.0.weight'],
            f'{out_prefix}_fc.bias': state_dict['0.clf.0.bias'],
        }}}
    head = {}
    # Hidden layers 0 .. num_fc_layers-2 map to mlp.layers.<idx>.
    for layer in range(num_fc_layers - 1):
        dst = f'mlp.layers.{layer}'
        head[f'{dst}.fc.weight'] = state_dict[f'{layer}.clf.0.weight']
        if use_bias_head_fc:
            head[f'{dst}.fc.bias'] = state_dict[f'{layer}.clf.0.bias']
        if use_bn_head:
            # BatchNorm sits at clf.1 when present.
            bn_dst = f'{dst}.batch_norm'
            for stat in ('weight', 'bias', 'running_mean', 'running_var'):
                head[f'{bn_dst}.{stat}'] = state_dict[f'{layer}.clf.1.{stat}']
    # The last source layer becomes the output projection.
    last = num_fc_layers - 1
    head[f'{out_prefix}_fc.weight'] = state_dict[f'{last}.clf.0.weight']
    if use_bias_head_fc:
        head[f'{out_prefix}_fc.bias'] = state_dict[f'{last}.clf.0.bias']
    return {'block3-2': {'default_head': head}}
def test_asking_qm_questions():
    """Verify that a QM service-info query sends a multicast (non-unicast) question."""
    service_type = '_quservice._tcp.local.'
    zc = r.Zeroconf(interfaces=['127.0.0.1'])
    original_send = zc.async_send
    captured = []  # first outgoing packet only

    def capturing_send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        if not captured:
            captured.append(out)
        original_send(out, addr=addr, port=port)

    with patch.object(zc, 'async_send', capturing_send):
        zc.get_service_info(f'name.{service_type}', service_type, 500, question_type=r.DNSQuestionType.QM)
    # QM questions must have the unicast-response bit cleared.
    assert captured[0].questions[0].unicast is False
    zc.close()
def formatAll(list, width=79):
    """Render *list* as a wrapped `__all__ = [...]` source line.

    Entries are double-quoted and comma-separated; a new indented line is
    started whenever the running width estimate exceeds *width* minus the
    header indent. Returns '__all__ = []' for an empty list.
    """
    header = '__all__ = ['
    indent = len(header)
    limit = width - indent  # budget per line after the opening bracket
    text = header
    used = 0
    for name in list:
        used += len(name) + 4  # quotes + comma + space
        if used > limit:
            text += '\n' + ' ' * indent
            used = len(name) + 4
        text += '"' + name + '", '
    if len(list):
        text = text[:-2]  # drop the trailing ', '
    return text + ']'
def main(args):
    """Build Kaldi-style data files (segments/utt2spk/text) from TextGrids.

    Reads `wav.scp` and `textgrid.flist` under args.path, extracts every
    non-empty interval from each recording's TextGrid, merges/splits
    overlapping regions via preposs_overlap (defined elsewhere), and writes
    segments_all / utt2spk_all / text_all_old / text_all / utt2spk_new
    next to the inputs.
    """
    wav_scp = codecs.open((Path(args.path) / 'wav.scp'), 'r', 'utf-8')
    textgrid_flist = codecs.open((Path(args.path) / 'textgrid.flist'), 'r', 'utf-8')
    # Map utterance id (TextGrid file stem) -> TextGrid path.
    utt2textgrid = {}
    for line in textgrid_flist:
        path = Path(line.strip())
        uttid = path.stem
        utt2textgrid[uttid] = path
    all_segments = []
    for line in wav_scp:
        # wav.scp format: "<uttid> <wav spec...>"; only the id is needed here.
        uttid = line.strip().split(' ')[0]
        uttid_part = uttid
        if (uttid_part not in utt2textgrid):
            print(("%s doesn't have transcription" % uttid))
            continue
        segments = []
        # Collect every non-empty interval mark from every tier; tier name is
        # used as the speaker label.
        tg = textgrid.TextGrid.fromFile(utt2textgrid[uttid_part])
        for i in range(tg.__len__()):
            for j in range(tg[i].__len__()):
                if tg[i][j].mark:
                    segments.append(Segment(uttid, tg[i].name, tg[i][j].minTime, tg[i][j].maxTime, tg[i][j].mark.strip()))
        # Sort by start time, then resolve overlaps per args.max_length /
        # args.overlap_length (semantics live in preposs_overlap).
        segments = sorted(segments, key=(lambda x: x.stime))
        segments = preposs_overlap(segments, args.max_length, args.overlap_length)
        all_segments += segments
    wav_scp.close()
    textgrid_flist.close()
    segments_file = codecs.open((Path(args.path) / 'segments_all'), 'w', 'utf-8')
    utt2spk_file = codecs.open((Path(args.path) / 'utt2spk_all'), 'w', 'utf-8')
    text_file = codecs.open((Path(args.path) / 'text_all_old'), 'w', 'utf-8')
    text_file_new = codecs.open((Path(args.path) / 'text_all'), 'w', 'utf-8')
    # NOTE(review): this output is named 'utt2spk_new' while the others use an
    # '_all' suffix — confirm downstream scripts expect that exact name.
    utt2spk_file_new = codecs.open((Path(args.path) / 'utt2spk_new'), 'w', 'utf-8')
    for i in range(len(all_segments)):
        # Kaldi utterance name: <rec>-<spk>-<start centisec>-<end centisec>.
        utt_name = ('%s-%s-%07d-%07d' % (all_segments[i].uttid, all_segments[i].spkr, (all_segments[i].stime * 100), (all_segments[i].etime * 100)))
        segments_file.write(('%s %s %.2f %.2f\n' % (utt_name, all_segments[i].uttid, all_segments[i].stime, all_segments[i].etime)))
        utt2spk_file.write(('%s %s-%s\n' % (utt_name, all_segments[i].uttid, all_segments[i].spkr)))
        text_file.write(('%s %s\n' % (utt_name, all_segments[i].text)))
        # Multi-speaker segments: concatenate per-speaker ids/texts using the
        # literal token 'src' as delimiter, then drop the leading delimiter.
        spk_array = ''
        text_array = ''
        for (key, value) in all_segments[i].spk_text.items():
            spk_array += ('src' + key)
            text_array += ('src' + value)
        spk_array = spk_array[3:]
        text_array = text_array[3:]
        utt2spk_file_new.write(('%s %s\n' % (utt_name, spk_array)))
        text_file_new.write(('%s %s\n' % (utt_name, text_array)))
    segments_file.close()
    utt2spk_file.close()
    text_file.close()
    utt2spk_file_new.close()
    text_file_new.close()
# NOTE(review): the three lines below look like decorator expressions whose
# leading '@' (and for the first one, the route-registration target such as
# `@app.route`) was lost — as written the first line is not valid Python.
# Restore the original decorators from upstream before relying on this.
('/add/host_group', methods=['POST'])
_wrapper_json
_web_opration_log('add_host', get_op_info=add_host_group_log)
def add_host_group():
    """Create a host group from the POSTed JSON body and return (name, hosts)."""
    # force=True: parse as JSON regardless of the request Content-Type header.
    params = request.get_json(force=True)
    # Validate/normalise the incoming definition (helper defined elsewhere;
    # presumably raises on malformed input — confirm).
    (group_name, group_type, hosts) = _check_and_format_params(params['group_name'], params['hosts'])
    # Persist the new host group.
    HostGroupConfDal.add_host_group(group_name, group_type, hosts)
    return (group_name, hosts)
def _find_vcvarsall(plat_spec):
    """Locate vcvarsall.bat, preferring VS2017 and falling back to VS2015.

    Returns (path_to_vcvarsall, None), or (None, None) when no suitable
    Visual C++ installation is found.
    """
    best_dir = _find_vc2017()[1]
    if not best_dir:
        _, best_dir = _find_vc2015()
    if not best_dir:
        log.debug('No suitable Visual C++ version found')
        return None, None
    vcvarsall = os.path.join(best_dir, 'vcvarsall.bat')
    if os.path.isfile(vcvarsall):
        return vcvarsall, None
    log.debug('%s cannot be found', vcvarsall)
    return None, None
def test_force_locale_with_threading():
    """force_locale() in one thread must not leak into another thread's context."""
    app = flask.Flask(__name__)
    babel.Babel(app, locale_selector=lambda: 'de_DE')
    gate = Semaphore(value=0)

    def worker():
        # Hold the forced locale until the main thread has checked its own.
        with app.test_request_context():
            with babel.force_locale('en_US'):
                assert str(babel.get_locale()) == 'en_US'
                gate.acquire()

    t = Thread(target=worker)
    t.start()
    try:
        with app.test_request_context():
            # The selector's locale must still win in this thread.
            assert str(babel.get_locale()) == 'de_DE'
    finally:
        gate.release()
        t.join()
def build_and_predict_model(ml_input_df):
    """Fit a logistic-regression click model and score it on its own input.

    Dispatches to cuML when `ml_input_df` is a cudf.DataFrame and to
    scikit-learn otherwise. Relies on module-level hyperparameters
    `convergence_tol`, `iterations` and `C` (defined elsewhere in the file)
    and on the metric helpers imported for each backend.
    Returns a dict with 'auc', 'precision', 'confusion_matrix', 'output_type'.
    """
    import cudf
    # Demographic flags plus one clicks_in_<i> count per category 1..7.
    feature_names = (['college_education', 'male'] + [('clicks_in_%d' % i) for i in range(1, 8)])
    X = ml_input_df[feature_names]
    # z-score standardization.
    X = ((X - X.mean()) / X.std())
    y = ml_input_df['clicks_in_category']
    if isinstance(ml_input_df, cudf.DataFrame):
        model_backend = cuml.LogisticRegression
    else:
        model_backend = sklearn.linear_model.LogisticRegression
    model = model_backend(tol=convergence_tol, penalty='none', fit_intercept=True, max_iter=iterations, C=C)
    model.fit(X, y)
    results_dict = {}
    # NOTE: metrics below are computed on the training data itself (in-sample).
    y_pred = model.predict(X)
    if isinstance(ml_input_df, cudf.DataFrame):
        # GPU path: move device series to host / cupy arrays for the helpers.
        results_dict['auc'] = roc_auc_score(y.values_host, y_pred.values_host)
        results_dict['precision'] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred))
        results_dict['confusion_matrix'] = cuml_confusion_matrix(cp.asarray(y, dtype='int32'), cp.asarray(y_pred, dtype='int32'))
    else:
        results_dict['auc'] = roc_auc_score(y.to_numpy(), y_pred)
        results_dict['precision'] = precision_score(y.to_numpy(), y_pred)
        results_dict['confusion_matrix'] = confusion_matrix(y.to_numpy(dtype='int32'), y_pred)
    results_dict['output_type'] = 'supervised'
    return results_dict
def test_create_group_deploy_token(gitlab_cli, group):
    """Creating a group deploy token via the CLI echoes its attributes back."""
    token_name = 'group-token'
    token_username = 'root'
    expiry = '2021-09-09'
    token_scopes = 'read_registry'
    result = gitlab_cli([
        '-v', 'group-deploy-token', 'create',
        '--group-id', group.id,
        '--name', token_name,
        '--username', token_username,
        '--expires-at', expiry,
        '--scopes', token_scopes,
    ])
    assert result.success
    for expected in (token_name, token_username, expiry, token_scopes):
        assert expected in result.stdout
class BCNet(nn.Module):
    """Bilinear Connect network: low-rank bilinear pooling of two streams.

    Projects a visual stream `v` and a question stream `q` to `h_dim * k`
    and combines them bilinearly into `h_out` logit maps. With k > 1 the k
    rank-1 components are average-pooled times k (i.e. summed) back to
    h_dim. Shape comments below assume v: (batch, num_v, v_dim) and
    q: (batch, num_q, q_dim) — presumably, as in BAN; confirm with callers.
    """
    def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=[0.2, 0.5], k=3):
        super(BCNet, self).__init__()
        # Above this many output maps, use a linear head instead of h_mat.
        self.c = 32
        self.k = k
        self.v_dim = v_dim
        self.q_dim = q_dim
        self.h_dim = h_dim
        self.h_out = h_out
        # dropout[0] is applied inside the projection nets, dropout[1] below.
        self.v_net = FCNet([v_dim, (h_dim * self.k)], act=act, dropout=dropout[0])
        self.q_net = FCNet([q_dim, (h_dim * self.k)], act=act, dropout=dropout[0])
        self.dropout = nn.Dropout(dropout[1])
        if (k > 1):
            # Sums each group of k rank components (after the *k rescale).
            self.p_net = nn.AvgPool1d(self.k, stride=self.k)
        if (h_out is None):
            pass
        elif (h_out <= self.c):
            # Small h_out: learned bilinear weight tensor + bias.
            self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, (h_dim * self.k)).normal_())
            self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
        else:
            # Large h_out: fall back to a weight-normed linear map.
            self.h_net = weight_norm(nn.Linear((h_dim * self.k), h_out), dim=None)
    def forward(self, v, q):
        """Return bilinear attention logits; branch depends on h_out."""
        if (self.h_out is None):
            # Raw pairwise products, no output projection.
            v_ = self.v_net(v).transpose(1, 2).unsqueeze(3)
            q_ = self.q_net(q).transpose(1, 2).unsqueeze(2)
            d_ = torch.matmul(v_, q_)
            logits = d_.transpose(1, 2).transpose(2, 3)
            return logits
        elif (self.h_out <= self.c):
            # Low-rank bilinear pooling via the h_mat parameter.
            v_ = self.dropout(self.v_net(v)).unsqueeze(1)
            q_ = self.q_net(q)
            h_ = (v_ * self.h_mat)
            logits = torch.matmul(h_, q_.unsqueeze(1).transpose(2, 3))
            logits = (logits + self.h_bias)
            return logits
        else:
            # Pairwise products followed by a linear head over the joint dim.
            v_ = self.dropout(self.v_net(v)).transpose(1, 2).unsqueeze(3)
            q_ = self.q_net(q).transpose(1, 2).unsqueeze(2)
            d_ = torch.matmul(v_, q_)
            logits = self.h_net(d_.transpose(1, 2).transpose(2, 3))
            return logits.transpose(2, 3).transpose(1, 2)
    def forward_with_weights(self, v, q, w):
        """Pool v and q through attention weights `w` into an h_dim vector."""
        v_ = self.v_net(v).transpose(1, 2).unsqueeze(2)
        q_ = self.q_net(q).transpose(1, 2).unsqueeze(3)
        logits = torch.matmul(torch.matmul(v_, w.unsqueeze(1)), q_)
        logits = logits.squeeze(3).squeeze(2)
        if (self.k > 1):
            # AvgPool over each k-group, rescaled by k => per-group sum.
            logits = logits.unsqueeze(1)
            logits = (self.p_net(logits).squeeze(1) * self.k)
        return logits
class NameInferenceError(InferenceError):
    """Raised when a name lookup fails during inference.

    Carries the looked-up name, the scope searched, and the inference
    context, all of which are available to the base class's message
    formatting via the `{name!r}` / `{scope!r}` placeholders.
    """

    def __init__(self, message: str='{name!r} not found in {scope!r}.', name: (str | None)=None, scope: (nodes.LocalsDictNodeNG | None)=None, context: (InferenceContext | None)=None, **kws: Any) -> None:
        # Record lookup details before delegating formatting to the base.
        (self.name, self.scope, self.context) = (name, scope, context)
        super().__init__(message, **kws)
def build_filter_query(key, values):
    """Build a Quod Libet query string matching `key` against any of `values`.

    Numeric tags (prefix '~#') become '#(tag = value)' clauses; text tags
    become case-insensitive "'value'c" comparisons with backslashes and
    single quotes escaped. Multiple values are OR-ed with '|(...)'.
    Returns '' when `values` is empty.
    """
    if not values:
        return ''
    if key.startswith('~#'):
        numeric_tag = key[2:]
        clauses = ['#(%s = %s)' % (numeric_tag, v) for v in values]
        if len(clauses) == 1:
            return clauses[0]
        return '|(%s)' % ', '.join(clauses)

    def _quote(v):
        # Escape backslash first so quote-escapes are not doubled.
        escaped = v.replace('\\', '\\\\').replace("'", "\\'")
        return "'%s'c" % escaped

    joined = ', '.join(_quote(v) for v in values)
    if len(values) == 1:
        return f'{key} = {joined}'
    return f'{key} = |({joined})'
def flag_str(event_name, field_name, value):
    """Render bitmask `value` as the named flags registered for this field.

    Uses the module-level `flag_fields` table. A zero `value` maps to the
    entry registered for bit 0 (if any); otherwise each set bit's name is
    appended, joined by the field's delimiter when one is configured.
    """
    result = ''
    field = flag_fields[event_name][field_name]
    if field:
        emitted = 0
        for bit in sorted(field['values']):
            # Special case: value == 0 renders only the zero-bit name.
            if not value and not bit:
                result += field['values'][bit]
                break
            if bit and (value & bit) == bit:
                if emitted and field['delim']:
                    result += ' ' + field['delim'] + ' '
                result += field['values'][bit]
                emitted = 1
                value &= ~bit  # clear the bit so leftovers could be detected
    return result
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from a parent directory name.

    Source tarballs unpack into a directory like '<prefix>-<version>'.
    Walks up to two levels above `root` looking for a basename starting
    with `parentdir_prefix` and returns a versioneer version dict; raises
    NotThisMethod when no candidate directory matches.
    """
    tried = []
    for _ in range(3):  # root, its parent and grandparent
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                'version': basename[len(parentdir_prefix):],
                'full-revisionid': None,
                'dirty': False,
                'error': None,
                'date': None,
            }
        tried.append(root)
        root = os.path.dirname(root)
    if verbose:
        print(('Tried directories %s but none started with prefix %s' % (str(tried), parentdir_prefix)))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def init_pretrained_weights(model, model_url):
    """Initialize `model` with pretrained weights downloaded from `model_url`.

    Checkpoint entries whose names are absent from the model, or whose
    shapes differ, are skipped — so e.g. a differently-sized classifier
    head keeps its random initialization.
    """
    checkpoint = model_zoo.load_url(model_url)
    own_state = model.state_dict()
    matched = {
        name: tensor
        for name, tensor in checkpoint.items()
        if name in own_state and own_state[name].size() == tensor.size()
    }
    own_state.update(matched)
    model.load_state_dict(own_state)
    print('Initialized model with pretrained weights from {}'.format(model_url))
def cli_main(modify_parser=None):
    """Parse training arguments and launch distributed training.

    When --profile is set, training runs inside CUDA profiler + NVTX
    emission contexts so kernels can be inspected with nvprof/Nsight.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    if not args.profile:
        distributed_utils.call_main(args, main)
        return
    with torch.cuda.profiler.profile():
        with torch.autograd.profiler.emit_nvtx():
            distributed_utils.call_main(args, main)
def get_data(relative_path: str) -> str:
    """Resolve `relative_path` inside qubekit's bundled `data` directory.

    Returns the absolute filename; raises ValueError when the file is not
    present in the installed package (e.g. added but not re-installed).
    """
    from pkg_resources import resource_filename

    data_path = resource_filename('qubekit', os.path.join('data', relative_path))
    if os.path.exists(data_path):
        return data_path
    raise ValueError(f"{relative_path} does not exist. If you have just added it, you'll have to re-install")
def measure_rss_deltas(rss_deltas: List[int], interval: timedelta=_DEFAULT_MEASURE_INTERVAL) -> Generator[(None, None, None)]:
    """Generator (context-manager style): sample RSS growth while the body runs.

    Records the current process RSS as a baseline, then has a background
    thread append deltas to `rss_deltas` every `interval` until the wrapped
    block finishes; the sampler is always stopped and joined on exit.
    """
    baseline = psutil.Process().memory_info().rss
    done = Event()
    sampler = Thread(target=_measure, args=(rss_deltas, interval, baseline, done))
    sampler.start()
    try:
        yield
    finally:
        done.set()
        sampler.join()
# NOTE(review): the two lines below appear to be rewrite-registration
# decorators whose leading '@' was stripped (e.g. '@register_canonicalize'
# and '@node_rewriter([pt_abs])' in PyTensor) — confirm against upstream.
_canonicalize
_rewriter([pt_abs])
def local_abs_lift(fgraph, node):
    """Lift `abs` through multiplication and division.

    abs(a * b) -> abs(a) * abs(b) and abs(a / b) -> abs(a) / abs(b),
    exposing the factors to further simplification. Returns the replacement
    list, or None (implicitly) when the pattern does not match.
    """
    if ((node.op == pt_abs) and node.inputs[0].owner):
        assert (node.nin == 1)
        if (node.inputs[0].owner.op == mul):
            return [mul(*[pt_abs(i) for i in node.inputs[0].owner.inputs])]
        if (node.inputs[0].owner.op == true_div):
            i = node.inputs[0].owner.inputs
            return [true_div(pt_abs(i[0]), pt_abs(i[1]))]
def test_history_expanded_with_invalid_options(base_app):
    """`history -x` combined with any incompatible option must error out uniformly."""
    for bad_opt in ('-r', '-e', '-o file', '-t file', '-c'):
        (out, err) = run_cmd(base_app, 'history -x ' + bad_opt)
        assert len(out) == 4
        assert out[0] == '-s and -x cannot be used with -c, -r, -e, -o, or -t'
        assert out[1].startswith('Usage:')
        assert base_app.last_result is False
def _fit_desoto_sandia_diode(ee, voc, vth, tc, specs, const):
    """Estimate the diode ideality factor via a robust linear fit (Sandia method).

    Regresses temperature-corrected Voc on Ns*Vth*log(Ee/E0) using
    statsmodels' RLM and returns the slope, which is the ideality factor.
    Raises ImportError when statsmodels is unavailable.
    """
    try:
        import statsmodels.api as sm
    except ImportError:
        raise ImportError('Parameter extraction using Sandia method requires statsmodels')
    # Regressor: Ns * Vth * ln(Ee / E0); response: Voc corrected to T0.
    x = specs['cells_in_series'] * vth * np.log(ee / const['E0'])
    y = voc - specs['beta_voc'] * (tc - const['T0'])
    fit = sm.RLM(y, sm.add_constant(x)).fit()
    # params[0] is the intercept; the slope is the ideality factor.
    return np.array(fit.params)[1]
def run_unittests(project_path, dirs=(), coverage=False):
    """Run the project's unit tests, optionally under coverage.

    project_path: project root containing test/runtest.py.
    dirs: extra test directories forwarded to runtest.py (default fixed
        from a mutable `[]` to an immutable `()` — same behavior, no risk
        of a shared default leaking between calls).
    coverage: when True, run under the coverage tool, print report and
        annotated sources, then remove the leftover `.coverage` data file.
    """
    runtest = os.path.join(project_path, 'test', 'runtest.py')
    if not coverage:
        run_command(_Python_path, runtest, '-verbose', str(_Verbose + 1), '-clean', '-pre', *dirs)
        return
    run_command(_Coverage, '-x', runtest, '-verbose', str(_Verbose + 1), '-clean', '-pre', *dirs)
    mods = get_files(locations=[_Src_dir], pattern='*.py')
    run_command(_Coverage, '-r', *mods)  # summary report
    run_command(_Coverage, '-a', *mods)  # annotated source files
    coverage_out_file = '.coverage'
    if os.path.exists(coverage_out_file) and not os.path.isdir(coverage_out_file):
        os.unlink(coverage_out_file)
def all_inputs_are_scalar(node):
    """Return True when every input of `node` has rank 0.

    An input whose `type.ndim` is not comparable to 0 (raises TypeError)
    is treated as non-scalar as well.
    """
    for inp in node.inputs:
        try:
            if inp.type.ndim > 0:
                return False
        except TypeError:
            # ndim not an orderable number -> not a scalar.
            return False
    return True
def get_constraints(total: Optional[int]=None, chunksize: Optional[int]=None, sequential_threshold: int=1, max_depth: Optional[int]=None, max_size: Optional[int]=None, max_leaves: Optional[int]=None, branch_factor: int=2) -> TreeConstraints:
    """Build tree constraints, sized either by `total` or by `chunksize`.

    With a `total`, a size-based constraint set is returned. Without one,
    `chunksize` is mandatory and only `max_depth` may additionally be
    enforced (size/leaf caps require knowing the total); raises ValueError
    on either violation.
    """
    constraint_cls = TreeConstraintsSize
    if total is None:
        if chunksize is None:
            raise ValueError('if no total is given, chunksize must be provided')
        if max_size is not None or max_leaves is not None:
            raise ValueError(f'if no total is given, only max_depth can be enforced; got max_size={max_size}, max_leaves={max_leaves}')
        constraint_cls = TreeConstraintsChunksize
    return constraint_cls(total=total, chunksize=chunksize, sequential_threshold=sequential_threshold, max_depth=max_depth, max_size=max_size, max_leaves=max_leaves, branch_factor=branch_factor)
def test_files_reordered_when_seed_not_reset(ourtester):
    """With --randomly-dont-reset-seed, file order follows the seeded shuffle."""
    body = '\n def test_it():\n pass\n '
    ourtester.makepyfile(test_a=body, test_b=body, test_c=body, test_d=body)
    result = ourtester.runpytest('-v', '--randomly-seed=15', '--randomly-dont-reset-seed')
    result.assert_outcomes(passed=4, failed=0)
    # Seed 15 yields this specific file permutation.
    assert result.outlines[9:13] == ['test_b.py::test_it PASSED', 'test_a.py::test_it PASSED', 'test_d.py::test_it PASSED', 'test_c.py::test_it PASSED']
class AffineMul(torch.autograd.Function):
    """Autograd op: elementwise multiply `input` by a shared scale `gamma`.

    Used in a SUMMA-style model-parallel layout: only row rank 0 holds the
    real `gamma`; other ranks receive it by broadcast over the column
    process group, and the backward pass reduces the gamma gradient back to
    the owning rank — presumably the rank given by
    col_rank + ddp_rank * model_parallel_size; confirm against mpu's layout.
    """
    def forward(ctx, input, gamma, row_rank, col_rank, ddp_rank, model_parallel_size, dim, dtype):
        with torch.no_grad():
            # Row rank 0 supplies gamma; other row ranks start from zeros and
            # are overwritten by the broadcast below.
            if (row_rank == 0):
                gamma_temp = gamma.clone()
            else:
                gamma_temp = torch.zeros(dim, dtype=dtype, device=torch.cuda.current_device())
            torch.distributed.broadcast(gamma_temp, src=(col_rank + (ddp_rank * model_parallel_size)), group=mpu.get_summa_col_group())
            output = (input * gamma_temp)
        # Save ranks so backward can route the gamma gradient correctly.
        ctx.save_for_backward(input, gamma_temp)
        ctx.row_rank = row_rank
        ctx.col_rank = col_rank
        ctx.ddp_rank = ddp_rank
        ctx.model_parallel_size = model_parallel_size
        return output
    def backward(ctx, output_grad):
        (input, gamma_temp) = ctx.saved_tensors
        # d/d(input) = grad * gamma; d/d(gamma) = sum over batch/seq dims.
        input_grad = (output_grad * gamma_temp)
        gamma_grad = torch.sum((output_grad * input), dim=[0, 1])
        # Accumulate the gamma gradient onto the rank that owns gamma.
        torch.distributed.reduce(gamma_grad, dst=(ctx.col_rank + (ctx.ddp_rank * ctx.model_parallel_size)), group=mpu.get_summa_col_group())
        # Only the owning row rank reports a gamma gradient; the remaining
        # None entries match the non-tensor forward arguments.
        if (ctx.row_rank == 0):
            return (input_grad, gamma_grad, None, None, None, None, None, None)
        else:
            return (input_grad, None, None, None, None, None, None, None)
class Settlement(Resource):
    """Razorpay Settlement API resource.

    Wraps the endpoints under <V1>/settlements, including the combined
    reconciliation report and on-demand settlements. Every `data` parameter
    is an optional query/body dict forwarded to the HTTP helpers.
    Fix: mutable default arguments (`data={}`) replaced with a None
    sentinel so a shared dict can never leak state between calls; passing
    an explicit dict behaves exactly as before.
    """

    def __init__(self, client=None):
        super(Settlement, self).__init__(client)
        self.base_url = URL.V1 + URL.SETTLEMENT_URL

    def all(self, data=None, **kwargs):
        """List settlements (GET <base>)."""
        return super(Settlement, self).all({} if data is None else data, **kwargs)

    def fetch(self, settlement_id, data=None, **kwargs):
        """Fetch a single settlement by id (GET <base>/<id>)."""
        return super(Settlement, self).fetch(settlement_id, {} if data is None else data, **kwargs)

    def report(self, data=None, **kwargs):
        """Combined settlement reconciliation report (GET <base>/recon/combined)."""
        url = '{}/recon/{}'.format(self.base_url, 'combined')
        return self.get_url(url, {} if data is None else data, **kwargs)

    def create_ondemand_settlement(self, data=None, **kwargs):
        """Create an on-demand settlement (POST <base>/ondemand)."""
        url = '{}/{}'.format(self.base_url, 'ondemand')
        return self.post_url(url, {} if data is None else data, **kwargs)

    def fetch_all_ondemand_settlement(self, data=None, **kwargs):
        """List on-demand settlements (GET <base>/ondemand)."""
        url = '{}/{}'.format(self.base_url, 'ondemand')
        return self.get_url(url, {} if data is None else data, **kwargs)

    def fetch_ondemand_settlement_id(self, settlement_id, data=None, **kwargs):
        """Fetch one on-demand settlement by id (GET <base>/ondemand/<id>)."""
        url = '{}/ondemand/{}'.format(self.base_url, settlement_id)
        return self.get_url(url, {} if data is None else data, **kwargs)
# NOTE(review): the three lines below appear to be rewrite-registration
# decorators whose leading '@' was stripped (e.g. '@register_canonicalize',
# '@register_stabilize' and '@node_rewriter([Blockwise])' in PyTensor) —
# confirm against upstream.
_canonicalize
_stabilize
_rewriter([Blockwise])
def cholesky_ldotlt(fgraph, node):
    """Rewrite cholesky(L @ L.T) -> L and cholesky(U.T @ U) -> U.

    Fires only when one dot operand is tagged lower/upper triangular and
    the other operand is its transpose; returns the factor matching the
    Cholesky op's `lower` flag, or None (implicitly) when not applicable.
    """
    if (not isinstance(node.op.core_op, Cholesky)):
        return
    A = node.inputs[0]
    # Only a 2-D dot/matmul product is eligible.
    if (not ((A.owner is not None) and ((isinstance(A.owner.op, (Dot, Dot22)) and (A.owner.inputs[0].type.ndim == 2)) or (A.owner.op == _matrix_matrix_matmul)))):
        return
    (l, r) = A.owner.inputs
    # Case A = L @ L.T with L tagged lower-triangular.
    if (getattr(l.tag, 'lower_triangular', False) and is_matrix_transpose(r) and (r.owner.inputs[0] == l)):
        if node.op.core_op.lower:
            return [l]
        return [r]
    # Case A = U.T @ U with U tagged upper-triangular (here l is U.T).
    if (getattr(r.tag, 'upper_triangular', False) and is_matrix_transpose(l) and (l.owner.inputs[0] == r)):
        if node.op.core_op.lower:
            return [l]
        return [r]
class PlaneWaveHamiltonianTest(unittest.TestCase):
    """Tests for plane_wave_hamiltonian and its dual-basis/Jordan-Wigner forms."""
    def test_plane_wave_hamiltonian_integration(self):
        """Plane-wave and dual-basis Hamiltonians must share the same spectrum."""
        length_set = [2, 3, 4]
        spinless_set = [True, False]
        length_scale = 1.1
        # NOTE(review): the second and third geometries are identical
        # duplicates — possibly intentional repetition, confirm upstream.
        for geometry in [[('H', (0,)), ('H', (0.8,))], [('H', (0.1,))], [('H', (0.1,))]]:
            for l in length_set:
                for spinless in spinless_set:
                    grid = Grid(dimensions=1, scale=length_scale, length=l)
                    h_plane_wave = plane_wave_hamiltonian(grid, geometry, spinless, True, include_constant=False)
                    h_dual_basis = plane_wave_hamiltonian(grid, geometry, spinless, False, include_constant=False)
                    # Both representations must be Hermitian operators.
                    plane_wave_operator = get_sparse_operator(h_plane_wave)
                    dual_operator = get_sparse_operator(h_dual_basis)
                    self.assertTrue(is_hermitian(plane_wave_operator))
                    self.assertTrue(is_hermitian(dual_operator))
                    # Spectra must agree elementwise after Jordan-Wigner.
                    jw_h_plane_wave = jordan_wigner(h_plane_wave)
                    jw_h_dual_basis = jordan_wigner(h_dual_basis)
                    h_plane_wave_spectrum = eigenspectrum(jw_h_plane_wave)
                    h_dual_basis_spectrum = eigenspectrum(jw_h_dual_basis)
                    max_diff = np.amax((h_plane_wave_spectrum - h_dual_basis_spectrum))
                    min_diff = np.amin((h_plane_wave_spectrum - h_dual_basis_spectrum))
                    self.assertAlmostEqual(max_diff, 0)
                    self.assertAlmostEqual(min_diff, 0)
    def test_plane_wave_hamiltonian_default_to_jellium_with_no_geometry(self):
        """Without geometry, the Hamiltonian reduces to the jellium model."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        self.assertTrue((plane_wave_hamiltonian(grid) == jellium_model(grid)))
    def test_plane_wave_hamiltonian_bad_geometry(self):
        """Geometry whose coordinates mismatch the grid dimension must raise."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid, geometry=[('H', (0, 0, 0))])
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid, geometry=[('H', (0, 0, 0))], include_constant=True)
    def test_plane_wave_hamiltonian_bad_element(self):
        """Unknown chemical elements must raise ValueError."""
        grid = Grid(dimensions=3, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            plane_wave_hamiltonian(grid, geometry=[('Unobtainium', (0, 0, 0))])
    def test_jordan_wigner_dual_basis_hamiltonian(self):
        """Direct JW dual-basis construction equals JW of the fermionic form."""
        grid = Grid(dimensions=2, length=3, scale=1.0)
        spinless_set = [True, False]
        geometry = [('H', (0, 0)), ('H', (0.5, 0.8))]
        for spinless in spinless_set:
            fermion_hamiltonian = plane_wave_hamiltonian(grid, geometry, spinless, False, include_constant=False)
            qubit_hamiltonian = jordan_wigner(fermion_hamiltonian)
            test_hamiltonian = jordan_wigner_dual_basis_hamiltonian(grid, geometry, spinless, include_constant=False)
            self.assertTrue((test_hamiltonian == qubit_hamiltonian))
    def test_jordan_wigner_dual_basis_hamiltonian_default_to_jellium(self):
        """Without geometry, JW dual-basis reduces to JW of jellium."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        self.assertTrue((jordan_wigner_dual_basis_hamiltonian(grid) == jordan_wigner(jellium_model(grid, plane_wave=False))))
    def test_jordan_wigner_dual_basis_hamiltonian_bad_geometry(self):
        """Dimension-mismatched geometry must raise ValueError."""
        grid = Grid(dimensions=1, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid, geometry=[('H', (0, 0, 0))])
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid, geometry=[('H', (0, 0, 0))], include_constant=True)
    def test_jordan_wigner_dual_basis_hamiltonian_bad_element(self):
        """Unknown chemical elements must raise ValueError."""
        grid = Grid(dimensions=3, scale=1.0, length=4)
        with self.assertRaises(ValueError):
            jordan_wigner_dual_basis_hamiltonian(grid, geometry=[('Unobtainium', (0, 0, 0))])
    def test_plane_wave_energy_cutoff(self):
        """Applying an energy cutoff must change the spectrum."""
        geometry = [('H', (0,)), ('H', (0.8,))]
        grid = Grid(dimensions=1, scale=1.1, length=5)
        e_cutoff = 50.0
        h_1 = plane_wave_hamiltonian(grid, geometry, True, True, False)
        jw_1 = jordan_wigner(h_1)
        spectrum_1 = eigenspectrum(jw_1)
        h_2 = plane_wave_hamiltonian(grid, geometry, True, True, False, e_cutoff)
        jw_2 = jordan_wigner(h_2)
        spectrum_2 = eigenspectrum(jw_2)
        max_diff = np.amax(np.absolute((spectrum_1 - spectrum_2)))
        self.assertGreater(max_diff, 0.0)
    def test_plane_wave_period_cutoff(self):
        """Applying a period cutoff must change the spectrum."""
        geometry = [('H', (0,)), ('H', (0.8,))]
        grid = Grid(dimensions=1, scale=1.1, length=5)
        period_cutoff = 50.0
        h_1 = plane_wave_hamiltonian(grid, geometry, True, True, False, None)
        jw_1 = jordan_wigner(h_1)
        spectrum_1 = eigenspectrum(jw_1)
        h_2 = plane_wave_hamiltonian(grid, geometry, True, True, False, None, True, period_cutoff)
        jw_2 = jordan_wigner(h_2)
        spectrum_2 = eigenspectrum(jw_2)
        max_diff = np.amax(np.absolute((spectrum_1 - spectrum_2)))
        self.assertGreater(max_diff, 0.0)
        # Result deliberately discarded: exercises the default-period branch
        # (fieldlines=True, no explicit cutoff) for coverage — presumably;
        # confirm this stray call is intentional.
        plane_wave_hamiltonian(grid, geometry, True, True, False, None, True)
class Timer(Signal):
    """Signal that fires after `interval` seconds, once or repeatedly.

    NOTE(review): `interval()` and `oneshot()` below are property factories
    that return locals() containing fget/fset; the original source most
    likely wired them up with `interval = property(**interval())`-style
    assignments that appear to have been stripped. As written here they are
    inert, and the attributes set in __init__ are plain attributes —
    confirm against upstream before editing.
    """
    def __init__(self, interval=1.0, oneshot=True):
        Signal.__init__(self)
        self.interval = interval  # seconds between firings
        self.oneshot = oneshot    # True: fire once; False: repeat
        self._timeout = 0         # absolute time of next firing; 0 = stopped
    def interval():
        # Property factory: validated accessor for `_interval`.
        def fget(self):
            return self._interval
        def fset(self, value):
            if (not isinstance(value, (int, float))):
                raise ValueError('interval must be a float or integer.')
            if (value <= 0):
                raise ValueError('interval must be larger than 0.')
            self._interval = float(value)
        return locals()
    def oneshot():
        # Property factory: boolean-coercing accessor for `_oneshot`.
        def fget(self):
            return self._oneshot
        def fset(self, value):
            self._oneshot = bool(value)
        return locals()
    def running(self):
        # A non-zero deadline means the timer is scheduled.
        return (self._timeout > 0)
    def start(self, interval=None, oneshot=None):
        # Optionally reconfigure, then register with the shared timer thread.
        if (interval is not None):
            self.interval = interval
        if (oneshot is not None):
            self.oneshot = oneshot
        self._timeout = (time.time() + self.interval)
        theTimerThread.add(self)
    def stop(self):
        # Deregister and clear the deadline so running() reports False.
        theTimerThread.discard(self)
        self._timeout = 0
    def _on_timeout(self):
        # Called by the timer thread; return False to deregister, True to
        # reschedule for the next period.
        self.emit()
        if self.oneshot:
            self._timeout = 0
            return False
        else:
            self._timeout = (time.time() + self.interval)
            return True
def config_optimizer(optimizer_name, learning_rate, decay=0.9, momentum=0.9):
    """Construct a TF1 optimizer by name.

    Supported names: 'momentum', 'rmsprop', 'adam', 'sgd'. `decay` is used
    only by rmsprop; `momentum` by momentum and rmsprop. Raises ValueError
    for any other name.
    """
    builders = {
        'momentum': lambda: tf.train.MomentumOptimizer(learning_rate, momentum=momentum),
        'rmsprop': lambda: tf.train.RMSPropOptimizer(learning_rate, decay=decay, momentum=momentum),
        'adam': lambda: tf.train.AdamOptimizer(learning_rate),
        'sgd': lambda: tf.train.GradientDescentOptimizer(learning_rate),
    }
    try:
        return builders[optimizer_name]()
    except KeyError:
        raise ValueError('Unsupported optimizer type!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.