def deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name='deconv2d'):
with tf.variable_scope(name):
        return slim.conv2d_transpose(input_, output_dim, ks, s, padding='SAME', activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev), biases_initializer=None)
class TestClickThroughRate(MetricClassTester):
def test_ctr_with_valid_input(self) -> None:
input = torch.tensor([[1, 0, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1]])
self.run_class_implementation_tests(metric=ClickThroughRate(), state_names={'click_total', 'weight_total'}, update_kwargs={'input': input}, compute_result=torch.tensor([0.5625], dtype=torch.float64), num_total_updates=4, num_processes=2)
input = torch.tensor([[[1, 0, 0, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [1, 1, 1, 1]], [[0, 1, 0, 1], [0, 1, 0, 1]], [[1, 1, 1, 1], [0, 1, 1, 1]]])
weights = torch.tensor([[[1, 2, 3, 4], [0, 0, 0, 0]], [[1, 2, 1, 2], [1, 2, 1, 2]], [[1, 1, 1, 1], [1, 1, 3, 1]], [[1, 1, 1, 1], [1, 1, 1, 1]]])
self.run_class_implementation_tests(metric=ClickThroughRate(num_tasks=2), state_names={'click_total', 'weight_total'}, update_kwargs={'input': input, 'weights': weights}, compute_result=torch.tensor([0.4583333, 0.6875], dtype=torch.float64), num_total_updates=4, num_processes=2)
weights = [4.0, 1, torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]), 0.0]
self.run_class_implementation_tests(metric=ClickThroughRate(num_tasks=2), state_names={'click_total', 'weight_total'}, update_kwargs={'input': input, 'weights': weights}, compute_result=torch.tensor([0., 0.], dtype=torch.float64), num_total_updates=4, num_processes=2)
def test_ctr_with_invalid_input(self) -> None:
metric = ClickThroughRate()
with self.assertRaisesRegex(ValueError, '^`input` should be a one or two dimensional tensor'):
metric.update(torch.rand(3, 2, 2))
metric = ClickThroughRate()
with self.assertRaisesRegex(ValueError, '^tensor `weights` should have the same shape as tensor `input`'):
metric.update(torch.rand(4, 2), torch.rand(3))
with self.assertRaisesRegex(ValueError, '`num_tasks = 1`, `input` is expected to be one-dimensional tensor,'):
metric.update(torch.tensor([[1, 1], [0, 1]]))
metric = ClickThroughRate(num_tasks=2)
with self.assertRaisesRegex(ValueError, "`num_tasks = 2`, `input`'s shape is expected to be"):
metric.update(torch.tensor([1, 0, 0, 1]))
with self.assertRaisesRegex(ValueError, '`num_tasks` value should be greater than and equal to 1,'):
            metric = ClickThroughRate(num_tasks=0)
def test_graph_crf_class_weights():
crf = GraphCRF(n_states=3, n_features=3)
w = np.array([1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
x = (np.array([[1, 1.5, 1.1]]), np.empty((0, 2)))
assert_equal(crf.inference(x, w), 1)
assert_equal(crf.loss_augmented_inference(x, [1], w), 2)
crf = GraphCRF(n_states=3, n_features=3, class_weight=[1, 0.1, 1])
    assert_equal(crf.loss_augmented_inference(x, [1], w), 1)
@patch('beeref.view.BeeGraphicsView.reset_previous_transform')
@patch('beeref.view.BeeGraphicsView.pan')
def test_zoom_out(pan_mock, reset_mock, view, imgfilename3x3):
item = BeePixmapItem(QtGui.QImage(imgfilename3x3))
view.scale(100, 100)
view.scene.addItem(item)
view.zoom((- 40), QtCore.QPointF(10.0, 10.0))
assert (view.get_scale() == (100 / 1.04))
reset_mock.assert_called_once_with()
    pan_mock.assert_called_once()
@MODELS.register_module()
class FPEM_FFM(BaseModule):
def __init__(self, in_channels, conv_out=128, fpem_repeat=2, align_corners=False, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
super().__init__(init_cfg=init_cfg)
self.reduce_conv_c2 = nn.Sequential(nn.Conv2d(in_channels=in_channels[0], out_channels=conv_out, kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c3 = nn.Sequential(nn.Conv2d(in_channels=in_channels[1], out_channels=conv_out, kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c4 = nn.Sequential(nn.Conv2d(in_channels=in_channels[2], out_channels=conv_out, kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c5 = nn.Sequential(nn.Conv2d(in_channels=in_channels[3], out_channels=conv_out, kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.align_corners = align_corners
self.fpems = ModuleList()
for _ in range(fpem_repeat):
self.fpems.append(FPEM(conv_out))
def forward(self, x):
(c2, c3, c4, c5) = x
c2 = self.reduce_conv_c2(c2)
c3 = self.reduce_conv_c3(c3)
c4 = self.reduce_conv_c4(c4)
c5 = self.reduce_conv_c5(c5)
for (i, fpem) in enumerate(self.fpems):
(c2, c3, c4, c5) = fpem(c2, c3, c4, c5)
if (i == 0):
c2_ffm = c2
c3_ffm = c3
c4_ffm = c4
c5_ffm = c5
else:
c2_ffm = (c2_ffm + c2)
c3_ffm = (c3_ffm + c3)
c4_ffm = (c4_ffm + c4)
c5_ffm = (c5_ffm + c5)
c5 = F.interpolate(c5_ffm, c2_ffm.size()[(- 2):], mode='bilinear', align_corners=self.align_corners)
c4 = F.interpolate(c4_ffm, c2_ffm.size()[(- 2):], mode='bilinear', align_corners=self.align_corners)
c3 = F.interpolate(c3_ffm, c2_ffm.size()[(- 2):], mode='bilinear', align_corners=self.align_corners)
outs = [c2_ffm, c3, c4, c5]
        return tuple(outs)
def pytest_configure(config: Config) -> None:
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if (config.option.debug or config.option.traceconfig):
def mywriter(tags, args):
msg = ' '.join(map(str, args))
reporter.write_line(('[traceconfig] ' + msg))
        config.trace.root.setprocessor('pytest:config', mywriter)
class Repeat(Op):
__props__ = ('axis',)
def __init__(self, axis=None):
self.axis = axis
def make_node(self, x, repeats):
x = ptb.as_tensor_variable(x)
repeats = ptb.as_tensor_variable(repeats)
if (repeats.dtype not in integer_dtypes):
raise TypeError('repeats.dtype must be an integer.')
ptr_bitwidth = LOCAL_BITWIDTH
if (ptr_bitwidth == 64):
numpy_unsupported_dtypes = ('uint64',)
if (ptr_bitwidth == 32):
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
if (repeats.dtype in numpy_unsupported_dtypes):
raise TypeError(("dtypes %s are not supported by numpy.repeat for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)), repeats.dtype)
if (self.axis is None):
out_shape = [None]
else:
try:
const_reps = ptb.get_underlying_scalar_constant_value(repeats)
except NotScalarConstantError:
const_reps = None
if (const_reps == 1):
out_shape = x.type.shape
else:
out_shape = list(x.type.shape)
out_shape[self.axis] = None
out_type = TensorType(x.dtype, shape=tuple(((1 if (s == 1) else None) for s in out_shape)))
return Apply(self, [x, repeats], [out_type()])
def perform(self, node, inputs, output_storage):
x = inputs[0]
repeats = inputs[1]
z = output_storage[0]
z[0] = np.repeat(x, repeats=repeats, axis=self.axis)
def connection_pattern(self, node):
return [[True], [False]]
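    # Scalar-repeat gradient: reshape gz so each run of repeated values gets its
    # own axis, then sum that axis away; `repeats` itself is disconnected.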
def grad(self, inputs, gout):
(x, repeats) = inputs
(gz,) = gout
if (repeats.ndim == 0):
if (self.axis is None):
axis = x.ndim
elif (self.axis >= 0):
axis = (self.axis + 1)
else:
axis = ((self.axis + x.ndim) + 1)
shape = [x.shape[k] for k in range(x.ndim)]
shape.insert(axis, repeats)
return [gz.reshape(shape, ndim=(x.ndim + 1)).sum(axis=axis), DisconnectedType()()]
elif (repeats.ndim == 1):
raise NotImplementedError()
else:
raise ValueError()
def infer_shape(self, fgraph, node, ins_shapes):
i0_shapes = ins_shapes[0]
repeats = node.inputs[1]
out_shape = list(i0_shapes)
dtype = None
if (repeats.dtype in ('uint8', 'uint16', 'uint32')):
dtype = 'int64'
if (self.axis is None):
if (repeats.ndim == 0):
if (len(i0_shapes) == 0):
out_shape = [repeats]
else:
res = 1
for d in i0_shapes:
res = (res * d)
out_shape = ((res * repeats),)
else:
out_shape = [pt_sum(repeats, dtype=dtype)]
elif (repeats.ndim == 0):
out_shape[self.axis] = (out_shape[self.axis] * repeats)
else:
out_shape[self.axis] = pt_sum(repeats, dtype=dtype)
        return [out_shape]
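# Appears to be the backward pass of a Gray-code set-partition enumerator
# (paired with a forward helper _f): it steps entries of the restricted-growth
# string `a`, yielding the current partition via _visit after each change.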
def _b(mu, nu, sigma, n, a, k, collection):
if (nu == (mu + 1)):
while (a[nu] < (mu - 1)):
(yield _visit(n, a, k, collection))
a[nu] = (a[nu] + 1)
(yield _visit(n, a, k, collection))
a[mu] = 0
elif (nu > (mu + 1)):
if (((a[nu] + sigma) % 2) == 1):
for v in _f(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
else:
for v in _b(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
while (a[nu] < (mu - 1)):
a[nu] = (a[nu] + 1)
if (((a[nu] + sigma) % 2) == 1):
for v in _f(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
else:
for v in _b(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
if (((mu + sigma) % 2) == 1):
a[(nu - 1)] = 0
else:
a[mu] = 0
if (mu == 2):
(yield _visit(n, a, k, collection))
else:
for v in _b((mu - 1), (nu - 1), ((mu + sigma) % 2), n, a, k, collection):
            (yield v)
@jax_funcify.register(Unique)
def jax_funcify_Unique(op, **kwargs):
axis = op.axis
if (axis is not None):
raise NotImplementedError('jax.numpy.unique is not implemented for the axis argument')
return_index = op.return_index
return_inverse = op.return_inverse
return_counts = op.return_counts
def unique(x, return_index=return_index, return_inverse=return_inverse, return_counts=return_counts, axis=axis):
ret = jnp.lax_numpy._unique1d(x, return_index, return_inverse, return_counts)
if (len(ret) == 1):
return ret[0]
else:
return ret
    return unique
class WaveEncoder(MediaEncoder):
def get_file_extensions(self):
return ('.wav', '.wave', '.riff')
def encode(self, source, filename, file):
opened_file = None
if (file is None):
file = open(filename, 'wb')
opened_file = True
source.seek(0)
wave_writer = wave.open(file, mode='wb')
wave_writer.setnchannels(source.audio_format.channels)
wave_writer.setsampwidth(source.audio_format.bytes_per_sample)
wave_writer.setframerate(source.audio_format.sample_rate)
chunksize = source.audio_format.bytes_per_second
audiodata = source.get_audio_data(chunksize)
while audiodata:
wave_writer.writeframes(audiodata.data)
audiodata = source.get_audio_data(chunksize)
else:
wave_writer.close()
if opened_file:
            file.close()
@functools.lru_cache(maxsize=None)
def parse_constraint(constraints: str) -> BaseConstraint:
if (constraints == '*'):
return AnyConstraint()
or_constraints = re.split('\\s*\\|\\|?\\s*', constraints.strip())
or_groups = []
for constraints in or_constraints:
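        # Split an "and" group on comma/space separators while avoiding splits
        # inside operators (e.g. `>= 1.0`) and hyphenated ranges (e.g. `1.0 - 2.0`).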
and_constraints = re.split('(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)', constraints)
constraint_objects = []
if (len(and_constraints) > 1):
for constraint in and_constraints:
constraint_objects.append(parse_single_constraint(constraint))
else:
constraint_objects.append(parse_single_constraint(and_constraints[0]))
if (len(constraint_objects) == 1):
constraint = constraint_objects[0]
else:
constraint = constraint_objects[0]
for next_constraint in constraint_objects[1:]:
constraint = constraint.intersect(next_constraint)
or_groups.append(constraint)
if (len(or_groups) == 1):
return or_groups[0]
else:
        return UnionConstraint(*or_groups)
class ImplantSet():
def __init__(self, name=None):
self.name = name
self.__implants = HandledImplantList()
    @property
    def implants(self):
return self.__implants
    @classmethod
    def exportSets(cls, *sets):
out = '# Exported from pyfa\n#\n# Values are in following format:\n# [Implant Set name]\n# [Implant name]\n# [Implant name]\n# ...\n\n'
for set in sets:
out += '[{}]\n'.format(set.name)
for implant in set.implants:
out += '{}\n'.format(implant.item.name)
out += '\n'
return out.strip()
def __deepcopy__(self, memo):
copy = ImplantSet(self.name)
copy.name = ('%s copy' % self.name)
orig = getattr(self, 'implants')
c = getattr(copy, 'implants')
for i in orig:
c.append(deepcopy(i))
        return copy
@keras_test
def test_gaussiandropout_legacy_interface():
old_layer = keras.layers.GaussianDropout(p=0.6, name='drop')
new_layer_1 = keras.layers.GaussianDropout(rate=0.6, name='drop')
new_layer_2 = keras.layers.GaussianDropout(0.6, name='drop')
assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer_1.get_config()))
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer_2.get_config()))
def test_task_will_be_executed_after_another_one_with_function(tmp_path):
    source = '\n    from pytask import task\n    from pathlib import Path\n    from typing_extensions import Annotated\n\n    def task_first() -> Annotated[str, Path("out.txt")]:\n        return "Hello, World!"\n\n    @task(after=task_first)\n    def task_second():\n        assert Path(__file__).parent.joinpath("out.txt").exists()\n    '
tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
session = build(paths=tmp_path)
    assert (session.exit_code == ExitCode.OK)
def pytest_configure(config: Config) -> None:
    config.addinivalue_line('markers', "parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info and examples.")
    config.addinivalue_line('markers', 'usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ')
def main():
logging.basicConfig(level=logging.DEBUG)
logging.info(sys.argv)
if (len(sys.argv) != 4):
logging.error('Usage: python3 scripts/tests_required.py <image.name> <image.github_location> <output.txt>')
sys.exit(1)
image_name = sys.argv[1]
image_github_location = sys.argv[2]
output_file = sys.argv[3]
tests_required = check_if_tests_required(image_name, image_github_location)
    output_test_required(output_file, tests_required)
class PingCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(PingCollector, self).get_default_config_help()
config_help.update({'bin': 'The path to the ping binary'})
return config_help
def get_default_config(self):
config = super(PingCollector, self).get_default_config()
config.update({'path': 'ping', 'bin': '/bin/ping'})
return config
def collect(self):
for key in self.config.keys():
if (key[:7] == 'target_'):
host = self.config[key]
metric_name = host.replace('.', '_')
ping = self.run_command(['-nq', '-c 1', host])
ping = ping[0].strip().split('\n')[(- 1)]
if ping.startswith('rtt'):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
elif ping.startswith('round-trip '):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
else:
metric_value = 10000
                self.publish(metric_name, metric_value, precision=3)
class TestHatchPersonalProjectConfigFile():
def test_correct(self, temp_dir, helpers):
metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'dynamic': ['version']}, 'tool': {'hatch': {'build': {'reproducible': False}}}})
file_path = ((temp_dir / 'a') / 'b')
file_path.ensure_parent_dir_exists()
file_path.write_text('__version__ = "0.0.1"')
(temp_dir / 'pyproject.toml').touch()
file_path = (temp_dir / 'hatch.toml')
file_path.write_text(helpers.dedent("\n [version]\n path = 'a/b'\n "))
assert (metadata.version == '0.0.1')
assert (metadata.hatch.build_config['reproducible'] is False)
def test_precedence(self, temp_dir, helpers):
metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'dynamic': ['version']}, 'tool': {'hatch': {'version': {'path': 'a/b'}, 'build': {'reproducible': False}}}})
file_path = ((temp_dir / 'a') / 'b')
file_path.ensure_parent_dir_exists()
file_path.write_text('__version__ = "0.0.1"')
file_path = ((temp_dir / 'c') / 'd')
file_path.ensure_parent_dir_exists()
file_path.write_text('__version__ = "0.0.2"')
(temp_dir / 'pyproject.toml').touch()
file_path = (temp_dir / 'hatch.toml')
file_path.write_text(helpers.dedent("\n [version]\n path = 'c/d'\n "))
assert (metadata.version == '0.0.2')
        assert (metadata.hatch.build_config['reproducible'] is False)
def main():
args = parse_args()
model_zoo = args.model_zoo
dst_folder = args.dst_folder
bucket = oss2.Bucket(oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET), ENDPOINT, BUCKET_NAME)
for (root, dirs, files) in os.walk(model_zoo):
for file in files:
file_path = osp.relpath(osp.join(root, file), model_zoo)
print(f'Uploading {file_path}')
oss2.resumable_upload(bucket, osp.join(dst_folder, file_path), osp.join(model_zoo, file_path))
            bucket.put_object_acl(osp.join(dst_folder, file_path), oss2.OBJECT_ACL_PUBLIC_READ)
class BaseDatasetBuilder():
def __init__(self, dataset_name):
self.dataset_name = dataset_name
def load(self, dataset_type, config, *args, **kwargs):
dataset = self._load(dataset_type, config, *args, **kwargs)
if (dataset is not None):
dataset.init_processors()
dataset.try_fast_read()
return dataset
def _load(self, dataset_type, config, *args, **kwargs):
raise NotImplementedError("This dataset builder doesn't implement a load method")
def build(self, dataset_type, config, *args, **kwargs):
if is_main_process():
self._build(dataset_type, config, *args, **kwargs)
synchronize()
def _build(self, dataset_type, config, *args, **kwargs):
        raise NotImplementedError("This dataset builder doesn't implement a build method")
class NormalisedGaussianKDEStorageRecorder(NumpyArrayNormalisedStorageRecorder):
def __init__(self, *args, **kwargs):
self.resample_freq = kwargs.pop('resample_freq', None)
self.resample_func = kwargs.pop('resample_func', None)
self.use_reflection = kwargs.pop('use_reflection', True)
self.num_pdf = kwargs.pop('num_pdf', 101)
super().__init__(*args, **kwargs)
self._probability_of_target_volume = None
self._pdf = None
def reset(self):
super().reset()
self._probability_of_target_volume = None
self._pdf = None
def finish(self):
super().finish()
df = super().to_dataframe()
if ((self.resample_func is not None) and (self.resample_freq is not None)):
df = df.resample(self.resample_freq).agg(self.resample_func)
x = np.linspace(0.0, 1.0, self.num_pdf)
(p, pdf) = self._estimate_pdf_and_target_probability(df.values.flatten(), x)
self._probability_of_target_volume = p
self._pdf = pandas.DataFrame(data=pdf, index=x)
def values(self):
return self._pdf.values
def to_dataframe(self):
return self._pdf
def aggregated_value(self):
return self._probability_of_target_volume
def _estimate_pdf_and_target_probability(self, values, x) -> Tuple[(float, np.ndarray)]:
kernel = stats.gaussian_kde(values)
p = kernel.integrate_box_1d((- 1.0), 0.0)
pdf = kernel(x)
if self.use_reflection:
kernel_lb = stats.gaussian_kde(((- 2.0) - values))
p += kernel_lb.integrate_box_1d((- 1.0), 0.0)
pdf += kernel_lb(x)
kernel_ub = stats.gaussian_kde((2.0 - values))
p += kernel_ub.integrate_box_1d((- 1.0), 0.0)
pdf += kernel_ub(x)
        return (p, pdf)
def _remove_from_contactgroup(my_object, contactgroup):
if isinstance(contactgroup, six.string_types):
contactgroup = Contactgroup.objects.get_by_shortname(contactgroup)
contactgroup_name = contactgroup.contactgroup_name
if (my_object.object_type == 'contact'):
return _remove_object_from_group(my_object, contactgroup)
current_contactgroups = AttributeList(my_object.contact_groups)
if (contactgroup_name in current_contactgroups.fields):
my_object.attribute_removefield('contact_groups', contactgroup_name)
my_object.save()
return True
else:
        return False
def floats_tensor(shape, scale=1.0, rng=None, name=None):
if (rng is None):
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append((rng.random() * scale))
    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
class TestVersions():
def test_default_known(self, isolation):
builder = MockBuilder(str(isolation))
builder.PLUGIN_NAME = 'foo'
builder.get_version_api = (lambda : {'2': str, '1': str})
assert (builder.config.versions == builder.config.versions == ['2', '1'])
def test_default_override(self, isolation):
builder = MockBuilder(str(isolation))
builder.PLUGIN_NAME = 'foo'
builder.get_default_versions = (lambda : ['old', 'new', 'new'])
assert (builder.config.versions == builder.config.versions == ['old', 'new'])
def test_invalid_type(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ''}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(TypeError, match='Field `tool.hatch.build.targets.foo.versions` must be an array of strings'):
_ = builder.config.versions
def test_correct(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ['3.14', '1', '3.14']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
builder.get_version_api = (lambda : {'3.14': str, '42': str, '1': str})
assert (builder.config.versions == builder.config.versions == ['3.14', '1'])
def test_empty_default(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': []}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
builder.get_default_versions = (lambda : ['old', 'new'])
assert (builder.config.versions == builder.config.versions == ['old', 'new'])
def test_version_not_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': [1]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(TypeError, match='Version #1 in field `tool.hatch.build.targets.foo.versions` must be a string'):
_ = builder.config.versions
def test_version_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ['']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(ValueError, match='Version #1 in field `tool.hatch.build.targets.foo.versions` cannot be an empty string'):
_ = builder.config.versions
def test_unknown_version(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ['9000', '1', '42']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
builder.get_version_api = (lambda : {'1': str})
with pytest.raises(ValueError, match='Unknown versions in field `tool.hatch.build.targets.foo.versions`: 42, 9000'):
            _ = builder.config.versions
def recover_coef2(seed):
input_list = ['m', 'k', 'A0', 'c']
output_coef = 'k_coef'
D_in = np.mat('1, 0, 0; 1, 0, -2; 0, 1, 0; 1, 0, -1').T
    D_out = np.mat('0; 0; -1')
dimension_info = [D_in, D_out]
basis1_in = np.array([1, 1, 0, (- 2)]).reshape((- 1), 1)
basis2_in = np.array([0, 1, 0, (- 1)]).reshape((- 1), 1)
basis_list = [basis1_in, basis2_in]
dimensionless_learning = DimensionlessLearning(df, input_list, output_coef, dimension_info, basis_list)
(r2, coef, coef_w) = dimensionless_learning.fit_pattern_search(seed=seed)
if (r2 > 0.8):
        print('final r2', r2, coef.flatten(), coef_w)
class Effect4256(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, src, context, projectionRange, **kwargs):
groups = ('Missile Launcher Heavy', 'Missile Launcher Rapid Light', 'Missile Launcher Heavy Assault')
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name in groups)), 'speed', src.getModifiedItemAttr('subsystemBonusMinmatarOffensive2'), skill='Minmatar Offensive Systems', **kwargs)
class TestDynamoDBDict():
def test_to_dynamodb_dict(self):
dt = datetime(2022, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
test_model = DictTestModel()
test_model.number_attr = 1
test_model.unicode_attr = 'foo'
test_model.datetime_attr = dt
test_model.bool_attr = True
test_model.json_attr = {'foo': 'bar'}
test_model.raw_map_attr = {'binary': b'foo', 'string': 'bar'}
test_model.map_attr = DictTestMapAttribute(binary=b'foo', string='bar')
test_model.list_attr = [DictTestMapAttribute(binary=b'foo', string='bar')]
test_model.ttl_attr = dt
test_model.null_attr = True
test_model.binary_attr = b'foo'
test_model.binary_set_attr = [b'foo', b'bar']
test_model.number_set_attr = [1, 2, 3]
test_model.unicode_set_attr = ['foo', 'bar']
actual = test_model.to_dynamodb_dict()
assert (actual == {'binary_attr': {'B': 'Zm9v'}, 'binary_set_attr': {'BS': ['Zm9v', 'YmFy']}, 'bool_attr': {'BOOL': True}, 'datetime_attr': {'S': '2022-12-31T23:59:59.000000+0000'}, 'json_attr': {'S': '{"foo": "bar"}'}, 'list_attr': {'L': [{'M': {'binary': {'B': 'Zm9v'}, 'string': {'S': 'bar'}}}]}, 'map_attr': {'M': {'binary': {'B': 'Zm9v'}, 'string': {'S': 'bar'}}}, 'null_attr': {'NULL': True}, 'number_attr': {'N': '1'}, 'number_set_attr': {'NS': ['1', '2', '3']}, 'raw_map_attr': {'M': {'binary': {'B': 'Zm9v'}, 'string': {'S': 'bar'}}}, 'ttl_attr': {'N': ''}, 'unicode_attr': {'S': 'foo'}, 'unicode_set_attr': {'SS': ['foo', 'bar']}})
_ = json.dumps(actual)
def test_from_dynamodb_dict(self):
dynamodb_dict = {'binary_attr': {'B': 'Zm9v'}, 'binary_set_attr': {'BS': ['Zm9v', 'YmFy']}, 'bool_attr': {'BOOL': True}, 'datetime_attr': {'S': '2022-12-31T23:59:59.000000+0000'}, 'json_attr': {'S': '{"foo": "bar"}'}, 'map_attr': {'M': {'binary': {'B': 'Zm9v'}, 'string': {'S': 'bar'}}}, 'null_attr': {'NULL': True}, 'number_attr': {'N': '1'}, 'number_set_attr': {'NS': ['1', '2', '3']}, 'raw_map_attr': {'M': {'binary': {'B': 'Zm9v'}, 'string': {'S': 'bar'}}}, 'ttl_attr': {'N': ''}, 'unicode_attr': {'S': 'foo'}, 'unicode_set_attr': {'SS': ['foo', 'bar']}}
test_model = DictTestModel()
test_model.from_dynamodb_dict(dynamodb_dict)
expected_dt = datetime(2022, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
assert (test_model.binary_attr == b'foo')
assert (test_model.binary_set_attr == {b'foo', b'bar'})
assert (test_model.number_attr == 1)
assert (test_model.number_set_attr == {1, 2, 3})
assert (test_model.unicode_attr == 'foo')
assert (test_model.unicode_set_attr == {'foo', 'bar'})
assert (test_model.datetime_attr == expected_dt)
assert (test_model.bool_attr is True)
assert (test_model.json_attr == {'foo': 'bar'})
assert (test_model.map_attr.binary == b'foo')
assert (test_model.map_attr.string == 'bar')
assert (test_model.raw_map_attr.binary == b'foo')
assert (test_model.raw_map_attr.string == 'bar')
assert (test_model.ttl_attr == expected_dt)
        assert (test_model.null_attr is None)
def get_problem_graph(problem_type, n=None, instance_i=0):
if (n is None):
if (problem_type == 'HardwareGridProblem'):
n = 4
elif (problem_type == 'SKProblem'):
n = 3
elif (problem_type == 'ThreeRegularProblem'):
n = 4
else:
raise ValueError(repr(problem_type))
r = df_raw[(((df_raw['problem_type'] == problem_type) & (df_raw['n_qubits'] == n)) & (df_raw['instance_i'] == instance_i))]['problem']
    return r.iloc[0]
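# Scan pytest's short-failure output: a "_ [doctest]" header line names the
# failing file, and the next non-numeric line is recorded as its first failure.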
def extract_first_line_failure(failures_short_lines):
failures = {}
file = None
in_error = False
for line in failures_short_lines.split('\n'):
if re.search('_ \\[doctest\\]', line):
in_error = True
file = line.split(' ')[2]
elif (in_error and (not line.split(' ')[0].isdigit())):
failures[file] = line
in_error = False
    return failures
def test_create_df_from_collection(spark_context, spark_session):
input_data = [{'json_column': '{"abc": 123}', 'a': 123, 'b': 'abc'}]
output_df = create_df_from_collection(input_data, spark_context, spark_session)
target_df = spark_session.sql("select 123 as a, 'abc' as b, replace(to_json(named_struct('abc', 123)), ':', ': ') as json_column")
    assert_dataframe_equality(target_df, output_df)
class Dashboard():
def __init__(self, port):
self.vis = Visdom(port=port)
def loss(self, losses, title):
x = np.arange(1, (len(losses) + 1), 1)
self.vis.line(losses, x, env='loss', opts=dict(title=title))
def image(self, image, title):
if image.is_cuda:
image = image.cpu()
if isinstance(image, Variable):
image = image.data
image = image.numpy()
        self.vis.image(image, env='images', opts=dict(title=title))
class TransformerDecoderTest(TestFairseqDecoderBase):
def setUp(self):
super().setUp()
dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
decoder = TransformerDecoder(dict)
dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
self.setUpDecoder(decoder)
self.setUpInput(dummy_encoder_output)
        self.setUpPrevOutputTokens()
class SharedEvent():
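    # Counts nested users of the event: entering clears it, and wait() returns
    # only once every user has exited and the count drops back to zero.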
def __init__(self):
self._active_count = 0
self._event = asyncio.Event()
self._event.set()
def __enter__(self):
self._active_count += 1
self._event.clear()
def __exit__(self, _exc_type, _exc_val, _exc_tb):
self._active_count -= 1
if (not self._active_count):
self._event.set()
async def wait(self) -> None:
        (await self._event.wait())
class Servo(object):
pypilot_dir = (os.getenv('HOME') + '/.pypilot/')
calibration_filename = (pypilot_dir + 'servocalibration')
def __init__(self, client, sensors):
self.client = client
self.sensors = sensors
self.lastdir = 0
self.calibration = self.register(JSONValue, 'calibration', {})
self.load_calibration()
self.position_command = self.register(TimedProperty, 'position_command')
self.command = self.register(TimedProperty, 'command')
self.speed_gain = self.register(RangeProperty, 'speed_gain', 0, 0, 1)
self.duty = self.register(SensorValue, 'duty')
self.faults = self.register(ResettableValue, 'faults', 0, persistent=True)
self.voltage = self.register(SensorValue, 'voltage')
self.current = self.register(SensorValue, 'current')
self.current.noise = self.register(SensorValue, 'current.noise')
self.current.lasttime = time.monotonic()
self.controller_temp = self.register(TimeoutSensorValue, 'controller_temp')
self.motor_temp = self.register(TimeoutSensorValue, 'motor_temp')
self.engaged = self.register(BooleanValue, 'engaged', False)
self.max_current = self.register(RangeSetting, 'max_current', 4.5, 0, 50, 'amps')
self.current.factor = self.register(RangeProperty, 'current.factor', 1, 0.8, 1.2, persistent=True)
self.current.offset = self.register(RangeProperty, 'current.offset', 0, (- 1.2), 1.2, persistent=True)
self.voltage.factor = self.register(RangeProperty, 'voltage.factor', 1, 0.8, 1.2, persistent=True)
self.voltage.offset = self.register(RangeProperty, 'voltage.offset', 0, (- 1.2), 1.2, persistent=True)
self.max_controller_temp = self.register(RangeProperty, 'max_controller_temp', 60, 45, 80, persistent=True)
self.max_motor_temp = self.register(RangeProperty, 'max_motor_temp', 60, 30, 80, persistent=True)
self.max_slew_speed = self.register(RangeSetting, 'max_slew_speed', 28, 0, 100, '')
self.max_slew_slow = self.register(RangeSetting, 'max_slew_slow', 34, 0, 100, '')
self.gain = self.register(RangeProperty, 'gain', 1, (- 10), 10, persistent=True)
self.clutch_pwm = self.register(RangeProperty, 'clutch_pwm', 100, 10, 100, persistent=True)
self.use_brake = self.register(BooleanProperty, 'use_brake', False, persistent=True)
self.brake_on = False
self.period = self.register(RangeSetting, 'period', 0.4, 0.1, 3, 'sec', profiled=True)
self.compensate_current = self.register(BooleanProperty, 'compensate_current', False, persistent=True)
self.compensate_voltage = self.register(BooleanProperty, 'compensate_voltage', False, persistent=True)
self.amphours = self.register(ResettableValue, 'amp_hours', 0, persistent=True)
self.watts = self.register(SensorValue, 'watts')
self.speed = self.register(SensorValue, 'speed')
self.speed.min = self.register(MaxRangeSetting, 'speed.min', 100, 0, 100, '%', profiled=True)
self.speed.max = self.register(MinRangeSetting, 'speed.max', 100, 0, 100, '%', self.speed.min, profiled=True)
self.position = self.register(SensorValue, 'position')
self.position.elp = 0
self.position.set(0)
self.position.p = self.register(RangeProperty, 'position.p', 0.15, 0.01, 1, persistent=True)
self.position.i = self.register(RangeProperty, 'position.i', 0, 0, 0.1, persistent=True)
self.position.d = self.register(RangeProperty, 'position.d', 0.02, 0, 0.1, persistent=True)
self.rawcommand = self.register(SensorValue, 'raw_command')
self.use_eeprom = self.register(BooleanValue, 'use_eeprom', True, persistent=True)
self.inttime = 0
self.windup = 0
self.windup_change = 0
self.disengaged = True
self.force_engaged = False
self.last_zero_command_time = self.command_timeout = time.monotonic()
self.driver_timeout_start = 0
self.state = self.register(StringValue, 'state', 'none')
self.controller = self.register(StringValue, 'controller', 'none')
self.flags = self.register(ServoFlags, 'flags')
self.driver = False
self.raw_command(0)
def register(self, _type, name, *args, **kwargs):
return self.client.register(_type(*([('servo.' + name)] + list(args)), **kwargs))
def send_command(self):
t = time.monotonic()
dp = (t - self.position_command.time)
dc = (t - self.command.time)
if ((dp < dc) and (not self.sensors.rudder.invalid())):
self.disengaged = False
if (abs((self.position.value - self.position_command.value)) < 1):
self.command.command(0)
else:
self.do_position_command(self.position_command.value)
return
elif (self.command.value and (not self.fault())):
if (dc > 1):
self.command.command(0)
self.disengaged = False
self.do_command(self.command.value)
def do_position_command(self, position):
e = (position - self.position.value)
d = (self.speed.value * self.sensors.rudder.range.value)
self.position.elp = ((0.98 * self.position.elp) + (0.02 * min(max(e, (- 30)), 30)))
p = (self.position.p.value * e)
i = (self.position.i.value * self.position.elp)
d = (self.position.d.value * d)
pid = ((p + i) + d)
self.do_command(pid)
def do_command(self, speed):
t = time.monotonic()
dt = (t - self.inttime)
if self.force_engaged:
self.disengaged = False
else:
self.windup = 0
self.inttime = t
if self.fault():
self.stop()
if (not speed):
if ((not self.force_engaged) and ((time.monotonic() - self.command.time) > 1)):
self.disengaged = True
self.raw_command(0)
return
speed *= self.gain.value
if (((self.flags.value & (ServoFlags.PORT_OVERCURRENT_FAULT | ServoFlags.MAX_RUDDER_FAULT)) and (speed > 0)) or ((self.flags.value & (ServoFlags.STARBOARD_OVERCURRENT_FAULT | ServoFlags.MIN_RUDDER_FAULT)) and (speed < 0))):
self.stop()
return
rudder_range = self.sensors.rudder.range.value
if (self.position.value < (0.9 * rudder_range)):
self.flags.clearbit(ServoFlags.PORT_OVERCURRENT_FAULT)
if (self.position.value > ((- 0.9) * rudder_range)):
self.flags.clearbit(ServoFlags.STARBOARD_OVERCURRENT_FAULT)
if (self.compensate_voltage.value and self.voltage.value):
speed *= (12 / self.voltage.value)
min_speed = (self.speed.min.value / 100.0)
max_speed = (self.speed.max.value / 100.0)
min_speed += (((max_speed - min_speed) * self.duty.value) * self.speed_gain.value)
min_speed = min(min_speed, max_speed)
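        # Duty-cycle ("windup") control: integrate the gap between commanded and
        # applied speed; once it exceeds what one period at min_speed can deliver,
        # pulse the motor at min_speed in the appropriate direction.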
if self.command.use_period:
period = max(self.period.value, (2 * dt))
self.windup += ((speed - self.speed.value) * dt)
if (abs(self.windup) > ((period * min_speed) / 1.5)):
if (abs(speed) < min_speed):
speed = (min_speed if (self.windup > 0) else (- min_speed))
else:
speed = 0
max_windup = (1.5 * period)
if (abs(self.windup) > max_windup):
self.flags.setbit(ServoFlags.SATURATED)
self.windup = (max_windup * sign(self.windup))
else:
self.flags.clearbit(ServoFlags.SATURATED)
last_speed = self.speed.value
if (speed or last_speed):
m = (speed * last_speed)
if (m <= 0):
if ((t - self.windup_change) < self.period.value):
if (last_speed > 0):
speed = min_speed
elif (last_speed < 0):
speed = (- min_speed)
else:
speed = 0
else:
self.windup_change = t
if (m < 0):
speed = 0
speed = min(max(speed, (- max_speed)), max_speed)
self.speed.set(speed)
try:
if (speed > 0):
cal = self.calibration.value['port']
elif (speed < 0):
cal = self.calibration.value['starboard']
else:
self.raw_command(0)
return
command = (cal[0] + (abs(speed) * cal[1]))
except:
print(_('servo calibration invalid'), self.calibration.value)
self.calibration.set({'port': [0.2, 0.8], 'starboard': [0.2, 0.8]})
return
if (speed < 0):
command = (- command)
if self.sensors.rudder.invalid():
position = (self.position.value + ((command * dt) * rudder_range))
self.position.set(min(max(position, (- rudder_range)), rudder_range))
self.raw_command(command)
def stop(self):
self.brake_on = False
self.do_raw_command(0)
self.lastdir = 0
self.state.update('stop')
def raw_command(self, command):
self.brake_on = self.use_brake.value
self.do_raw_command(command)
if (command <= 0):
if (command < 0):
self.state.update('starboard')
self.lastdir = (- 1)
else:
self.speed.set(0)
if self.brake_on:
self.state.update('brake')
else:
self.state.update('idle')
else:
self.state.update('port')
self.lastdir = 1
def do_raw_command(self, command):
self.rawcommand.set(command)
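        # Low-pass filter the on/off command stream into a 0..1 duty estimate.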
lp = 0.001
self.duty.set(((lp * int((not (not command)))) + ((1 - lp) * self.duty.value)))
t = time.monotonic()
if (command == 0):
if ((t > (self.command_timeout + 1)) and ((t - self.last_zero_command_time) < 0.2)):
return
self.last_zero_command_time = t
else:
self.command_timeout = t
if self.driver:
if self.disengaged:
self.send_driver_params()
self.driver.disengage()
else:
self.driver.command(command)
mul = 1
if ((self.flags.value & ServoFlags.PORT_OVERCURRENT_FAULT) or (self.flags.value & ServoFlags.STARBOARD_OVERCURRENT_FAULT)):
mul = 2
self.send_driver_params(mul)
if self.current.value:
self.flags.clearbit(ServoFlags.DRIVER_TIMEOUT)
self.driver_timeout_start = 0
elif command:
if self.driver_timeout_start:
if ((t - self.driver_timeout_start) > 1):
self.flags.setbit(ServoFlags.DRIVER_TIMEOUT)
else:
self.driver_timeout_start = t
def reset(self):
if self.driver:
self.driver.reset()
def close_driver(self):
self.controller.update('none')
self.sensors.rudder.update(False)
try:
self.device.timeout = 0
except:
pass
fcntl.ioctl(self.device.fileno(), TIOCNXCL)
self.device.close()
self.driver = False
def send_driver_params(self, mul=1):
uncorrected_max_current = (max(0, (self.max_current.value - self.current.offset.value)) / self.current.factor.value)
minmax = self.sensors.rudder.minmax
self.driver.params((mul * uncorrected_max_current), minmax[0], minmax[1], self.max_current.value, self.max_controller_temp.value, self.max_motor_temp.value, self.sensors.rudder.range.value, self.sensors.rudder.offset.value, self.sensors.rudder.scale.value, self.sensors.rudder.nonlinearity.value, self.max_slew_speed.value, self.max_slew_slow.value, self.current.factor.value, self.current.offset.value, self.voltage.factor.value, self.voltage.offset.value, self.speed.min.value, self.speed.max.value, self.gain.value, self.clutch_pwm.value, self.brake_on)
def poll(self):
if (not self.driver):
device_path = serialprobe.probe('servo', [38400], 5)
if device_path:
print('servo probe', device_path, time.monotonic())
try:
device = serial.Serial(*device_path)
except Exception as e:
print(_('failed to open servo on:'), device_path, e)
return
try:
device.timeout = 0
fcntl.ioctl(device.fileno(), TIOCEXCL)
except Exception as e:
print(_('failed set nonblocking/exclusive'), e)
device.close()
return
from pypilot.arduino_servo.arduino_servo import ArduinoServo
self.driver = ArduinoServo(device.fileno())
self.send_driver_params()
self.device = device
self.device.path = device_path[0]
self.lastpolltime = time.monotonic()
if (not self.driver):
return
result = self.driver.poll()
if (result == (- 1)):
print('servo lost')
self.close_driver()
return
t = time.monotonic()
if (result == 0):
d = (t - self.lastpolltime)
if (d > 4):
self.close_driver()
else:
self.lastpolltime = t
if (self.controller.value == 'none'):
device_path = [self.device.port, self.device.baudrate]
print(('arduino servo ' + _('found')), device_path)
serialprobe.success('servo', device_path)
self.controller.set('arduino')
self.driver.disengage()
if (result & ServoTelemetry.VOLTAGE):
corrected_voltage = (self.voltage.factor.value * self.driver.voltage)
corrected_voltage += self.voltage.offset.value
self.voltage.set(round(corrected_voltage, 3))
if (result & ServoTelemetry.CONTROLLER_TEMP):
self.controller_temp.set(self.driver.controller_temp)
if (result & ServoTelemetry.MOTOR_TEMP):
self.motor_temp.set(self.driver.motor_temp)
if (result & ServoTelemetry.RUDDER):
if self.driver.rudder:
if math.isnan(self.driver.rudder):
if (self.sensors.rudder.source.value == 'servo'):
self.sensors.lostsensor(self.sensors.rudder)
else:
data = {'angle': self.driver.rudder, 'timestamp': t, 'device': self.device.path}
self.sensors.write('rudder', data, 'servo')
if (result & ServoTelemetry.CURRENT):
if (self.driver.current < (self.current.noise.value * 1.2)):
self.driver.current = 0
elif (self.driver.current and ((t - self.command_timeout) > 3)):
self.current.noise.update(min(max(self.current.noise.value, self.driver.current), 1))
corrected_current = (self.current.factor.value * self.driver.current)
if self.driver.current:
corrected_current = max(0, (corrected_current + self.current.offset.value))
self.current.set(round(corrected_current, 3))
dt = (t - self.current.lasttime)
self.current.lasttime = t
if ((dt > 0.01) and (dt < 0.5)):
if self.current.value:
amphours = ((self.current.value * dt) / 3600)
self.amphours.set((self.amphours.value + amphours))
lp = (0.003 * dt)
self.watts.set((((1 - lp) * self.watts.value) + ((lp * self.voltage.value) * self.current.value)))
if (result & ServoTelemetry.FLAGS):
self.max_current.set_max((50 if (self.driver.flags & ServoFlags.CURRENT_RANGE) else 20))
flags = ((self.flags.value & (~ ServoFlags.DRIVER_MASK)) | self.driver.flags)
angle = self.sensors.rudder.angle.value
if angle:
if (abs(angle) > self.sensors.rudder.range.value):
if (angle > 0):
flags |= ServoFlags.MAX_RUDDER_FAULT
else:
flags |= ServoFlags.MIN_RUDDER_FAULT
self.flags.update(flags)
self.engaged.update((not (not (self.driver.flags & ServoFlags.ENGAGED))))
if ((result & ServoTelemetry.EEPROM) and self.use_eeprom.value):
self.max_current.set(self.driver.max_current)
self.max_controller_temp.set(self.driver.max_controller_temp)
self.max_motor_temp.set(self.driver.max_motor_temp)
self.max_slew_speed.set(self.driver.max_slew_speed)
self.max_slew_slow.set(self.driver.max_slew_slow)
self.sensors.rudder.scale.set(self.driver.rudder_scale)
self.sensors.rudder.nonlinearity.set(self.driver.rudder_nonlinearity)
self.sensors.rudder.offset.set(self.driver.rudder_offset)
self.sensors.rudder.range.set(self.driver.rudder_range)
self.sensors.rudder.update_minmax()
self.current.factor.set(self.driver.current_factor)
self.current.offset.set(self.driver.current_offset)
self.voltage.factor.set(self.driver.voltage_factor)
self.voltage.offset.set(self.driver.voltage_offset)
self.speed.min.set(self.driver.min_speed)
self.speed.max.set(self.driver.max_speed)
self.gain.set(self.driver.gain)
self.clutch_pwm.set(self.driver.clutch_pwm)
if self.fault():
if ((not (self.flags.value & ServoFlags.PORT_OVERCURRENT_FAULT)) and (not (self.flags.value & ServoFlags.STARBOARD_OVERCURRENT_FAULT))):
self.faults.set((self.faults.value + 1))
if (self.flags.value & ServoFlags.OVERCURRENT_FAULT):
if (self.lastdir > 0):
self.flags.port_overcurrent_fault()
elif (self.lastdir < 0):
self.flags.starboard_overcurrent_fault()
if (self.sensors.rudder.invalid() and self.lastdir):
rudder_range = self.sensors.rudder.range.value
self.position.set((self.lastdir * rudder_range))
self.reset()
if (not self.sensors.rudder.invalid()):
self.position.set(self.sensors.rudder.angle.value)
self.send_command()
self.controller_temp.timeout()
self.motor_temp.timeout()
def fault(self):
if (not self.driver):
return False
return self.driver.fault()
def load_calibration(self):
import pyjson
try:
filename = Servo.calibration_filename
print(_('loading servo calibration'), filename)
file = open(filename)
self.calibration.set(pyjson.loads(file.readline()))
except:
print(_('WARNING: using default servo calibration!!'))
self.calibration.set(False)
def save_calibration(self):
        file = open(Servo.calibration_filename, 'w')
        file.write(pyjson.dumps(self.calibration.value))
def write_manifest_stats_file(bucket: str, column_name: str, manifest_entry_stats: ManifestEntryStats) -> None:
logger.info(f'writing stats completion file contents: {manifest_entry_stats}')
stats_completion_file_s3_url = get_manifest_stats_s3_url(bucket, column_name, manifest_entry_stats.delta_locator)
logger.info(f'writing stats completion file to: {stats_completion_file_s3_url}')
s3_utils.upload(stats_completion_file_s3_url, str(json.dumps(manifest_entry_stats)))
    logger.info(f'stats completion file written to: {stats_completion_file_s3_url}')
class TestPassportBase():
driver_license_selfie_file_id = 'DgADBAADEQQAAkopgFNr6oi-wISRtAI'
driver_license_selfie_file_unique_id = 'd4e390cca57b4da5a65322b304762a12'
driver_license_front_side_file_id = 'DgADBAADxwMAApnQgVPK2-ckL2eXVAI'
driver_license_front_side_file_unique_id = 'd9d52a700cbb4a189a80104aa5978133'
driver_license_reverse_side_file_id = 'DgADBAADNQQAAtoagFPf4wwmFZdmyQI'
driver_license_reverse_side_file_unique_id = 'adc3145fd2e84d95b64d68eaa22aa33e'
driver_license_translation_1_file_id = 'DgADBAADswMAAisqQVAmooP-kVgLgAI'
driver_license_translation_1_file_unique_id = '52a90d53d6064bb58feb582acdc3a324'
driver_license_translation_2_file_id = 'DgADBAAD1QMAAnrpQFBMZsT3HysjwwI'
driver_license_translation_2_file_unique_id = '7285f864d168441ba1f7d'
utility_bill_1_file_id = 'DgADBAADLAMAAhwfgVMyfGa5Nr0LvAI'
utility_bill_1_file_unique_id = 'bbaaa3ec57ee4ce7a'
utility_bill_2_file_id = 'DgADBAADaQQAAsFxgVNVfLZuT-_3ZQI'
utility_bill_2_file_unique_id = '19a12ae34dca424b85e0308f706cee75'
utility_bill_translation_1_file_id = 'DgADBAADyQUAAqyqQVC_eoX_KwNjJwI'
utility_bill_translation_1_file_unique_id = '38b2877b443542cbaf520c6e36a33ac4'
utility_bill_translation_2_file_id = 'DgADBAADsQQAAubTQVDRO_FN3lOwWwI'
utility_bill_translation_2_file_unique_id = 'f008ca48c44b4a47895ddbcd2f76741e'
driver_license_selfie_credentials_file_hash = 'Cila/qLXSBH7DpZFbb5bRZIRxeFW2uv/ulL0u0JNsYI='
    driver_license_selfie_credentials_secret = 'tivdId6RNYNsvXYPppdzrbxOBuBOr9wXRPDcCvnXU7E='
class Effect2489(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Remote Tracking Computer')), 'falloffEffectiveness', ship.getModifiedItemAttr('shipBonusMC'), skill='Minmatar Cruiser', **kwargs)
@mock.patch('social_core.backends.base.BaseAuth.request', side_effect=MockAuthCanceled)
class TestMiddleware(TestCase):
def setUp(self):
session = self.client.session
session['facebook_state'] = '1'
session.save()
self.complete_url = reverse('social:complete', kwargs={'backend': 'facebook'})
self.complete_url += '?code=2&state=1'
def test_exception(self, mocked):
with self.assertRaises(MockAuthCanceled):
self.client.get(self.complete_url)
    @override_settings(DEBUG=True)
def test_exception_debug(self, mocked):
logging.disable(logging.CRITICAL)
with self.assertRaises(MockAuthCanceled):
self.client.get(self.complete_url)
logging.disable(logging.NOTSET)
    @override_settings(SOCIAL_AUTH_LOGIN_ERROR_URL='/')
def test_login_error_url(self, mocked):
response = self.client.get(self.complete_url)
self.assertTrue(isinstance(response, HttpResponseRedirect))
self.assertEqual(response.url, '/')
    @override_settings(SOCIAL_AUTH_LOGIN_ERROR_URL='/')
    @mock.patch('django.contrib.messages.error', side_effect=MessageFailure)
def test_message_failure(self, mocked_request, mocked_error):
response = self.client.get(self.complete_url)
self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/?message=Authentication%20process%20canceled&backend=facebook')
@PublicAPI(stability='beta')
def train(params: Dict, dtrain: RayDMatrix, num_boost_round: int=10, *args, evals: Union[(List[Tuple[(RayDMatrix, str)]], Tuple[(RayDMatrix, str)])]=(), evals_result: Optional[Dict]=None, additional_results: Optional[Dict]=None, ray_params: Union[(None, RayParams, Dict)]=None, _remote: Optional[bool]=None, **kwargs) -> xgb.Booster:
os.environ.setdefault('RAY_IGNORE_UNHANDLED_ERRORS', '1')
if (platform.system() == 'Windows'):
raise RuntimeError('xgboost-ray training currently does not support Windows.')
if (xgb is None):
raise ImportError('xgboost package is not installed. XGBoost-Ray WILL NOT WORK. FIX THIS by running `pip install "xgboost-ray"`.')
if (_remote is None):
_remote = (_is_client_connected() and (not is_session_enabled()))
if (not ray.is_initialized()):
ray.init()
if _remote:
        @ray.remote(num_cpus=0)
def _wrapped(*args, **kwargs):
_evals_result = {}
_additional_results = {}
bst = train(*args, num_boost_round=num_boost_round, evals_result=_evals_result, additional_results=_additional_results, **kwargs)
return (bst, _evals_result, _additional_results)
_wrapped = force_on_current_node(_wrapped)
(bst, train_evals_result, train_additional_results) = ray.get(_wrapped.remote(params, dtrain, *args, evals=evals, ray_params=ray_params, _remote=False, **kwargs))
if isinstance(evals_result, dict):
evals_result.update(train_evals_result)
if isinstance(additional_results, dict):
additional_results.update(train_additional_results)
return bst
_maybe_print_legacy_warning()
_validate_kwargs_for_func(kwargs, xgb.train, 'xgb.train()')
start_time = time.time()
ray_params = _validate_ray_params(ray_params)
max_actor_restarts = (ray_params.max_actor_restarts if (ray_params.max_actor_restarts >= 0) else float('inf'))
_assert_ray_support()
if (not isinstance(dtrain, RayDMatrix)):
raise ValueError('The `dtrain` argument passed to `train()` is not a RayDMatrix, but of type {}. \nFIX THIS by instantiating a RayDMatrix first: `dtrain = RayDMatrix(data=data, label=label)`.'.format(type(dtrain)))
added_tune_callback = _try_add_tune_callback(kwargs)
if (added_tune_callback and ray_params.elastic_training and (not bool(os.getenv('RXGB_ALLOW_ELASTIC_TUNE', '0')))):
raise ValueError('Elastic Training cannot be used with Ray Tune. Please disable elastic_training in RayParams in order to use xgboost_ray with Tune.')
if (added_tune_callback or get_current_placement_group()):
cpus_per_actor = ray_params.cpus_per_actor
gpus_per_actor = max(0, ray_params.gpus_per_actor)
else:
(cpus_per_actor, gpus_per_actor) = _autodetect_resources(ray_params=ray_params, use_tree_method=(('tree_method' in params) and (params['tree_method'] is not None) and params['tree_method'].startswith('gpu')))
tree_method = (params.get('tree_method', 'auto') or 'auto')
if (tree_method == 'exact'):
raise ValueError("`exact` tree method doesn't support distributed training.")
if (params.get('updater', None) == 'grow_colmaker'):
raise ValueError("`grow_colmaker` updater doesn't support distributed training.")
if ((gpus_per_actor > 0) and (not tree_method.startswith('gpu_'))):
warnings.warn(f'GPUs have been assigned to the actors, but the current XGBoost tree method is set to `{tree_method}`. Thus, GPUs will currently not be used. To enable GPUs usage, please set the `tree_method` to a GPU-compatible option, e.g. `gpu_hist`.')
if ((gpus_per_actor == 0) and (cpus_per_actor == 0)):
raise ValueError('cpus_per_actor and gpus_per_actor both cannot be 0. Are you sure your cluster has CPUs available?')
if (ray_params.elastic_training and (ray_params.max_failed_actors == 0)):
raise ValueError('Elastic training enabled but the maximum number of failed actors is set to 0. This means that elastic training is effectively disabled. Please set `RayParams.max_failed_actors` to something larger than 0 to enable elastic training.')
if (ray_params.elastic_training and (ray_params.max_actor_restarts == 0)):
raise ValueError('Elastic training enabled but the maximum number of actor restarts is set to 0. This means that elastic training is effectively disabled. Please set `RayParams.max_actor_restarts` to something larger than 0 to enable elastic training.')
if (not dtrain.has_label):
raise ValueError('Training data has no label set. Please make sure to set the `label` argument when initializing `RayDMatrix()` for data you would like to train on.')
if ((not dtrain.loaded) and (not dtrain.distributed)):
dtrain.load_data(ray_params.num_actors)
for (deval, _name) in evals:
if (not deval.has_label):
raise ValueError('Evaluation data has no label set. Please make sure to set the `label` argument when initializing `RayDMatrix()` for data you would like to evaluate on.')
if ((not deval.loaded) and (not deval.distributed)):
deval.load_data(ray_params.num_actors)
bst = None
train_evals_result = {}
train_additional_results = {}
tries = 0
checkpoint = _Checkpoint()
current_results = {}
actors = ([None] * ray_params.num_actors)
pending_actors = {}
(queue, stop_event) = _create_communication_processes(added_tune_callback)
placement_strategy = None
if (not ray_params.elastic_training):
if (added_tune_callback or get_current_placement_group()):
placement_strategy = None
elif bool(ENV.USE_SPREAD_STRATEGY):
placement_strategy = 'SPREAD'
if (placement_strategy is not None):
pg = _create_placement_group(cpus_per_actor, gpus_per_actor, ray_params.resources_per_actor, ray_params.num_actors, placement_strategy)
else:
pg = None
start_actor_ranks = set(range(ray_params.num_actors))
total_training_time = 0.0
boost_rounds_left = num_boost_round
last_checkpoint_value = checkpoint.value
while (tries <= max_actor_restarts):
if ((checkpoint.iteration >= 0) and (checkpoint.value != last_checkpoint_value)):
boost_rounds_left -= (checkpoint.iteration + 1)
last_checkpoint_value = checkpoint.value
logger.debug(f'Boost rounds left: {boost_rounds_left}')
training_state = _TrainingState(actors=actors, queue=queue, stop_event=stop_event, checkpoint=checkpoint, additional_results=current_results, training_started_at=0.0, placement_group=pg, failed_actor_ranks=start_actor_ranks, pending_actors=pending_actors)
try:
(bst, train_evals_result, train_additional_results) = _train(params, dtrain, boost_rounds_left, *args, evals=evals, ray_params=ray_params, cpus_per_actor=cpus_per_actor, gpus_per_actor=gpus_per_actor, _training_state=training_state, **kwargs)
if (training_state.training_started_at > 0.0):
total_training_time += (time.time() - training_state.training_started_at)
break
except (RayActorError, RayTaskError) as exc:
if (training_state.training_started_at > 0.0):
total_training_time += (time.time() - training_state.training_started_at)
alive_actors = sum((1 for a in actors if (a is not None)))
start_again = False
if ray_params.elastic_training:
if (alive_actors < (ray_params.num_actors - ray_params.max_failed_actors)):
raise RuntimeError('A Ray actor died during training and the maximum number of dead actors in elastic training was reached. Shutting down training.') from exc
start_actor_ranks.clear()
if (exc.__cause__ and isinstance(exc.__cause__, RayXGBoostActorAvailable)):
logger.info(f'A new actor became available. Re-starting training from latest checkpoint with new actor. This will use {alive_actors} existing actors and start {len(start_actor_ranks)} new actors. Sleeping for 10 seconds for cleanup.')
tries -= 1
start_again = True
elif ((tries + 1) <= max_actor_restarts):
if (exc.__cause__ and isinstance(exc.__cause__, RayXGBoostTrainingError)):
logger.warning(f'Caught exception: {exc.__cause__}')
logger.warning(f'A Ray actor died during training. Trying to continue training on the remaining actors. This will use {alive_actors} existing actors and start {len(start_actor_ranks)} new actors. Sleeping for 10 seconds for cleanup.')
start_again = True
elif ((tries + 1) <= max_actor_restarts):
if (exc.__cause__ and isinstance(exc.__cause__, RayXGBoostTrainingError)):
logger.warning(f'Caught exception: {exc.__cause__}')
logger.warning(f'A Ray actor died during training. Trying to restart and continue training from last checkpoint (restart {(tries + 1)} of {max_actor_restarts}). This will use {alive_actors} existing actors and start {len(start_actor_ranks)} new actors. Sleeping for 10 seconds for cleanup.')
start_again = True
if start_again:
time.sleep(5)
queue.shutdown()
stop_event.shutdown()
time.sleep(5)
(queue, stop_event) = _create_communication_processes()
else:
raise RuntimeError(f'A Ray actor died during training and the maximum number of retries ({max_actor_restarts}) is exhausted.') from exc
tries += 1
total_time = (time.time() - start_time)
train_additional_results['training_time_s'] = total_training_time
train_additional_results['total_time_s'] = total_time
if ray_params.verbose:
maybe_log = logger.info
else:
maybe_log = logger.debug
maybe_log('[RayXGBoost] Finished XGBoost training on training data with total N={total_n:,} in {total_time_s:.2f} seconds ({training_time_s:.2f} pure XGBoost training time).'.format(**train_additional_results))
_shutdown(actors=actors, pending_actors=pending_actors, queue=queue, event=stop_event, placement_group=pg, force=False)
if isinstance(evals_result, dict):
evals_result.update(train_evals_result)
if isinstance(additional_results, dict):
additional_results.update(train_additional_results)
    return bst
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog=prog, max_help_position=28)
def _fill_text(self, text: str, width: int, indent: str) -> str:
if ('\n' in text):
return super()._fill_text(text, width, indent)
else:
            return argparse.HelpFormatter._fill_text(self, text, width, indent)
class ResizeLongestSide():
def __init__(self, target_length: int) -> None:
self.target_length = target_length
def apply_image(self, image: np.ndarray) -> np.ndarray:
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return np.array(resize(to_pil_image(image), target_size))
def apply_coords(self, coords: np.ndarray, original_size: Tuple[(int, ...)]) -> np.ndarray:
(old_h, old_w) = original_size
(new_h, new_w) = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
coords = deepcopy(coords).astype(float)
coords[(..., 0)] = (coords[(..., 0)] * (new_w / old_w))
coords[(..., 1)] = (coords[(..., 1)] * (new_h / old_h))
return coords
def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[(int, ...)]) -> np.ndarray:
boxes = self.apply_coords(boxes.reshape((- 1), 2, 2), original_size)
return boxes.reshape((- 1), 4)
def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
return F.interpolate(image, target_size, mode='bilinear', align_corners=False, antialias=True)
def apply_coords_torch(self, coords: torch.Tensor, original_size: Tuple[(int, ...)]) -> torch.Tensor:
(old_h, old_w) = original_size
(new_h, new_w) = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
coords = deepcopy(coords).to(torch.float)
coords[(..., 0)] = (coords[(..., 0)] * (new_w / old_w))
coords[(..., 1)] = (coords[(..., 1)] * (new_h / old_h))
return coords
def apply_boxes_torch(self, boxes: torch.Tensor, original_size: Tuple[(int, ...)]) -> torch.Tensor:
boxes = self.apply_coords_torch(boxes.reshape((- 1), 2, 2), original_size)
return boxes.reshape((- 1), 4)
@staticmethod
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[(int, int)]:
scale = ((long_side_length * 1.0) / max(oldh, oldw))
(newh, neww) = ((oldh * scale), (oldw * scale))
neww = int((neww + 0.5))
newh = int((newh + 0.5))
return (newh, neww) |
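# Usage sketch for ResizeLongestSide above (a hedged illustration, not part of the
# original snippet): the longer image side is scaled to target_length and coordinates
# are rescaled with the same factors. The sizes and points below are made up; numpy
# and the snippet's stripped imports (deepcopy, Tuple, etc.) are assumed available.
import numpy as np
resizer = ResizeLongestSide(target_length=1024)
# An 800x600 image maps to (1024, 768): scale = 1024 / 800 = 1.28.
assert resizer.get_preprocess_shape(800, 600, 1024) == (1024, 768)
pts = np.array([[400.0, 300.0]])
print(resizer.apply_coords(pts, original_size=(800, 600)))  # -> [[512. 384.]] |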
class ConditionalExpressionNode(ExpressionNode):
def __init__(self, condition, left, right):
self.condition = condition
self.left = left
self.right = right
def evaluate(self, context):
if self.condition.evaluate(context):
return self.left.evaluate(context)
else:
return self.right.evaluate(context)
def __str__(self):
return ('(%s ? %s : %s)' % (self.condition, self.left, self.right)) |
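# Usage sketch for ConditionalExpressionNode above. The ExpressionNode base is not
# shown in this snippet, so _Const is a hypothetical stand-in exposing the same
# evaluate(context)/__str__ contract.
class _Const:
    def __init__(self, value):
        self.value = value
    def evaluate(self, context):
        return self.value
    def __str__(self):
        return repr(self.value)
node = ConditionalExpressionNode(_Const(True), _Const('yes'), _Const('no'))
print(node.evaluate({}))  # -> 'yes'
print(node)  # -> (True ? 'yes' : 'no') |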
def test_timedynamic_geo_json():
import geopandas as gpd
assert ('naturalearth_lowres' in gpd.datasets.available)
datapath = gpd.datasets.get_path('naturalearth_lowres')
gdf = gpd.read_file(datapath)
n_periods = 3
dt_range = pd.Series(pd.date_range('2001-08-1', periods=n_periods, freq='M'))
dt_index = [f'{dt.timestamp():.0f}' for dt in dt_range]
styledata = {}
for country in gdf.index:
pdf = pd.DataFrame({'color': np.random.normal(size=n_periods), 'opacity': np.random.normal(size=n_periods)}, index=dt_index)
styledata[country] = pdf.cumsum()
(max_color, min_color) = (0, 0)
for (country, data) in styledata.items():
max_color = max(max_color, data['color'].max())
min_color = min(min_color, data['color'].min())
cmap = linear.PuRd_09.scale(min_color, max_color)
def norm(col):
return ((col - col.min()) / (col.max() - col.min()))
for (country, data) in styledata.items():
data['color'] = data['color'].apply(cmap)
data['opacity'] = norm(data['opacity'])
styledict = {str(country): data.to_dict(orient='index') for (country, data) in styledata.items()}
m = folium.Map((0, 0), zoom_start=2)
time_slider_choropleth = TimeSliderChoropleth(gdf.to_json(), styledict)
time_slider_choropleth.add_to(m)
rendered = time_slider_choropleth._template.module.script(time_slider_choropleth)
m._repr_html_()
out = normalize(m._parent.render())
assert ('<script src="' in out)
expected_timestamps = sorted(dt_index, key=int)
expected_timestamps = f'let timestamps = {expected_timestamps};'
expected_timestamps = expected_timestamps.split(';')[0].strip().replace("'", '"')
rendered_timestamps = rendered.strip(' \n{').split(';')[0].strip()
assert (expected_timestamps == rendered_timestamps)
expected_styledict = normalize(json.dumps(styledict, sort_keys=True))
assert (expected_styledict in normalize(rendered)) |
def _pause() -> None:
player.pause()
try:
current_song = models.CurrentSong.objects.get()
current_song.last_paused = timezone.now()
current_song.save()
except models.CurrentSong.DoesNotExist:
pass
storage.put('paused', True)
redis.put('paused', True) |
@pytest.mark.parametrize(['sparse', 'dtype'], [pytest.param(True, 'csr', id='sparse'), pytest.param(False, 'csr', id='sparse2dense'), pytest.param(False, 'dense', id='dense')])
def test_eigen_small(sparse, dtype):
H = (qutip.sigmax() + qutip.sigmaz()).to(dtype)
all_spvals = H.eigenenergies(sparse=sparse)
(spvals, spvecs) = H.eigenstates(sparse=sparse, eigvals=1)
assert (np.abs((all_spvals[0] - spvals[0])) <= 1e-14)
is_eigen_set(H, spvals, spvecs) |
def word_ngrams_indices(s, n):
tokens_with_indices = split_indices(s)
ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n)
ngram_indices_pairs = (zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices)
return ((' '.join(ngram_seq), (indices[0][0], indices[(- 1)][1])) for (ngram_seq, indices) in ngram_indices_pairs) |
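# word_ngrams_indices above depends on split_indices and form_ngrams, which are not
# shown; the minimal versions below are assumptions matching how they are used:
# split_indices yields (token, (start, end)) pairs and form_ngrams yields sliding
# windows of n consecutive items.
import re
from itertools import tee
def split_indices(s):
    return ((m.group(), (m.start(), m.end())) for m in re.finditer(r'\S+', s))
def form_ngrams(iterable, n):
    iters = tee(iterable, n)
    for (i, it) in enumerate(iters):
        for _ in range(i):
            next(it, None)  # offset the i-th copy by i tokens
    return zip(*iters)
print(list(word_ngrams_indices('a bb ccc', 2)))
# -> [('a bb', (0, 4)), ('bb ccc', (2, 8))] |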
class DrudeLorentzPadeBath(BosonicBath):
def __init__(self, Q, lam, gamma, T, Nk, combine=True, tag=None):
(eta_p, gamma_p) = self._corr(lam=lam, gamma=gamma, T=T, Nk=Nk)
ck_real = [np.real(eta) for eta in eta_p]
vk_real = [gam for gam in gamma_p]
ck_imag = [np.imag(eta_p[0])]
vk_imag = [gamma_p[0]]
super().__init__(Q, ck_real, vk_real, ck_imag, vk_imag, combine=combine, tag=tag)
self._dl_terminator = _DrudeLorentzTerminator(Q=Q, lam=lam, gamma=gamma, T=T)
def terminator(self):
(delta, L) = self._dl_terminator.terminator(self.exponents)
return (delta, L)
def _corr(self, lam, gamma, T, Nk):
beta = (1.0 / T)
(kappa, epsilon) = self._kappa_epsilon(Nk)
eta_p = [((lam * gamma) * (self._cot(((gamma * beta) / 2.0)) - 1j))]
gamma_p = [gamma]
for ll in range(1, (Nk + 1)):
eta_p.append(((((((kappa[ll] / beta) * 4) * lam) * gamma) * (epsilon[ll] / beta)) / (((epsilon[ll] ** 2) / (beta ** 2)) - (gamma ** 2))))
gamma_p.append((epsilon[ll] / beta))
return (eta_p, gamma_p)
def _cot(self, x):
return (1.0 / np.tan(x))
def _kappa_epsilon(self, Nk):
eps = self._calc_eps(Nk)
chi = self._calc_chi(Nk)
kappa = [0]
prefactor = ((0.5 * Nk) * ((2 * (Nk + 1)) + 1))
for j in range(Nk):
term = prefactor
for k in range((Nk - 1)):
term *= (((chi[k] ** 2) - (eps[j] ** 2)) / (((eps[k] ** 2) - (eps[j] ** 2)) + self._delta(j, k)))
for k in [(Nk - 1)]:
term /= (((eps[k] ** 2) - (eps[j] ** 2)) + self._delta(j, k))
kappa.append(term)
epsilon = ([0] + eps)
return (kappa, epsilon)
def _delta(self, i, j):
return (1.0 if (i == j) else 0.0)
def _calc_eps(self, Nk):
alpha = np.diag([(1.0 / np.sqrt((((2 * k) + 5) * ((2 * k) + 3)))) for k in range(((2 * Nk) - 1))], k=1)
alpha += alpha.transpose()
evals = eigvalsh(alpha)
eps = [((- 2.0) / val) for val in evals[0:Nk]]
return eps
def _calc_chi(self, Nk):
alpha_p = np.diag([(1.0 / np.sqrt((((2 * k) + 7) * ((2 * k) + 5)))) for k in range(((2 * Nk) - 2))], k=1)
alpha_p += alpha_p.transpose()
evals = eigvalsh(alpha_p)
chi = [((- 2.0) / val) for val in evals[0:(Nk - 1)]]
return chi |
class TestSerialise(TestCase):
def test_symbol_encoder_symbol(self):
(a, a_dict) = scalar_var_dict()
a_ser_json = Serialise._SymbolEncoder().default(a)
self.assertEqual(a_ser_json, a_dict)
add = pybamm.Addition(2, 4)
add_json = {'py/id': mock.ANY, 'py/object': 'pybamm.expression_tree.binary_operators.Addition', 'name': '+', 'id': mock.ANY, 'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': [{'py/id': mock.ANY, 'py/object': 'pybamm.expression_tree.scalar.Scalar', 'name': '2.0', 'id': mock.ANY, 'value': 2.0, 'children': []}, {'py/id': mock.ANY, 'py/object': 'pybamm.expression_tree.scalar.Scalar', 'name': '4.0', 'id': mock.ANY, 'value': 4.0, 'children': []}]}
add_ser_json = Serialise._SymbolEncoder().default(add)
self.assertEqual(add_ser_json, add_json)
def test_symbol_encoder_explicitTimeIntegral(self):
expr = pybamm.ExplicitTimeIntegral(pybamm.Scalar(5), pybamm.Scalar(1))
expr_json = {'py/object': 'pybamm.expression_tree.unary_operators.ExplicitTimeIntegral', 'py/id': mock.ANY, 'name': 'explicit time integral', 'id': mock.ANY, 'children': [{'py/object': 'pybamm.expression_tree.scalar.Scalar', 'py/id': mock.ANY, 'name': '5.0', 'id': mock.ANY, 'value': 5.0, 'children': []}], 'initial_condition': {'py/object': 'pybamm.expression_tree.scalar.Scalar', 'py/id': mock.ANY, 'name': '1.0', 'id': mock.ANY, 'value': 1.0, 'children': []}}
expr_ser_json = Serialise._SymbolEncoder().default(expr)
self.assertEqual(expr_json, expr_ser_json)
def test_symbol_encoder_event(self):
expression = pybamm.Scalar(1)
event = pybamm.Event('my event', expression)
event_json = {'py/object': 'pybamm.models.event.Event', 'py/id': mock.ANY, 'name': 'my event', 'event_type': ['EventType.TERMINATION', 0], 'expression': {'py/object': 'pybamm.expression_tree.scalar.Scalar', 'py/id': mock.ANY, 'name': '1.0', 'id': mock.ANY, 'value': 1.0, 'children': []}}
event_ser_json = Serialise._SymbolEncoder().default(event)
self.assertEqual(event_ser_json, event_json)
def test_mesh_encoder(self):
(mesh, mesh_json) = mesh_var_dict()
mesh_ser_json = Serialise._MeshEncoder().default(mesh)
self.assertEqual(mesh_ser_json, mesh_json)
def test_deconstruct_pybamm_dicts(self):
x = pybamm.SpatialVariable('x', 'negative electrode')
test_dict = {'rod': {x: {'min': 0.0, 'max': 2.0}}}
ser_dict = {'rod': {'symbol_x': {'py/object': 'pybamm.expression_tree.independent_variable.SpatialVariable', 'py/id': mock.ANY, 'name': 'x', 'id': mock.ANY, 'domains': {'primary': ['negative electrode'], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': []}, 'x': {'min': 0.0, 'max': 2.0}}}
self.assertEqual(Serialise()._deconstruct_pybamm_dicts(test_dict), ser_dict)
def test_get_pybamm_class(self):
(_, scalar_dict) = scalar_var_dict()
scalar_class = Serialise()._get_pybamm_class(scalar_dict)
self.assertIsInstance(scalar_class, pybamm.Scalar)
(_, mesh_dict) = mesh_var_dict()
mesh_class = Serialise()._get_pybamm_class(mesh_dict)
self.assertIsInstance(mesh_class, pybamm.Mesh)
with self.assertRaises(AttributeError):
unrecognised_symbol = {'py/id': mock.ANY, 'py/object': 'pybamm.expression_tree.scalar.Scale', 'name': '5.0', 'id': mock.ANY, 'value': 5.0, 'children': []}
Serialise()._get_pybamm_class(unrecognised_symbol)
def test_reconstruct_symbol(self):
(scalar, scalar_dict) = scalar_var_dict()
new_scalar = Serialise()._reconstruct_symbol(scalar_dict)
self.assertEqual(new_scalar, scalar)
def test_reconstruct_expression_tree(self):
y = pybamm.StateVector(slice(0, 1))
t = pybamm.t
equation = ((2 * y) + t)
equation_json = {'py/object': 'pybamm.expression_tree.binary_operators.Addition', 'py/id': mock.ANY, 'name': '+', 'id': mock.ANY, 'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': [{'py/object': 'pybamm.expression_tree.binary_operators.Multiplication', 'py/id': mock.ANY, 'name': '*', 'id': mock.ANY, 'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': [{'py/object': 'pybamm.expression_tree.scalar.Scalar', 'py/id': mock.ANY, 'name': '2.0', 'id': mock.ANY, 'value': 2.0, 'children': []}, {'py/object': 'pybamm.expression_tree.state_vector.StateVector', 'py/id': mock.ANY, 'name': 'y[0:1]', 'id': mock.ANY, 'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'y_slice': [{'start': 0, 'stop': 1, 'step': None}], 'evaluation_array': [True], 'children': []}]}, {'py/object': 'pybamm.expression_tree.independent_variable.Time', 'py/id': mock.ANY, 'name': 'time', 'id': mock.ANY, 'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': []}]}
new_equation = Serialise()._reconstruct_expression_tree(equation_json)
self.assertEqual(new_equation, equation)
def test_reconstruct_mesh(self):
(mesh, mesh_dict) = mesh_var_dict()
new_mesh = Serialise()._reconstruct_mesh(mesh_dict)
testing.assert_array_equal(new_mesh['negative particle'].edges, mesh['negative particle'].edges)
testing.assert_array_equal(new_mesh['negative particle'].nodes, mesh['negative particle'].nodes)
with self.assertRaisesRegex(AttributeError, "'Mesh' object has no attribute '_geometry'"):
self.assertEqual(new_mesh.geometry, mesh.geometry)
def test_reconstruct_pybamm_dict(self):
x = pybamm.SpatialVariable('x', 'negative electrode')
test_dict = {'rod': {x: {'min': 0.0, 'max': 2.0}}}
ser_dict = {'rod': {'symbol_x': {'py/object': 'pybamm.expression_tree.independent_variable.SpatialVariable', 'py/id': mock.ANY, 'name': 'x', 'id': mock.ANY, 'domains': {'primary': ['negative electrode'], 'secondary': [], 'tertiary': [], 'quaternary': []}, 'children': []}, 'x': {'min': 0.0, 'max': 2.0}}}
new_dict = Serialise()._reconstruct_pybamm_dict(ser_dict)
self.assertEqual(new_dict, test_dict)
test_list = ['left', 'right']
new_list = Serialise()._reconstruct_pybamm_dict(test_list)
self.assertEqual(test_list, new_list)
def test_convert_options(self):
options_dict = {'current collector': 'uniform', 'particle phases': ['2', '1'], 'open-circuit potential': [['single', 'current sigmoid'], 'single']}
options_result = {'current collector': 'uniform', 'particle phases': ('2', '1'), 'open-circuit potential': (('single', 'current sigmoid'), 'single')}
self.assertEqual(Serialise()._convert_options(options_dict), options_result)
def test_save_load_model(self):
model = pybamm.lithium_ion.SPM(name='test_spm')
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
with self.assertRaisesRegex(NotImplementedError, 'PyBaMM can only serialise a discretised, ready-to-solve model'):
Serialise().save_model(model, filename='test_model')
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
Serialise().save_model(model, filename='test_model')
self.assertTrue(os.path.exists('test_model.json'))
Serialise().save_model(model)
filename = (('test_spm_' + datetime.now().strftime('%Y_%m_%d-%p%I_%M')) + '.json')
self.assertTrue(os.path.exists(filename))
os.remove(filename)
new_model = Serialise().load_model('test_model.json')
new_solver = new_model.default_solver
new_solution = new_solver.solve(new_model, [0, 3600])
with self.assertRaisesRegex(AttributeError, 'No variables to plot'):
new_solution.plot()
newest_model = Serialise().load_model('test_model.json', battery_model=pybamm.lithium_ion.SPM)
with open('test_model.json') as f:
model_data = json.load(f)
del model_data['py/object']
with open('test_model.json', 'w') as f:
json.dump(model_data, f)
with self.assertRaises(TypeError):
Serialise().load_model('test_model.json')
os.remove('test_model.json')
newest_solver = newest_model.default_solver
newest_solver.solve(newest_model, [0, 3600])
def test_save_experiment_model_error(self):
model = pybamm.lithium_ion.SPM()
experiment = pybamm.Experiment(['Discharge at 1C for 1 hour'])
sim = pybamm.Simulation(model, experiment=experiment)
sim.solve()
with self.assertRaisesRegex(NotImplementedError, 'Serialising models coupled to experiments is not yet supported.'):
sim.save_model('spm_experiment', mesh=False, variables=False)
def test_serialised_model_plotting(self):
model = pybamm.BaseModel()
c = pybamm.Variable('c')
model.rhs = {c: (- c)}
model.initial_conditions = {c: 1}
model.variables['c'] = c
model.variables['2c'] = (2 * c)
_ = pybamm.ScipySolver().solve(model, np.linspace(0, 1))
Serialise().save_model(model, variables=model.variables, filename='test_base_model')
new_model = Serialise().load_model('test_base_model.json')
os.remove('test_base_model.json')
new_solution = pybamm.ScipySolver().solve(new_model, np.linspace(0, 1))
new_solution.plot(['c', '2c'], testing=True)
model = pybamm.lithium_ion.SPM(name='test_spm_plotting')
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
Serialise().save_model(model, variables=model.variables, mesh=mesh, filename='test_plotting_model')
new_model = Serialise().load_model('test_plotting_model.json')
os.remove('test_plotting_model.json')
new_solver = new_model.default_solver
new_solution = new_solver.solve(new_model, [0, 3600])
new_solution.plot(testing=True) |
def update_repository_score(repo):
today = date.today()
final_score = 0.0
last_end_timedelta = timedelta(days=0)
for bucket in SEARCH_BUCKETS:
start_date = (today - bucket.delta)
end_date = (today - last_end_timedelta)
last_end_timedelta = bucket.delta
query = RepositoryActionCount.select(fn.Sum(RepositoryActionCount.count), fn.Count(RepositoryActionCount.id)).where((RepositoryActionCount.date >= start_date), (RepositoryActionCount.date < end_date), (RepositoryActionCount.repository == repo))
bucket_tuple = query.tuples()[0]
logger.debug('Got bucket tuple %s for bucket %s for repository %s', bucket_tuple, bucket, repo.id)
if (bucket_tuple[0] is None):
continue
bucket_sum = float(bucket_tuple[0])
bucket_count = int(bucket_tuple[1])
if (not bucket_count):
continue
bucket_score = (bucket_sum / (bucket_count * 1.0))
final_score += (bucket_score * bucket.weight)
normalized_score = int((final_score * 100.0))
try:
try:
search_score_row = RepositorySearchScore.get(repository=repo)
search_score_row.last_updated = datetime.now()
search_score_row.score = normalized_score
search_score_row.save()
return True
except RepositorySearchScore.DoesNotExist:
RepositorySearchScore.create(repository=repo, score=normalized_score, last_updated=today)
return True
except IntegrityError:
logger.debug('RepositorySearchScore row already existed; skipping')
return False |
class ExpvalMeasMitigatorFitter():
def __init__(self, result: Result, metadata: List[Dict[(str, any)]]):
self._num_qubits = None
self._cal_data = None
self._mitigator = None
(self._cal_data, self._num_qubits, self._method) = calibration_data(result, metadata)
@property
def mitigator(self):
if (self._mitigator is None):
raise QiskitError('Mitigator has not been fitted. Run `fit` first.')
return self._mitigator
def fit(self, method: Optional[str]=None, generators: Optional[List[Generator]]=None) -> Union[(CompleteExpvalMeasMitigator, TensoredExpvalMeasMitigator, CTMPExpvalMeasMitigator)]:
if (method is None):
method = self._method
if (method == 'complete'):
amat = assignment_matrix(self._cal_data, self._num_qubits)
self._mitigator = CompleteExpvalMeasMitigator(amat)
elif (method == 'tensored'):
amats = []
for qubit in range(self._num_qubits):
amat = assignment_matrix(self._cal_data, self._num_qubits, [qubit])
amats.append(amat)
self._mitigator = TensoredExpvalMeasMitigator(amats)
elif (method in ['CTMP', 'ctmp']):
self._mitigator = fit_ctmp_meas_mitigator(self._cal_data, self._num_qubits, generators)
else:
raise QiskitError('Invalid expval measurement error mitigation method {}'.format(method))
return self._mitigator |
def run_and_display(prompts: List[str], controller: AttentionStore, indices_to_alter: List[int], generator: torch.Generator, run_standard_sd: bool=False, scale_factor: int=20, thresholds: Dict[(int, float)]={0: 0.05, 10: 0.5, 20: 0.8}, max_iter_to_alter: int=25, display_output: bool=False, sd_2_1: bool=False):
config = RunConfig(prompt=prompts[0], run_standard_sd=run_standard_sd, scale_factor=scale_factor, thresholds=thresholds, max_iter_to_alter=max_iter_to_alter, sd_2_1=sd_2_1)
image = run_on_prompt(model=stable, prompt=prompts, controller=controller, token_indices=indices_to_alter, seed=generator, config=config)
if display_output:
display(image)
return image |
class ItemAccessor(Accessor):
def __init__(self, key: Union[(int, str)], access_error: Optional[Catchable], path_element: TrailElement):
self.key = key
self._access_error = access_error
self._path_element = path_element
def getter(self, obj):
return obj[self.key]
@property
def access_error(self) -> Optional[Catchable]:
return self._access_error
@property
def trail_element(self) -> TrailElement:
return self._path_element
def __eq__(self, other):
if isinstance(other, ItemAccessor):
return ((self.key == other.key) and (self._access_error == other._access_error) and (self._path_element == other._path_element))
return NotImplemented
def __hash__(self):
try:
return hash((self.key, self._access_error))
except TypeError:
return hash(self._access_error)
def __repr__(self):
return f'{type(self).__qualname__}(key={self.key!r}, access_error={self.access_error}, path_element={self.trail_element!r})' |
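# Usage sketch for ItemAccessor above. Catchable and TrailElement are type aliases
# defined elsewhere in the project; a plain exception type and the raw key stand in
# for them here.
acc = ItemAccessor(key='name', access_error=KeyError, path_element='name')
print(acc.getter({'name': 'alice'}))  # -> 'alice'
print(acc == ItemAccessor('name', KeyError, 'name'))  # -> True
print(len({acc, ItemAccessor('name', KeyError, 'name')}))  # -> 1 (hashes agree) |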
def _spotting_delta_model_dir(feature_name: str, dataset_type: str, protocol_name: str, run_name: str, models_dir: str) -> str:
delta_train_hyperparameters = TRAIN_HYPERPARAMETERS[dataset_type][feature_name][DELTA]
return os.path.join(models_dir, create_name(delta_train_hyperparameters, run_name, DELTA, feature_name, protocol_name)) |
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
if config.is_coverage:
self.W_c = nn.Linear(1, (config.hidden_dim * 2), bias=False)
self.decode_proj = nn.Linear((config.hidden_dim * 2), (config.hidden_dim * 2))
self.v = nn.Linear((config.hidden_dim * 2), 1, bias=False)
def forward(self, s_t_hat, encoder_outputs, encoder_feature, enc_padding_mask, coverage):
(b, t_k, n) = list(encoder_outputs.size())
dec_fea = self.decode_proj(s_t_hat)
dec_fea_expanded = dec_fea.unsqueeze(1).expand(b, t_k, n).contiguous()
dec_fea_expanded = dec_fea_expanded.view((- 1), n)
att_features = (encoder_feature + dec_fea_expanded)
if config.is_coverage:
coverage_input = coverage.view((- 1), 1)
coverage_feature = self.W_c(coverage_input)
att_features = (att_features + coverage_feature)
e = torch.tanh(att_features)
scores = self.v(e)
scores = scores.view((- 1), t_k)
attn_dist_ = (F.softmax(scores, dim=1) * enc_padding_mask)
normalization_factor = attn_dist_.sum(1, keepdim=True)
attn_dist = (attn_dist_ / normalization_factor)
attn_dist = attn_dist.unsqueeze(1)
c_t = torch.bmm(attn_dist, encoder_outputs)
c_t = c_t.view((- 1), (config.hidden_dim * 2))
attn_dist = attn_dist.view((- 1), t_k)
if config.is_coverage:
coverage = coverage.view((- 1), t_k)
coverage = (coverage + attn_dist)
return (c_t, attn_dist, coverage) |
def ql_syscall_socketcall(ql: Qiling, call: int, args: int):
handlers: Mapping[(SOCKETCALL, Callable)] = {SOCKETCALL.SYS_SOCKET: ql_syscall_socket, SOCKETCALL.SYS_BIND: ql_syscall_bind, SOCKETCALL.SYS_CONNECT: ql_syscall_connect, SOCKETCALL.SYS_LISTEN: ql_syscall_listen, SOCKETCALL.SYS_ACCEPT: ql_syscall_accept, SOCKETCALL.SYS_GETSOCKNAME: ql_syscall_getsockname, SOCKETCALL.SYS_GETPEERNAME: ql_syscall_getpeername, SOCKETCALL.SYS_SOCKETPAIR: ql_syscall_socketpair, SOCKETCALL.SYS_SEND: ql_syscall_send, SOCKETCALL.SYS_RECV: ql_syscall_recv, SOCKETCALL.SYS_SENDTO: ql_syscall_sendto, SOCKETCALL.SYS_RECVFROM: ql_syscall_recvfrom, SOCKETCALL.SYS_SHUTDOWN: ql_syscall_shutdown, SOCKETCALL.SYS_SETSOCKOPT: ql_syscall_setsockopt, SOCKETCALL.SYS_GETSOCKOPT: ql_syscall_getsockopt, SOCKETCALL.SYS_RECVMSG: ql_syscall_recvmsg}
if (call not in handlers):
call_name = next((m.name for m in SOCKETCALL if (m.value == call)), '')
raise NotImplementedError(f'socketcall: call {(call_name or call)} not implemented')
handler = handlers[call]
nargs = (len(inspect.signature(handler).parameters) - 1)
params = (ql.mem.read_ptr((args + (i * ql.arch.pointersize))) for i in range(nargs))
return handler(ql, *params) |
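# The dispatch trick used by ql_syscall_socketcall above: each handler's arity
# (minus the leading context argument) is recovered with inspect.signature, so
# exactly that many values are unpacked for it. A self-contained sketch of the
# same pattern with toy handlers:
import inspect
def _add(ctx, a, b):
    return a + b
def _neg(ctx, a):
    return -a
def dispatch(ctx, handler, values):
    nargs = len(inspect.signature(handler).parameters) - 1  # drop ctx
    return handler(ctx, *values[:nargs])
print(dispatch(None, _add, [2, 3, 99]))  # -> 5 (extra values ignored)
print(dispatch(None, _neg, [7]))  # -> -7 |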
class LayoutSkyTempleKeyMode(BitPackEnum, Enum):
ALL_BOSSES = 'all-bosses'
ALL_GUARDIANS = 'all-guardians'
ZERO = 0
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
SEVEN = 7
EIGHT = 8
NINE = 9
def num_keys(self):
if (self == self.ALL_BOSSES):
return 9
elif (self == self.ALL_GUARDIANS):
return 3
else:
return self.value |
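# Usage sketch for LayoutSkyTempleKeyMode above; BitPackEnum comes from the
# surrounding project, so only plain Enum behaviour is exercised here.
print(LayoutSkyTempleKeyMode.ALL_BOSSES.num_keys())  # -> 9
print(LayoutSkyTempleKeyMode.ALL_GUARDIANS.num_keys())  # -> 3
print(LayoutSkyTempleKeyMode.FIVE.num_keys())  # -> 5 (falls through to .value) |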
@resource('/v1/repository/<apirepopath:repository>/permissions/team/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryTeamPermissionList(RepositoryParamResource):
@require_repo_admin(allow_for_superuser=True)
@nickname('listRepoTeamPermissions')
def get(self, namespace_name, repository_name):
repo_perms = model.get_repo_permissions_by_team(namespace_name, repository_name)
return {'permissions': {repo_perm.team_name: repo_perm.to_dict() for repo_perm in repo_perms}} |
def convert_diarization(base_model_name, hf_config, downstream_dict):
model = WavLMForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
model.classifier.weight.data = downstream_dict['model.linear.weight']
model.classifier.bias.data = downstream_dict['model.linear.bias']
return model |
def forward(source, destination, recv_timeout=None, buffering=1024):
timeout = source.gettimeout()
source.settimeout(recv_timeout)
try:
raw_data = source.recv(buffering)
except socket.timeout:
pass
else:
while raw_data:
destination.sendall(raw_data)
try:
raw_data = source.recv(buffering)
except socket.timeout:
break
source.settimeout(timeout) |
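# Usage sketch for forward() above with an in-process socket pair; the timeout value
# is arbitrary. Shutting down the write side makes recv() return b'' at EOF, which
# ends the copy loop.
import socket
(a, b) = socket.socketpair()
(c, d) = socket.socketpair()
a.sendall(b'hello')
a.shutdown(socket.SHUT_WR)
forward(b, c, recv_timeout=0.5)  # copy everything readable from b into c
print(d.recv(1024))  # -> b'hello'
for s in (a, b, c, d):
    s.close() |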
@pytest.mark.online
def test_pypi(cache_dir):
pypi = service.PyPIService(cache_dir)
dep = service.ResolvedDependency('jinja2', Version('2.4.1'))
results: dict[(service.Dependency, list[service.VulnerabilityResult])] = dict(pypi.query_all(iter([dep])))
assert (len(results) == 1)
assert (dep in results)
vulns = results[dep]
assert (len(vulns) > 0) |
class ElixirToDeclarativeWebDeclarativeChanges(MigrateElixirToDeclarative):
def schedule_upgrades(self):
super().schedule_upgrades()
self.replace_elixir()
def rename_primary_key_constraints(self):
self.rename_pk('sessiondata', ['id'])
def rename_foreign_keys_constraints(self):
self.recreate_foreign_key_constraint('sessiondata_web_session_id', 'sessiondata', 'web_session_id', 'webusersession', 'id', ondelete='CASCADE')
def change_inheriting_table_ids(self):
for (table_name, old_id_column_name, inheriting_table_name) in [('webusersession', 'usersession_id', 'usersession'), ('persistedexception', 'sessiondata_id', 'sessiondata'), ('persistedfile', 'sessiondata_id', 'sessiondata'), ('userinput', 'sessiondata_id', 'sessiondata')]:
self.change_inheriting_table(table_name, old_id_column_name, inheriting_table_name)
def replace_elixir(self):
orm_control = ExecutionContext.get_context().system_control.orm_control
self.schedule('cleanup', orm_control.remove_schema_version_for, egg_name='reahl-web-elixirimpl', fail_if_not_found=False) |
def save_model(epoch, args, model, optimizer, tr_loss, type_name=''):
model_to_save = (model.module if hasattr(model, 'module') else model)
output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}{}'.format(('' if (type_name == '') else (type_name + '.')), epoch))
optimizer_state_file = os.path.join(args.output_dir, 'pytorch_opt.bin.{}{}'.format(('' if (type_name == '') else (type_name + '.')), epoch))
torch.save(model_to_save.state_dict(), output_model_file)
torch.save({'epoch': epoch, 'optimizer_state_dict': optimizer.state_dict(), 'loss': tr_loss}, optimizer_state_file)
logger.info('Model saved to %s', output_model_file)
logger.info('Optimizer saved to %s', optimizer_state_file)
return output_model_file |
def critic_weights(matrix, objectives, correlation='pearson', scale=True):
matrix = np.asarray(matrix, dtype=float)
matrix = (matrix_scale_by_cenit_distance(matrix, objectives=objectives) if scale else matrix)
dindex = np.std(matrix, axis=0)
import pandas as pd
corr_m1 = (1 - pd.DataFrame(matrix).corr(method=correlation).to_numpy(copy=True))
uweights = (dindex * np.sum(corr_m1, axis=0))
weights = (uweights / np.sum(uweights))
return weights |
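# Usage sketch for critic_weights above. With scale=False the
# matrix_scale_by_cenit_distance helper (not shown in this snippet) is never called,
# so this runs standalone; the matrix values are made up.
import numpy as np
m = np.array([[1.0, 9.0], [2.0, 7.0], [3.0, 5.0]])
w = critic_weights(m, objectives=[1, 1], scale=False)
print(w, w.sum())  # -> [0.3333... 0.6666...] 1.0 |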
def fc(x, K, name, relu=True, reuse=False):
c = int(x.get_shape()[1])
with tf.variable_scope(name, reuse=reuse) as scope:
weights = tf.get_variable('weights', shape=[c, K])
biases = tf.get_variable('biases', shape=[K])
relu_value = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
if relu:
result_value = tf.nn.relu(relu_value)
else:
result_value = relu_value
return result_value |
def test_from_dict_complex_struct_type():
input_dict = {'type': 'struct', 'fields': [{'type': 'list', 'values': {'type': 'map', 'keys': {'type': 'int', 'bits': 32}, 'values': {'type': 'string', 'bytes': 50}}}]}
result = from_dict(input_dict)
assert isinstance(result, StructType)
assert isinstance(result.fields[0], ListType)
assert isinstance(result.fields[0].values, MapType)
assert isinstance(result.fields[0].values.keys, IntType)
assert (result.fields[0].values.keys.bits == 32)
assert isinstance(result.fields[0].values.values, StringType)
assert (result.fields[0].values.values.bytes_ == 50) |
class Insert(COp):
__props__ = ('inplace',)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x, index, toInsert):
assert isinstance(x.type, TypedListType)
assert (x.ttype == toInsert.type)
if (not isinstance(index, Variable)):
index = pt.constant(index, ndim=0, dtype='int64')
else:
assert (index.dtype == 'int64')
assert (isinstance(index, TensorVariable) and (index.ndim == 0))
return Apply(self, [x, index, toInsert], [x.type()])
def perform(self, node, inputs, outputs):
(x, index, toInsert) = inputs
(out,) = outputs
if (not self.inplace):
out[0] = list(x)
else:
out[0] = x
toInsert = _lessbroken_deepcopy(toInsert)
out[0].insert(index, toInsert)
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
raise NotImplementedError('DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend()')
(x_name, index, toInsert) = (inp[0], inp[1], inp[2])
output_name = out[0]
fail = sub['fail']
if (not self.inplace):
init = ('\n %(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;\n ' % locals())
else:
init = f'''
{output_name} = {x_name};
'''
return (init + ('\n if(%(output_name)s==NULL){\n %(fail)s\n };\n if(PyList_Insert((PyObject*) %(output_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)), (PyObject*) %(toInsert)s)==-1){\n %(fail)s\n };\n Py_INCREF(%(output_name)s);\n ' % locals()))
def c_code_cache_version(self):
return (1,) |
def count_overlaps(grs, features=None, strandedness=None, how=None, nb_cpu=1):
kwargs = {'as_pyranges': False, 'nb_cpu': nb_cpu, 'strandedness': strandedness, 'how': how}
names = list(grs.keys())
if (features is None):
features = pr.concat(grs.values()).split(between=True)
else:
features = features.copy()
from pyranges.methods.intersection import _count_overlaps
for (name, gr) in grs.items():
gr = gr.drop()
kwargs['name'] = name
features.apply_pair(gr, _count_overlaps, **kwargs)
def to_int(df):
df[names] = df[names].astype(np.int64)
return df
features = features.apply(to_int)
return features |
class DebertaTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
slow_tokenizer_class = DebertaTokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
self.add_bos_token = kwargs.pop('add_bos_token', False)
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
pre_tok_state['add_prefix_space'] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
self.add_prefix_space = add_prefix_space
@property
def mask_token(self) -> str:
if (self._mask_token is None):
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
self._mask_token = value
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
input_ids = []
for (is_user, text) in conversation.iter_texts():
input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
if (len(input_ids) > self.model_max_length):
input_ids = input_ids[(- self.model_max_length):]
return input_ids |
def test_measurement_parameters_for_values():
class Fake(FakeBase):
x = CommonBase.measurement('JUNK%d', '', preprocess_reply=(lambda v: v.replace('JUNK', '')), cast=int, values_kwargs={'testing': True})
def values(self, cmd, testing=False, **kwargs):
self.testing = testing
return super().values(cmd, **kwargs)
fake = Fake()
fake.write('5')
fake.x
assert (fake.testing is True) |
def create_wideresnet32_4(models_path, task, save_type, get_params=False):
print('Creating wrn32_4 untrained {} models...'.format(task))
model_params = get_task_params(task)
model_params['num_blocks'] = [5, 5, 5]
model_params['widen_factor'] = 4
model_params['dropout_rate'] = 0.3
model_name = '{}_wideresnet32_4'.format(task)
model_params['add_ic'] = [[0, 0, 1, 0, 1], [0, 1, 0, 1, 0], [1, 0, 1, 0, 0]]
model_params['network_type'] = 'wideresnet32_4'
model_params['augment_training'] = True
model_params['init_weights'] = True
get_lr_params(model_params)
if get_params:
return model_params
return save_networks(model_name, model_params, models_path, save_type) |
def main():
parser = argparse.ArgumentParser(description='Benchmark dataloading')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
cfg = Config.fromfile(args.config)
logger = get_root_logger()
logger.info(f'MMAction2 Version: {__version__}')
logger.info(f'Config: {cfg.text}')
ann_file_bench = 'benchlist.txt'
if (not os.path.exists(ann_file_bench)):
with open(cfg.ann_file_train) as f:
lines = f.readlines()[:256]
with open(ann_file_bench, 'w') as f1:
f1.writelines(lines)
cfg.data.train.ann_file = ann_file_bench
dataset = build_dataset(cfg.data.train)
data_loader = build_dataloader(dataset, videos_per_gpu=cfg.data.videos_per_gpu, workers_per_gpu=0, num_gpus=1, dist=False)
prog_bar = mmcv.ProgressBar((len(dataset) - (5 * cfg.data.videos_per_gpu)), start=False)
for (i, data) in enumerate(data_loader):
if (i == 5):
prog_bar.start()
for img in data['imgs']:
if (i < 5):
continue
prog_bar.update() |
def write_csv(table: pa.Table, path: str, *, filesystem: AbstractFileSystem, **kwargs) -> None:
with filesystem.open(path, 'wb') as f:
with pa.CompressedOutputStream(f, ContentEncoding.GZIP.value) as out:
if (kwargs.get('write_options') is None):
kwargs['write_options'] = pacsv.WriteOptions(include_header=False)
pacsv.write_csv(table, out, **kwargs) |
def plot_histogram(scores_csv: str, score_col: int, name: str, k: int, log: bool=True, clip: bool=False, maximize: bool=False):
scores = extract_scores(scores_csv, score_col)
if clip:
scores = (scores[(scores < 0)] if (not maximize) else scores[(scores >= 0)])
cutoff = (scores[k] if (not maximize) else scores[(- (k + 1))])
if log:
(fig, (ax1, ax2)) = plt.subplots(1, 2, sharex=True, figsize=(10, 4))
BINWIDTH = 0.1
for ax in (ax1, ax2):
(hist, _, _) = ax.hist(scores, color='b', edgecolor='none', bins=np.arange(min(scores), (max(scores) + BINWIDTH), BINWIDTH))
ax.axvline(cutoff, color='r', linestyle='dashed', linewidth=1)
ax.grid(True, linewidth=1, color='whitesmoke')
ax1.set_ylabel('Count')
ax2.set_yscale('log')
if (max(hist) > 10000.0):
formatter = ticker.FuncFormatter(abbreviate_k_or_M)
ax1.yaxis.set_major_formatter(formatter)
ax = fig.add_subplot(111, frameon=False)
ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.set_xlabel('Score')
else:
(fig, ax) = plt.subplots(1, 1, sharex=True, figsize=(6, 4))
BINWIDTH = 0.1
ax.hist(scores, color='b', edgecolor='none', bins=np.arange(min(scores), (max(scores) + BINWIDTH), BINWIDTH))
ax.axvline(cutoff, color='r', linestyle='dashed', linewidth=1)
ax.grid(True, linewidth=1, color='whitesmoke')
ax.set_xlabel('Score')
ax.set_ylabel('Count')
fig.tight_layout()
fig.savefig(f'{name}_score_hist.pdf') |
class UpdateInitTestCase(UpdateBaseTest):
def test_init_empty(self):
update = Update([], self.config)
self.assertEqual(update, dict())
def test_init_with_reqs(self):
with patch('pyup.requirements.Requirement') as req:
req.needs_update = True
req_files = [RequirementFile('req.txt', 'django')]
update = Update(req_files, self.config)
self.assertEqual(len(update.keys()), 1) |
def main():
with tf.variable_scope('resnet'):
with tf.device(tf.train.replica_device_setter(ps_tasks=NUM_PS, ps_device='/job:ps/', worker_device='/job:worker/task:0/')):
inputs = tf.random_uniform([BATCH_SIZE, 299, 299, 3], name='Inputs')
(logit, _) = nets.resnet_v1.resnet_v1_152(inputs, 1000, scope=None)
tic = TIC(endpoint=logit)
tic.save('tic_rpc_orders.txt', partition_devices=True) |
class MobileViTIntermediate(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states |
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__(self, vocab_file=None, merges_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, trim_offsets=True, **kwargs):
super().__init__(ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
self.add_prefix_space = add_prefix_space
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = (([self.bos_token_id] + token_ids_0) + [self.eos_token_id])
if (token_ids_1 is None):
return output
return (((output + [self.eos_token_id]) + token_ids_1) + [self.eos_token_id])
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0]) |
class EnsembleDecoderOutput(object):
def __init__(self, model_dec_outs):
self.model_dec_outs = tuple(model_dec_outs)
def squeeze(self, dim=None):
return EnsembleDecoderOutput([x.squeeze(dim) for x in self.model_dec_outs])
def __getitem__(self, index):
return self.model_dec_outs[index] |
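# Usage sketch for EnsembleDecoderOutput above; torch is assumed and the tensor
# shapes are made up.
import torch
out = EnsembleDecoderOutput([torch.zeros(2, 1, 4), torch.ones(2, 1, 4)])
print(out.squeeze(1)[0].shape)  # -> torch.Size([2, 4])
print(out[1].sum())  # -> tensor(8.) |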
class InscDict(MutableMapping):
def __init__(self, *args, **kwargs):
super().__init__()
self.update(*args, **kwargs)
def __getitem__(self, key):
return self.__dict__[key.lower()][(- 1)]
def __setitem__(self, key, value):
self.__dict__[key.lower()] = (key, value)
def __delitem__(self, key):
del self.__dict__[key.lower()]
def __len__(self):
return len(self.__dict__)
def __iter__(self):
return iter((key for (key, val) in self.__dict__.values()))
def __str__(self):
return f'<InscDict {self.__dict__}>'
def __eq__(self, other):
if (not isinstance(other, Mapping)):
raise TypeError
return (self.loweritems() == InscDict(other).loweritems())
def lowerkeys(self):
return self.__dict__.keys()
def loweritems(self):
return ((lowerkey, val) for (lowerkey, (key, val)) in self.__dict__.items())
def copy(self):
return InscDict(self.__dict__.values()) |
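# Usage sketch for InscDict above: lookups are case-insensitive while iteration
# keeps the spelling of the most recently stored key. Assumes
# `from collections.abc import MutableMapping`, stripped like other imports here.
d = InscDict({'Content-Type': 'text/html'})
print(d['content-type'])  # -> 'text/html'
print(list(d))  # -> ['Content-Type']
d['CONTENT-TYPE'] = 'application/json'
print(d['Content-Type'], len(d))  # -> application/json 1 |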
@dataclass  # assumes: from dataclasses import dataclass (imports are stripped throughout this file)
class Notification():
id: int
type: EventID
flags: EventFlag
@classmethod
def parse(cls, data: bytes) -> 'Notification':
[type, flags, _, _, id] = struct.unpack('<BBBBI', bytearray(data))
return cls(id=id, type=type, flags=flags)
def is_preexisting(self) -> bool:
return ((self.flags & EventFlag.PreExisting) > 0)
def is_fresh(self) -> bool:
return (not self.is_preexisting())
def has_positive_action(self) -> bool:
return ((self.flags & EventFlag.PositiveAction) > 0)
def has_negative_action(self) -> bool:
return ((self.flags & EventFlag.NegativeAction) > 0) |
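# Usage sketch for Notification above: the frame is four single bytes (type, flags,
# two reserved) followed by a little-endian uint32 id, so a test frame can be built
# with the mirror struct.pack call. Raw ints stand in for the EventID/EventFlag
# values here.
import struct
raw = struct.pack('<BBBBI', 4, 0b10, 0, 0, 1234)
n = Notification.parse(raw)
print(n.id, n.type, n.flags)  # -> 1234 4 2 |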
def se_resnext101_32x4d(num_classes, loss, pretrained='imagenet', **kwargs):
model = SENet(num_classes=num_classes, loss=loss, block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs)
if (pretrained == 'imagenet'):
model_url = pretrained_settings['se_resnext101_32x4d']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model |
class ENet(BaseModel):
def __init__(self, num_classes, in_channels=3, freeze_bn=False, **_):
super(ENet, self).__init__()
self.initial = InitalBlock(in_channels)
self.bottleneck10 = BottleNeck(16, 64, downsample=True, p_drop=0.01)
self.bottleneck11 = BottleNeck(64, p_drop=0.01)
self.bottleneck12 = BottleNeck(64, p_drop=0.01)
self.bottleneck13 = BottleNeck(64, p_drop=0.01)
self.bottleneck14 = BottleNeck(64, p_drop=0.01)
self.bottleneck20 = BottleNeck(64, 128, downsample=True, p_drop=0.1)
self.bottleneck21 = BottleNeck(128, p_drop=0.1)
self.bottleneck22 = BottleNeck(128, dilation=2, p_drop=0.1)
self.bottleneck23 = BottleNeck(128, asymetric=True, p_drop=0.1)
self.bottleneck24 = BottleNeck(128, dilation=4, p_drop=0.1)
self.bottleneck25 = BottleNeck(128, p_drop=0.1)
self.bottleneck26 = BottleNeck(128, dilation=8, p_drop=0.1)
self.bottleneck27 = BottleNeck(128, asymetric=True, p_drop=0.1)
self.bottleneck28 = BottleNeck(128, dilation=16, p_drop=0.1)
self.bottleneck31 = BottleNeck(128, p_drop=0.1)
self.bottleneck32 = BottleNeck(128, dilation=2, p_drop=0.1)
self.bottleneck33 = BottleNeck(128, asymetric=True, p_drop=0.1)
self.bottleneck34 = BottleNeck(128, dilation=4, p_drop=0.1)
self.bottleneck35 = BottleNeck(128, p_drop=0.1)
self.bottleneck36 = BottleNeck(128, dilation=8, p_drop=0.1)
self.bottleneck37 = BottleNeck(128, asymetric=True, p_drop=0.1)
self.bottleneck38 = BottleNeck(128, dilation=16, p_drop=0.1)
self.bottleneck40 = BottleNeck(128, 64, upsample=True, p_drop=0.1, use_prelu=False)
self.bottleneck41 = BottleNeck(64, p_drop=0.1, use_prelu=False)
self.bottleneck42 = BottleNeck(64, p_drop=0.1, use_prelu=False)
self.bottleneck50 = BottleNeck(64, 16, upsample=True, p_drop=0.1, use_prelu=False)
self.bottleneck51 = BottleNeck(16, p_drop=0.1, use_prelu=False)
self.fullconv = nn.ConvTranspose2d(16, num_classes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False)
initialize_weights(self)
if freeze_bn:
self.freeze_bn()
def forward(self, x):
x = self.initial(x)
sz1 = x.size()
(x, indices1) = self.bottleneck10(x)
x = self.bottleneck11(x)
x = self.bottleneck12(x)
x = self.bottleneck13(x)
x = self.bottleneck14(x)
sz2 = x.size()
(x, indices2) = self.bottleneck20(x)
x = self.bottleneck21(x)
x = self.bottleneck22(x)
x = self.bottleneck23(x)
x = self.bottleneck24(x)
x = self.bottleneck25(x)
x = self.bottleneck26(x)
x = self.bottleneck27(x)
x = self.bottleneck28(x)
x = self.bottleneck31(x)
x = self.bottleneck32(x)
x = self.bottleneck33(x)
x = self.bottleneck34(x)
x = self.bottleneck35(x)
x = self.bottleneck36(x)
x = self.bottleneck37(x)
x = self.bottleneck38(x)
x = self.bottleneck40(x, indices=indices2, output_size=sz2)
x = self.bottleneck41(x)
x = self.bottleneck42(x)
x = self.bottleneck50(x, indices=indices1, output_size=sz1)
x = self.bottleneck51(x)
x = self.fullconv(x)
return x
def get_backbone_params(self):
return []
def get_decoder_params(self):
return self.parameters()
def freeze_bn(self):
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval() |
class LatentDepthwiseXCorrCls(nn.Module):
def __init__(self, in_channels, hidden, out_channels, kernel_size=3, n_latent=128, de_hidden=128, is_meta_training=True):
super(LatentDepthwiseXCorrCls, self).__init__()
self.conv_kernel = nn.Sequential(nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False), nn.BatchNorm2d(hidden), nn.ReLU(inplace=True))
self.conv_search = nn.Sequential(nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False), nn.BatchNorm2d(hidden), nn.ReLU(inplace=True))
self.head = nn.Sequential(nn.Conv2d(hidden, hidden, kernel_size=1, bias=False), nn.BatchNorm2d(hidden), nn.ReLU(inplace=True))
if (cfg.LATENTS.ENCODER_ADJUST == '0-layer'):
n_latent = hidden
if (cfg.LATENTS.DECODER_NAME == 'linear'):
self.encoder = EncoderLinear(hidden, n_latent)
else:
self.encoder = EncoderCls(hidden, n_latent)
if cfg.LATENTS.DECODER_WIDE:
de_hidden = (de_hidden * 2)
if cfg.LATENTS.DECODER_BIAS:
self.decoder = Decoder((n_latent * 4), de_hidden, hidden, out_channels, bias=True)
else:
self.decoder = Decoder((n_latent * 4), de_hidden, hidden, out_channels)
self.reconstruct_loss = nn.L1Loss()
self.is_meta_train = False
self.last_weights0 = nn.Parameter(torch.zeros(out_channels, hidden, 1, 1))
self.last_bias0 = nn.Parameter(torch.zeros(out_channels))
self.last_weights = None
self.last_bias = None
self.layer_weight = torch.zeros(1).cuda()
def update_weight(self, input, label_cls):
(latents, kl, s1) = self.encoder(input, label_cls)
(weights, bias, layer_weight) = self.decoder(latents, s1)
self.last_weights = (self.last_weights0.data + weights)
self.last_bias = (self.last_bias0.data + bias)
self.layer_weight = layer_weight
return kl
def forward(self, kernel, search):
kernel = self.conv_kernel(kernel)
search = self.conv_search(search)
feature = xcorr_depthwise(search, kernel)
out0 = self.head(feature)
if (self.last_weights is None):
self.last_weights = self.last_weights0.data
self.last_bias = self.last_bias0.data
return out0 |
def _f(mu, nu, sigma, n, a, k, collection):
if (mu == 2):
(yield _visit(n, a, k, collection))
else:
for v in _f((mu - 1), (nu - 1), ((mu + sigma) % 2), n, a, k, collection):
(yield v)
if (nu == (mu + 1)):
a[mu] = (mu - 1)
(yield _visit(n, a, k, collection))
while (a[nu] > 0):
a[nu] = (a[nu] - 1)
(yield _visit(n, a, k, collection))
elif (nu > (mu + 1)):
if (((mu + sigma) % 2) == 1):
a[(nu - 1)] = (mu - 1)
else:
a[mu] = (mu - 1)
if (((a[nu] + sigma) % 2) == 1):
for v in _b(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
else:
for v in _f(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
while (a[nu] > 0):
a[nu] = (a[nu] - 1)
if (((a[nu] + sigma) % 2) == 1):
for v in _b(mu, (nu - 1), 0, n, a, k, collection):
(yield v)
else:
for v in _f(mu, (nu - 1), 0, n, a, k, collection):
(yield v) |
class TypeclassManager(TypedObjectManager):
def smart_search(self, query):
querysplit = shlex.split(query)
(queries, plustags, plusattrs, negtags, negattrs) = ([], [], [], [], [])
for (ipart, part) in enumerate(querysplit):
(key, rest) = (part, '')
if (':' in part):
(key, rest) = part.split(':', 1)
if key.startswith('tag=='):
plustags.append((key[5:], rest))
continue
elif key.startswith('tag!='):
negtags.append((key[5:], rest))
continue
elif rest:
(value, category) = (rest, '')
if (':' in rest):
(value, category) = rest.split(':', 1)
if key.startswith('attr=='):
plusattrs.append((key[6:], value, category))  # 'attr==' prefix is 6 characters
continue
elif key.startswith('attr!='):
negattrs.append((key[6:], value, category))
continue
queries.append(part)
query = ' '.join(queries)
def get(self, *args, **kwargs):
kwargs.update({'db_typeclass_path': self.model.path})
return super().get(**kwargs)
def filter(self, *args, **kwargs):
kwargs.update({'db_typeclass_path': self.model.path})
return super().filter(*args, **kwargs)
def all(self):
return super().all().filter(db_typeclass_path=self.model.path)
def first(self):
return super().filter(db_typeclass_path=self.model.path).first()
def last(self):
return super().filter(db_typeclass_path=self.model.path).last()
def count(self):
return super().filter(db_typeclass_path=self.model.path).count()
def annotate(self, *args, **kwargs):
return super(TypeclassManager, self).filter(db_typeclass_path=self.model.path).annotate(*args, **kwargs)
def values(self, *args, **kwargs):
return super(TypeclassManager, self).filter(db_typeclass_path=self.model.path).values(*args, **kwargs)
def values_list(self, *args, **kwargs):
return super(TypeclassManager, self).filter(db_typeclass_path=self.model.path).values_list(*args, **kwargs)
def _get_subclasses(self, cls):
all_subclasses = cls.__subclasses__()
for subclass in all_subclasses:
all_subclasses.extend(self._get_subclasses(subclass))
return all_subclasses
def get_family(self, **kwargs):
paths = ([self.model.path] + [('%s.%s' % (cls.__module__, cls.__name__)) for cls in self._get_subclasses(self.model)])
kwargs.update({'db_typeclass_path__in': paths})
return super().get(**kwargs)
def filter_family(self, *args, **kwargs):
paths = ([self.model.path] + [('%s.%s' % (cls.__module__, cls.__name__)) for cls in self._get_subclasses(self.model)])
kwargs.update({'db_typeclass_path__in': paths})
return super().filter(*args, **kwargs)
def all_family(self):
paths = ([self.model.path] + [('%s.%s' % (cls.__module__, cls.__name__)) for cls in self._get_subclasses(self.model)])
return super().all().filter(db_typeclass_path__in=paths) |
def test_cache_get_miss():
cache = Cache()
creator_mock = MagicMock()
creator_mock.return_value = 'created obj'
with patch_logger('pypyr.cache', logging.DEBUG) as mock_logger_debug:
obj = cache.get('one', (lambda : creator_mock('1')))
assert (obj == 'created obj')
creator_mock.assert_called_once_with('1')
mock_logger_debug.assert_called_once_with('`one` not found in cache. . . creating') |
def main(args):
save_path = './saved_model/{}'.format(args.name)
if (not os.path.exists(save_path)):
os.makedirs(save_path)
log_path = './log/{}'.format(args.name)
if (not os.path.exists(log_path)):
os.makedirs(log_path)
out_path = './output/{}'.format(args.name)
if (not os.path.exists(out_path)):
os.makedirs(out_path)
audio_bundle = torchaudio.pipelines.HUBERT_LARGE
audio_fname = os.path.join(args.root_data_dir, args.audio_fname)
template_fname = os.path.join(args.root_data_dir, args.template_fname)
mesh_sequence_fname = os.path.join(args.root_data_dir, args.mesh_sequence_fname)
weight_mask = load_mask(args)
(face_mean, face_std) = load_face_mean_std(args)
print('preparing data...')
dataset = get_dataset(args.dataset)
validation_set = dataset(audio_fname=audio_fname, template_fname=template_fname, meshes_fname=mesh_sequence_fname, audio_rate=audio_bundle.sample_rate, mode='testing', mean=face_mean, std=face_std)
face_mean = face_mean.cuda()
face_std = face_std.cuda()
audio2face = model.Audio2FaceModel(audio_bundle, (validation_set.n_vertices * 3)).cuda()
render = Renderer(validation_set.faces, validation_set.n_vertices)
if (args.dataset == 'voca'):
audio2face.load_state_dict(torch.load('saved_model/voca/model.pkl'))
elif (args.dataset == 'meshtalk'):
audio2face.load_state_dict(torch.load('saved_model/meshtalk/model.pkl'))
elif (args.dataset == 'biwi'):
audio2face.load_state_dict(torch.load('saved_model/biwi/model.pkl'))
audio2face.eval()
with torch.no_grad():
for i in range(len(validation_set)):
(audio, template_tensor, mesh_tensor, _) = validation_set.__getitem__(i)
template_tensor = template_tensor.unsqueeze(0).cuda()
audio = audio.unsqueeze(0).cuda()
mesh_tensor = mesh_tensor.unsqueeze(0).cuda()
T = mesh_tensor.shape[1]
pred_geom_self = audio2face(audio, template_tensor, mesh_tensor, T)
pred_geom_self = (((pred_geom_self.squeeze() * face_std) + face_mean.unsqueeze(0).unsqueeze(1)) * 0.001)
if (args.dataset == 'voca'):
pred_geom_self *= 1000
audio_path = validation_set.get_audio_path(i)
(subj, seq) = validation_set.get_subj_seq(i)
save_path = os.path.join(out_path, '{}_{}.npy'.format(subj, seq))
np.save(save_path, pred_geom_self.detach().cpu().numpy())
video_self_out_path = os.path.join(out_path, '{}_{}_self.mp4'.format(subj, seq))
render.to_video(pred_geom_self, audio_path, video_self_out_path, DatasetProperty.fps) |
def main(model, config):
set_seed(config.seed)
device = torch.device(config.device)
if device.type.startswith('cuda'):
torch.cuda.set_device((device.index or 0))
model_config = torch.load(config.config_load)
model_vocab = torch.load(config.vocab_load)
model_state = torch.load(config.model_load)
model = MODELS.get_model_class(model)(model_vocab, model_config)
model.load_state_dict(model_state)
model = model.to(device)
model.eval()
samples = []
n = config.n_samples
with tqdm(total=config.n_samples, desc='Generating samples') as T:
while (n > 0):
current_samples = model.sample(min(n, config.n_batch), config.max_len)
samples.extend(current_samples)
n -= len(current_samples)
T.update(len(current_samples))
samples = pd.DataFrame(samples, columns=['SMILES'])
samples.to_csv(config.gen_save, index=False) |
def make_sdist(project: TestProject, working_dir: Path) -> Path:
project_dir = (working_dir / 'project')
project_dir.mkdir(parents=True, exist_ok=True)
project.generate(project_dir)
sdist_dir = (working_dir / 'sdist')
subprocess.run([sys.executable, '-m', 'build', '--sdist', '--outdir', str(sdist_dir), str(project_dir)], check=True)
return next(sdist_dir.glob('*.tar.gz')) |
class ResNeXt(nn.Module):
def __init__(self, block, layers, sample_size=224, sample_duration=16, pretrained=True, shortcut_type='B', cardinality=32, num_classes=400):
self.inplanes = 64
super(ResNeXt, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type, cardinality)
self.layer2 = self._make_layer(block, 256, layers[1], shortcut_type, cardinality, stride=2)
self.layer3 = self._make_layer(block, 512, layers[2], shortcut_type, cardinality, stride=2)
self.layer4 = self._make_layer(block, 1024, layers[3], shortcut_type, cardinality, stride=2)
last_duration = int(math.ceil((sample_duration / 16)))
last_size = int(math.ceil((sample_size / 32)))
self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(((cardinality * 32) * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if pretrained:
pretrained_model = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/resnext-101-kinetics.pth')
logging.info("Network:: graph initialized, loading pretrained model: `{}'".format(pretrained_model))
assert os.path.exists(pretrained_model), "cannot locate: `{}'".format(pretrained_model)
pretrained = torch.load(pretrained_model)
load_state(self, pretrained['state_dict'])
else:
logging.info('Network:: graph initialized, use random inilization!')
def _make_layer(self, block, planes, blocks, shortcut_type, cardinality, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
if (shortcut_type == 'A'):
downsample = partial(downsample_basic_block, planes=(planes * block.expansion), stride=stride)
else:
downsample = nn.Sequential(nn.Conv3d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm3d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, cardinality, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, cardinality))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
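A quick shape check, assuming the companion `ResNeXtBottleneck` block (with `expansion = 2`) from the same repository is importable; with `pretrained=False` no checkpoint file is needed:

```python
import torch

# ResNeXt-101 layout: [3, 4, 23, 3] blocks per stage.
model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3],
                sample_size=224, sample_duration=16,
                pretrained=False, num_classes=400)
clip = torch.randn(2, 3, 16, 224, 224)  # (batch, channels, frames, H, W)
print(model(clip).shape)  # torch.Size([2, 400])
```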
class MP2Info():
def __init__(self, qmolecule, threshold=1e-12):
(self._terms, self._mp2_delta) = _compute_mp2(qmolecule, threshold)
self._mp2_energy = (qmolecule.hf_energy + self._mp2_delta)
self._num_orbitals = qmolecule.num_orbitals
self._core_orbitals = qmolecule.core_orbitals
@property
def mp2_delta(self):
return self._mp2_delta
@property
def mp2_energy(self):
return self._mp2_energy
def mp2_terms(self, freeze_core=False, orbital_reduction=None):
orbital_reduction = (orbital_reduction if (orbital_reduction is not None) else [])
core_list = (self._core_orbitals if freeze_core else [])
reduce_list = orbital_reduction
reduce_list = [((x + self._num_orbitals) if (x < 0) else x) for x in reduce_list]
remove_orbitals = sorted(set(core_list).union(set(reduce_list)))
remove_spin_orbitals = (remove_orbitals + [(x + self._num_orbitals) for x in remove_orbitals])
full_spin_orbs = [*range(0, (2 * self._num_orbitals))]
remain_spin_orbs = ([(- 1)] * len(full_spin_orbs))
new_idx = 0
for (i, _) in enumerate(full_spin_orbs):
if (full_spin_orbs[i] in remove_spin_orbitals):
full_spin_orbs[i] = (- 1)
continue
remain_spin_orbs[i] = new_idx
new_idx += 1
ret_terms = {}
for (k, v) in self._terms.items():
orbs = _str_to_list(k)
if (set(orbs) <= set(full_spin_orbs)):
new_idxs = [remain_spin_orbs[elem] for elem in orbs]
(coeff, e_delta) = v
ret_terms[_list_to_str(new_idxs)] = (coeff, e_delta)
return ret_terms
def mp2_get_term_info(self, excitation_list, freeze_core=False, orbital_reduction=None):
terms = self.mp2_terms(freeze_core, orbital_reduction)
coeffs = []
e_deltas = []
for excitation in excitation_list:
if (len(excitation) != 4):
raise ValueError('Excitation entry must be of length 4')
key = _list_to_str(excitation)
if (key in terms):
(coeff, e_delta) = terms[key]
coeffs.append(coeff)
e_deltas.append(e_delta)
else:
raise ValueError('Excitation {} not present in mp2 terms'.format(excitation))
return (coeffs, e_deltas) |
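A usage sketch, assuming `qmolecule` comes from a completed Hartree-Fock driver run; the excitation indices are illustrative spin-orbital labels, not values known to be present:

```python
mp2 = MP2Info(qmolecule)
print(mp2.mp2_energy - mp2.mp2_delta)    # recovers qmolecule.hf_energy
terms = mp2.mp2_terms(freeze_core=True)  # excitation string -> (coeff, energy delta)
coeffs, e_deltas = mp2.mp2_get_term_info([[0, 1, 4, 5]], freeze_core=True)
```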
class _IPC:
@staticmethod
def unpack(data: bytes, *, is_json: (bool | None)=None) -> tuple[Any, bool]:
if ((is_json is None) or is_json):
try:
return (json.loads(data.decode()), True)
except ValueError as e:
if is_json:
raise IPCError('Unable to decode json data') from e
try:
assert (len(data) >= HDRLEN)
size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
assert (size >= len(data[HDRLEN:]))
return (marshal.loads(data[HDRLEN:(HDRLEN + size)]), False)
except AssertionError as e:
raise IPCError('error reading reply! (probably the socket was disconnected)') from e
@staticmethod
def pack(msg: Any, *, is_json: bool=False) -> bytes:
if is_json:
json_obj = json.dumps(msg, default=_IPC._json_encoder)
return json_obj.encode()
msg_bytes = marshal.dumps(msg)
size = struct.pack(HDRFORMAT, len(msg_bytes))
return (size + msg_bytes)
@staticmethod
def _json_encoder(field: Any) -> Any:
if isinstance(field, set):
return list(field)
raise ValueError(f'Tried to JSON serialize unsupported type {type(field)}: {field}') |
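A round-trip sketch for both wire formats; `HDRFORMAT`/`HDRLEN` are defined elsewhere, assumed here to be something like `HDRFORMAT = '!L'` with `HDRLEN = struct.calcsize(HDRFORMAT)`:

```python
payload = {'command': 'status', 'args': [1, 2]}

wire = _IPC.pack(payload)                       # length header + marshal body
obj, was_json = _IPC.unpack(wire, is_json=False)
assert obj == payload and was_json is False

wire = _IPC.pack(payload, is_json=True)         # bare JSON bytes, no header
obj, was_json = _IPC.unpack(wire)               # is_json=None tries JSON first
assert obj == payload and was_json is True
```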
@pytest.mark.slow
@check_figures_equal()
def test_DecisionMatrixPlotter_heatmap(decision_matrix, fig_test, fig_ref):
dm = decision_matrix(seed=42, min_alternatives=3, max_alternatives=3, min_criteria=3, max_criteria=3)
plotter = plot.DecisionMatrixPlotter(dm=dm)
test_ax = fig_test.subplots()
plotter.heatmap(ax=test_ax)
df = dm.matrix
df.columns = [f'{c} {o.to_symbol()}' for (c, o) in zip(dm.criteria, dm.objectives)]
df.columns.name = 'Criteria'
exp_ax = fig_ref.subplots()
sns.heatmap(df, ax=exp_ax, annot=True) |
class VibrationalStructureResult(EigenstateResult):
def __init__(self) -> None:
super().__init__()
self._algorithm_result: Optional[AlgorithmResult] = None
self._computed_vibrational_energies: Optional[np.ndarray] = None
self._num_occupied_modals_per_mode: Optional[List[List[float]]] = None
@property
def algorithm_result(self) -> Optional[AlgorithmResult]:
return self._algorithm_result
@algorithm_result.setter
def algorithm_result(self, value: AlgorithmResult) -> None:
self._algorithm_result = value
@property
def computed_vibrational_energies(self) -> Optional[np.ndarray]:
return self._computed_vibrational_energies
@computed_vibrational_energies.setter
def computed_vibrational_energies(self, value: np.ndarray) -> None:
self._computed_vibrational_energies = value
@property
def num_occupied_modals_per_mode(self) -> Optional[List[List[float]]]:
return self._num_occupied_modals_per_mode
@num_occupied_modals_per_mode.setter
def num_occupied_modals_per_mode(self, value: List[List[float]]) -> None:
self._num_occupied_modals_per_mode = value
def __str__(self) -> str:
return '\n'.join(self.formatted())
def formatted(self) -> List[str]:
lines = []
lines.append('=== GROUND STATE ===')
lines.append(' ')
lines.append(f'* Vibrational ground state energy (cm^-1): {np.round(self.computed_vibrational_energies[0], self.formatting_precision)}')
if ((self.num_occupied_modals_per_mode is not None) and (len(self.num_occupied_modals_per_mode) > 0)):
lines.append('The number of occupied modals for each mode is: ')
for (i, m) in enumerate(self.num_occupied_modals_per_mode[0]):
lines.append(f'- Mode {i}: {np.round(m, self.formatting_precision)}')
if ((self.computed_vibrational_energies is not None) and (len(self.computed_vibrational_energies) > 1)):
lines.append(' ')
lines.append('=== EXCITED STATES ===')
lines.append(' ')
for (idx, vib_energy) in enumerate(self.computed_vibrational_energies[1:]):
lines.append(f'* {(idx + 1): 3d}: Vibrational excited state energy (cm^-1): {np.round(vib_energy, self.formatting_precision)}')
if (idx < len(self.num_occupied_modals_per_mode)):
lines.append('The number of occupied modals for each mode is')
for (i, m) in enumerate(self.num_occupied_modals_per_mode[idx]):
lines.append(f'- Mode {i}: {np.round(m, self.formatting_precision)}')
lines.append(' ')
return lines |
@register_stabilize
@register_specialize
@node_rewriter([log])
def local_log_add_exp(fgraph, node):
if (node.op == log):
z = node.inputs[0]
if (z.owner and (z.owner.op == add)):
zi = z.owner.inputs
pre_exp = [x.owner.inputs[0] for x in zi if (x.owner and (x.owner.op == exp))]
if (len(pre_exp) == len(zi)):
max_pre = reduce(maximum, pre_exp)
ret = (max_pre + log(add(*[switch(isinf(max_pre), exp(max_pre), exp((p - max_pre))) for p in pre_exp])))
return [ret] |
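A quick numeric check that the stabilization fires in PyTensor's default mode: without it, `log(exp(1000) + exp(1000))` overflows to `inf`; with it, the graph computes `max + log(sum(exp(x - max)))`:

```python
import numpy as np
import pytensor
import pytensor.tensor as pt

x = pt.vector('x')
y = pt.vector('y')
f = pytensor.function([x, y], pt.log(pt.exp(x) + pt.exp(y)))
print(f(np.array([1000.0]), np.array([1000.0])))  # ~[1000.6931], not inf
```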
def simplified_domain_concatenation(children, mesh, copy_this=None):
concat = DomainConcatenation(children, mesh, copy_this=copy_this)
if all((isinstance(child, pybamm.StateVector) for child in children)):
longest_eval_array = len(children[(- 1)]._evaluation_array)
eval_arrays = {}
for child in children:
eval_arrays[child] = np.concatenate([child.evaluation_array, np.zeros((longest_eval_array - len(child.evaluation_array)))])
first_start = children[0].y_slices[0].start
last_stop = children[(- 1)].y_slices[(- 1)].stop
if all((sum((array for array in eval_arrays.values()))[first_start:last_stop] == 1)):
return pybamm.StateVector(slice(first_start, last_stop), domains=concat.domains)
return pybamm.simplify_if_constant(concat) |
@component
def ArtistList():
(artists, set_artists) = use_state(['Marta Colvin Andrade', 'Lamidi Olonade Fakeye', 'Louise Nevelson'])
def handle_sort_click(event):
set_artists(sorted(artists))
def handle_reverse_click(event):
set_artists(list(reversed(artists)))
return html.div(html.h1('Inspiring sculptors:'), html.button({'on_click': handle_sort_click}, 'sort'), html.button({'on_click': handle_reverse_click}, 'reverse'), html.ul([html.li({'key': name}, name) for name in artists])) |
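The component can be served with ReactPy's standard entry point (assuming `component`, `html`, and `use_state` in the snippet were imported from `reactpy`):

```python
from reactpy import run

run(ArtistList)  # starts a development server and renders the component
```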
@key_server.route('/services/<service>/keys/<kid>', methods=['PUT'])
def put_service_key(service, kid):
metadata = {'ip': get_request_ip()}
rotation_duration = request.args.get('rotation', None)
expiration_date = request.args.get('expiration', None)
if (expiration_date is not None):
try:
expiration_date = datetime.utcfromtimestamp(float(expiration_date))
except ValueError:
logger.exception('Error parsing expiration date on key')
abort(400)
try:
jwk = request.get_json()
except ValueError:
logger.exception('Error parsing JWK')
abort(400)
jwt_header = request.headers.get(JWT_HEADER_NAME, '')
match = jwtutil.TOKEN_REGEX.match(jwt_header)
if (match is None):
logger.error('Could not find matching bearer token')
abort(400)
encoded_jwt = match.group(1)
_validate_jwk(jwk)
signer_kid = _signer_kid(encoded_jwt, allow_none=True)
if ((kid == signer_kid) or (signer_kid is None)):
_validate_jwt(encoded_jwt, jwk, service)
model.create_service_key('', kid, service, jwk, metadata, expiration_date, rotation_duration=rotation_duration)
logs_model.log_action('service_key_create', ip=get_request_ip(), metadata={'kid': kid, 'preshared': False, 'service': service, 'name': '', 'expiration_date': expiration_date, 'user_agent': request.headers.get('User-Agent'), 'ip': get_request_ip()})
return make_response('', 202)
metadata.update({'created_by': 'Key Rotation'})
signer_key = _lookup_service_key(service, signer_kid)
signer_jwk = signer_key.jwk
_validate_jwt(encoded_jwt, signer_jwk, service)
try:
model.replace_service_key(signer_key.kid, kid, jwk, metadata, expiration_date)
except ServiceKeyDoesNotExist:
abort(404)
logs_model.log_action('service_key_rotate', ip=get_request_ip(), metadata={'kid': kid, 'signer_kid': signer_key.kid, 'service': service, 'name': signer_key.name, 'expiration_date': expiration_date, 'user_agent': request.headers.get('User-Agent'), 'ip': get_request_ip()})
return make_response('', 200) |
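A client-side sketch of the initial (self-signed) upload path; the host, kid, bearer token, and JWK body are placeholders, and the JWT header is assumed to be the standard `Authorization: Bearer ...` form that `TOKEN_REGEX` matches:

```python
import requests

resp = requests.put(
    'https://quay.example.com/services/myservice/keys/mykid',
    params={'expiration': 1735689600},  # optional unix timestamp
    headers={'Authorization': 'Bearer <jwt signed with the uploaded key>'},
    json={'kty': 'RSA', 'n': '...', 'e': 'AQAB', 'kid': 'mykid'},
)
assert resp.status_code == 202  # created via the self-signed branch
```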
def test_pth_in_site_vs_python_path(tmp_path):
session = cli_run([str(tmp_path)])
site_packages = str(session.creator.purelib)
with open(os.path.join(site_packages, 'test.pth'), 'w', encoding='utf-8') as f:
f.write('import sys; sys.testpth="ok"\n')
out = subprocess.check_output([str(session.creator.exe), '-c', 'import sys; print(sys.testpth)'], text=True, encoding='utf-8')
assert (out == 'ok\n')
env = os.environ.copy()
path = [site_packages]
if ('PYTHONPATH' in env):
path.append(env['PYTHONPATH'])
env['PYTHONPATH'] = os.pathsep.join(path)
out = subprocess.check_output([str(session.creator.exe), '-c', 'import sys; print(sys.testpth)'], text=True, env=env, encoding='utf-8')
assert (out == 'ok\n') |
class CorrMM_gradWeights(BaseCorrMM):
_direction = 'backprop weights'
def make_node(self, img, topgrad, shape=None):
img = as_tensor_variable(img)
topgrad = as_tensor_variable(topgrad)
(img, topgrad) = self.as_common_dtype(img, topgrad)
if (img.type.ndim != 4):
raise TypeError('img must be 4D tensor')
if (topgrad.type.ndim != 4):
raise TypeError('topgrad must be 4D tensor')
if (shape is None):
if ((self.subsample != (1, 1)) or (self.border_mode == 'half')):
raise ValueError('shape must be given if subsample != (1, 1) or border_mode == "half"')
height_width = []
else:
height_width = [as_tensor_variable(shape[0]).astype('int64'), as_tensor_variable(shape[1]).astype('int64')]
if (self.unshared is True):
out_shape = [(1 if (topgrad.type.shape[1] == 1) else None), None, None, (1 if (img.type.shape[1] == 1) else None), None, None]
else:
out_shape = [(1 if (topgrad.type.shape[1] == 1) else None), (1 if (img.type.shape[1] == 1) else None), None, None]
dtype = img.type.dtype
return Apply(self, ([img, topgrad] + height_width), [TensorType(dtype, shape=out_shape)()])
def infer_shape(self, fgraph, node, input_shape):
if (self.border_mode == 'half'):
padH_l = padH_r = padW_l = padW_r = (- 1)
elif (self.border_mode == 'full'):
padH_l = padH_r = padW_l = padW_r = (- 2)
elif isinstance(self.border_mode, tuple):
border = ()
for mode in self.border_mode:
if isinstance(mode, tuple):
border += ((int(mode[0]), int(mode[1])),)
else:
border += ((int(mode), int(mode)),)
((padH_l, padH_r), (padW_l, padW_r)) = border
else:
assert (self.border_mode == 'valid')
padH_l = padH_r = padW_l = padW_r = 0
(dH, dW) = self.subsample
imshp = input_shape[0]
topshp = input_shape[1]
(ssize, imshp) = (imshp[1], list(imshp[2:]))
ssize = (ssize // self.num_groups)
(nkern, topshp) = (topshp[1], list(topshp[2:]))
height_width = node.inputs[(- 2):]
if ((dH != 1) or (padH_l == (- 1)) or (padH_r == (- 1))):
kH = height_width[0]
elif ((padH_l == (- 2)) or (padH_r == (- 2))):
kH = ((2 - imshp[0]) + ((topshp[0] - 1) * dH))
else:
kH = (((imshp[0] + padH_l) + padH_r) - ((topshp[0] - 1) * dH))
if ((dW != 1) or (padW_l == (- 1)) or (padW_r == (- 1))):
kW = height_width[1]
elif ((padW_l == (- 2)) or (padW_r == (- 2))):
kW = ((2 - imshp[1]) + ((topshp[1] - 1) * dW))
else:
kW = (((imshp[1] + padW_l) + padW_r) - ((topshp[1] - 1) * dW))
if (self.unshared is True):
return [(nkern, topshp[0], topshp[1], ssize, kH, kW)]
else:
return [(nkern, ssize, kH, kW)]
def c_code(self, node, nodename, inp, out_, sub):
(bottom, top) = inp[:2]
(height, width) = (inp[2:] or (None, None))
(weights,) = out_
return super().c_code_helper(bottom, weights, top, sub, height, width)
def grad(self, inp, grads):
(bottom, top) = inp[:2]
(weights,) = grads
d_bottom = CorrMM_gradInputs(self.border_mode, self.subsample, self.filter_dilation, self.num_groups, self.unshared)(weights, top, bottom.shape[(- 2):])
d_top = CorrMM(self.border_mode, self.subsample, self.filter_dilation, self.num_groups, self.unshared)(bottom, weights)
d_height_width = (((pytensor.gradient.DisconnectedType()(),) * 2) if (len(inp) == 4) else ())
return ((d_bottom, d_top) + d_height_width)
def connection_pattern(self, node):
if (node.nin == 2):
return [[1], [1]]
else:
return [[1], [1], [0], [0]] |
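A worked instance of the 'valid' branch of the shape arithmetic in `infer_shape`, checking that the recovered kernel height is consistent with the usual valid-convolution relation `top = img - k + 1`:

```python
# kH = imshp[0] + padH_l + padH_r - (topshp[0] - 1) * dH, with zero padding.
img_h, top_h, dH = 28, 24, 1  # illustrative sizes
kH = img_h + 0 + 0 - (top_h - 1) * dH
assert kH == 5 and img_h - kH + 1 == top_h
```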