| id | content |
|---|---|
195441
|
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from nornir_netmiko import netmiko_send_command
def send_command(task):
task.run(task=netmiko_send_command, command_string="set cli complete-on-space off")
task.run(task=netmiko_send_command, command_string="show ip interface")
def main():
nr = InitNornir(config_file="config.yaml")
nr = nr.filter(name="srx2")
agg_result = nr.run(task=send_command)
print_result(agg_result)
if __name__ == "__main__":
main()
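# Hedged sketch of the config.yaml this script expects (file names and worker
# count are illustrative assumptions, not from the source):
#
#   inventory:
#     plugin: SimpleInventory
#     options:
#       host_file: "hosts.yaml"
#       group_file: "groups.yaml"
#   runner:
#     plugin: threaded
#     options:
#       num_workers: 10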
|
195591
|
from __future__ import absolute_import, division, print_function
from trakt.core.errors import ERRORS
class RequestFailedError(Exception):
pass
class RequestError(Exception):
def __init__(self, response):
self.response = response
self.status_code = response.status_code if response is not None else None
self.error = ERRORS.get(self.status_code, ('Unknown', 'Unknown'))
# Call super class with message
super(RequestError, self).__init__('%s - "%s"' % self.error)
class ClientError(RequestError):
pass
class ServerError(RequestError):
pass
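# Hedged usage sketch (not part of the original module): dispatching a
# requests.Response-like object onto the error hierarchy above.
def raise_for_response(response):
    if response is None:
        raise RequestFailedError('no response received')
    if 400 <= response.status_code < 500:
        raise ClientError(response)
    if response.status_code >= 500:
        raise ServerError(response)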
|
195617
|
from django.contrib import admin
from .models import Feature
class FeatureAdmin(admin.ModelAdmin):
list_display = ('title', 'desc', 'approved', 'closed', 'created')
list_filter = ('approved', 'closed')
search_fields = ('creator__email',)
date_hierarchy = 'created'
raw_id_fields = ('creator',)
autocomplete_lookup_fields = {'fk': ['creator']}
admin.site.register(Feature, FeatureAdmin)
|
195624
|
class DebuggerStepperBoundaryAttribute(Attribute, _Attribute):
    """
    Indicates the code following the attribute is to be executed in run, not step, mode.
    DebuggerStepperBoundaryAttribute()
    """
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __reduce_ex__(self, *args):
        pass
|
195633
|
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
import numpy as np
import pytest
import os
import imageio
import matplotlib as mpl
import dps
from dps.datasets.load import load_backgrounds
from dps.datasets.base import EmnistDataset
from dps.utils import NumpySeed, resize_image
from auto_yolo.tf_ops import render_sprites
def get_session():
return tf.Session(config=tf.ConfigProto(log_device_placement=True))
def squash_01(val, squash_factor):
assert ((0 <= val) * (val <= 1)).all()
val = np.array(val, dtype=np.float32)
if squash_factor:
assert squash_factor > 0
return (val - 0.5) * squash_factor + 0.5
else:
return val
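# Worked example (illustrative): squash_01(np.array([0., 1.]), 0.9)
# -> array([0.05, 0.95]); values are pulled linearly toward 0.5.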
def _colorize(img, color=None):
""" Apply a color to a gray-scale image. """
color = mpl.colors.to_rgb(color)
color = np.array(color)[None, None, :]
color = np.uint8(255. * color)
rgb = np.tile(color, img.shape + (1,))
alpha = img[:, :, None]
return np.concatenate([rgb, alpha], axis=2).astype(np.uint8)
def make_patch(patch_shape, color, shape, importance):
f = os.path.join(os.path.dirname(dps.__file__), "datasets/shapes", "{}.png".format(shape))
image = imageio.imread(f)
image = resize_image(image[..., 3], patch_shape)
image = _colorize(image, color)
image = (image / 255.).astype('f')
imp = np.maximum(importance * image[..., 3:4], 0.01)
image = np.concatenate([image, imp], axis=2)
return image
def _get_data():
image_shape = (100, 100)
batch_size = 16
shapes = ((50, 50), (25, 25), (12, 12), (6, 6))
n_sprites = [4, 8, 16, 32]
# sprite_shapes = [(14, 14)]
# n_sprites = [2]
# n_sprites = [16]
sprite_shapes = [(50, 50), (25, 25), (12, 12)]
n_sprites = [2, 4, 8]
n_flights = len(n_sprites)
shapes = 'circle diamond hollow_circle plus star triangle ud_triangle x'.split()
colors = list('rgbcmy')
bg_colors = list('w')
sprites = [[] for i in range(n_flights)]
sprite_color_names = [[] for i in range(n_flights)]
sprite_shape_names = [[] for i in range(n_flights)]
backgrounds = []
for b in range(batch_size):
for i, (ss, ns) in enumerate(zip(sprite_shapes, n_sprites)):
c = np.random.choice(colors, size=ns)
s = np.random.choice(shapes, size=ns)
importances = [4**i] * ns
patches = [make_patch(ss, _c, _s, i) for _c, _s, i in zip(c, s, importances)]
sprites[i].append(patches)
sprite_color_names[i].append(c)
sprite_shape_names[i].append(s)
bg_color = np.random.choice(bg_colors)
bg_shape = np.random.choice(shapes)
bg = make_patch(image_shape, bg_color, bg_shape, 1.0)
bg = bg[..., :3]
backgrounds.append(bg)
sprites = [np.array(s).astype('f') for s in sprites]
scales = [
(np.ones((batch_size, ns, 2)) * (np.array(ss) / np.array(image_shape))).astype('f')
for ss, ns in zip(sprite_shapes, n_sprites)]
offsets = [np.random.rand(*s.shape).astype('f') * (1-s) for s in scales]
for b in range(batch_size):
print("\n\n")
print("Batch element : {}".format(b))
for f in range(n_flights):
print('\n')
print('flight : {}'.format(f))
print(sprite_color_names[f][b])
print(sprite_shape_names[f][b])
print(scales[f][b])
print(offsets[f][b])
backgrounds = np.array(backgrounds).astype('f')
return sprites, scales, offsets, backgrounds
def get_data(random_alpha=False, squash=None):
draw_shape = (56, 56)
batch_size = 2
dset = EmnistDataset(classes=[0, 1, 2, 3], include_blank=False, n_examples=100, shape=(28, 28), one_hot=False)
white = np.array([1., 1., 1.])[None, None, :]
black = np.array([0., 0., 0.])[None, None, :]
green = np.array([0., 1., 0.])[None, None, :]
cyan = np.array([0., 1., 1.])[None, None, :]
colours = [white, black, green, cyan]
sprite_pool = [dset.x[list(dset.y).index(idx)][..., None] / 255. for idx in range(4)]
_sprite_pool = []
for i, sp in enumerate(sprite_pool):
colour = colours[i]
if random_alpha:
alpha = np.random.rand(*sp[..., :1].shape)
else:
alpha = (sp.sum(-1) > 0)[..., None].astype('f')
alpha = squash_01(alpha, squash)
sp = colour * sp
sp = np.concatenate([sp, alpha], axis=-1)
_sprite_pool.append(sp)
sprite_pool = _sprite_pool
first0, first1, first2, first3 = sprite_pool
sprites0 = np.stack([first0, first1, first2, first3], axis=0)
sprites1 = np.stack([first3, first2, first1, np.zeros_like(first1)], axis=0)
sprites = np.stack([sprites0, sprites1], axis=0).astype('f')
    max_sprites = sprites.shape[1]
    scales = np.ones((batch_size, max_sprites, 2)).astype('f')
offsets = np.zeros_like(scales)
backgrounds = np.array(load_backgrounds("red_x blue_circle", draw_shape)) / 255.
backgrounds = backgrounds.astype('f')
sprites = squash_01(sprites, squash)
scales = squash_01(scales, squash)
offsets = squash_01(offsets, squash)
backgrounds = squash_01(backgrounds, squash)
return [sprites], [scales], [offsets], backgrounds
def run(device, show_plots, process_data=None, **get_data_kwargs):
with NumpySeed(100):
data = get_data(**get_data_kwargs)
if process_data is None:
process_data = lambda *x: x
sprites, scales, offsets, backgrounds = process_data(*data)
with tf.device('/{}:0'.format(device)):
images = render_sprites.render_sprites(sprites, scales, offsets, backgrounds)
sess = get_session()
result = sess.run(images)
result = np.clip(result, 1e-6, 1-1e-6)
if show_plots:
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(result[0])
ax2.imshow(result[1])
plt.show()
def visible_gpu():
    d = os.getenv("CUDA_VISIBLE_DEVICES", "").split(",")[0]
try:
d = int(d)
except Exception:
return False
return d >= 0
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_mostly_opaque(device, show_plots):
if device == "gpu" and visible_gpu():
pytest.xfail("no gpu is visible")
def process_data(sprites, scales, offsets, backgrounds):
batch_size, max_sprites, *_ = sprites.shape
sprites[..., 3] = 1.0 # Make the image opaque
scales = 0.5 * np.ones((batch_size, max_sprites, 2)).astype('f')
offsets = np.array([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])
offsets = np.tile(offsets[None, ...], (batch_size, 1, 1)).astype('f')
return sprites, scales, offsets, backgrounds
run(device, show_plots, process_data)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_background_alpha(device, show_plots):
if device == "gpu" and visible_gpu():
pytest.xfail("no gpu is visible")
def process_data(sprites, scales, offsets, backgrounds):
batch_size, max_sprites, *_ = sprites.shape
scales = 0.5 * np.ones((batch_size, max_sprites, 2)).astype('f')
offsets = np.array([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])
offsets = np.tile(offsets[None, ...], (batch_size, 1, 1)).astype('f')
return sprites, scales, offsets, backgrounds
run(device, show_plots, process_data)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_overlap(device, show_plots):
if device == "gpu" and visible_gpu():
pytest.xfail("no gpu is visible")
run(device, show_plots)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
@pytest.mark.slow
def _test_gradient(device):
if device == "gpu" and visible_gpu():
pytest.xfail("no gpu is visible")
with NumpySeed(100):
with tf.device('/{}:0'.format(device)):
sprites, scales, offsets, backgrounds = get_data(random_alpha=True, squash=0.99)
sprites_tf = constant_op.constant(sprites)
scales_tf = constant_op.constant(scales)
offsets_tf = constant_op.constant(offsets)
backgrounds_tf = constant_op.constant(backgrounds)
images = render_sprites.render_sprites(sprites_tf, scales_tf, offsets_tf, backgrounds_tf)
sess = get_session()
with sess.as_default():
with tf.device(device):
err = gradient_checker.compute_gradient_error(
[sprites_tf, scales_tf, offsets_tf, backgrounds_tf],
[sprites.shape, scales.shape, offsets.shape, backgrounds.shape],
images,
backgrounds.shape,
[sprites, scales, offsets, backgrounds],
delta=0.002)
print("Jacobian error: {}".format(err))
threshold = 2e-4
assert err < threshold, "Jacobian error ({}) exceeded threshold ({})".format(err, threshold)
if __name__ == "__main__":
from contextlib import ExitStack
with NumpySeed(100000):
sprites, scales, offsets, backgrounds = _get_data()
device = 'gpu'
print("Running...")
session_config = tf.ConfigProto()
session_config.log_device_placement = 1
session_config.gpu_options.per_process_gpu_memory_fraction = 0.1
session_config.gpu_options.allow_growth = True
graph = tf.Graph()
sess = tf.Session(graph=graph, config=session_config)
with ExitStack() as stack:
stack.enter_context(graph.as_default())
stack.enter_context(sess)
stack.enter_context(sess.as_default())
sprites_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in sprites]
scales_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in scales]
offsets_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in offsets]
backgrounds_ph = tf.placeholder(tf.float32, (None, *backgrounds.shape[1:]))
with tf.device('/{}:0'.format(device)):
images = render_sprites.render_sprites(sprites_ph, scales_ph, offsets_ph, backgrounds_ph)
d = {}
d.update({ph: a for ph, a in zip(sprites_ph, sprites)})
d.update({ph: a for ph, a in zip(scales_ph, scales)})
d.update({ph: a for ph, a in zip(offsets_ph, offsets)})
d[backgrounds_ph] = backgrounds
result = sess.run(images, feed_dict=d)
from dps.utils import image_to_string
print(image_to_string(result[0, ..., 0]))
print()
print(image_to_string(result[0, ..., 1]))
print()
print(image_to_string(result[0, ..., 2]))
print()
print(result)
print("Done running.")
# Sometimes we get values like 1.0001, nothing really bad.
result = np.clip(result, 1e-6, 1-1e-6)
import matplotlib.pyplot as plt
from dps.utils import square_subplots
fig, axes = square_subplots(len(sprites[0]))
fig.suptitle(device)
for img, ax in zip(result, axes.flatten()):
ax.imshow(img)
plt.show()
|
195678
|
from django.dispatch import Signal
user_logged_in = Signal()
user_login_failed = Signal()
user_logged_out = Signal()
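# Hedged usage sketch (commented out; the receiver and its keyword arguments
# are illustrative assumptions):
# from django.dispatch import receiver
#
# @receiver(user_logged_in)
# def handle_user_logged_in(sender, **kwargs):
#     ...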
|
195798
|
import matplotlib.pyplot as plt
import LensSim
from LensSystem import LensSystem
if __name__ == "__main__":
# load lens
lsys = LensSystem("../data/dgauss50mm.json", width=512,
height=512, width_length=0.025, height_length=0.025)
# print lens parameters
print("EFL: {0}".format(lsys.effective_focal_length()))
print("FFL: {0}".format(lsys.front_focal_length()))
print("BFL: {0}".format(lsys.back_focal_length()))
# plot optical path diagram
lsys.optical_path_diagram()
plt.show()
|
195824
|
from functools import singledispatch, reduce
import sympy
try:
import symengine
except ImportError:
class Mock:
def __getattribute__(self, name):
return Mock
symengine = Mock()
import gem
@singledispatch
def sympy2gem(node, self):
raise AssertionError("sympy/symengine node expected, got %s" % type(node))
@sympy2gem.register(sympy.Expr)
@sympy2gem.register(symengine.Expr)
def sympy2gem_expr(node, self):
raise NotImplementedError("no handler for sympy/symengine node type %s" % type(node))
@sympy2gem.register(sympy.Add)
@sympy2gem.register(symengine.Add)
def sympy2gem_add(node, self):
return reduce(gem.Sum, map(self, node.args))
@sympy2gem.register(sympy.Mul)
@sympy2gem.register(symengine.Mul)
def sympy2gem_mul(node, self):
return reduce(gem.Product, map(self, node.args))
@sympy2gem.register(sympy.Pow)
@sympy2gem.register(symengine.Pow)
def sympy2gem_pow(node, self):
return gem.Power(*map(self, node.args))
@sympy2gem.register(sympy.Integer)
@sympy2gem.register(symengine.Integer)
@sympy2gem.register(int)
def sympy2gem_integer(node, self):
return gem.Literal(int(node))
@sympy2gem.register(sympy.Float)
@sympy2gem.register(symengine.Float)
@sympy2gem.register(float)
def sympy2gem_float(node, self):
return gem.Literal(float(node))
@sympy2gem.register(sympy.Symbol)
@sympy2gem.register(symengine.Symbol)
def sympy2gem_symbol(node, self):
return self.bindings[node]
@sympy2gem.register(sympy.Rational)
@sympy2gem.register(symengine.Rational)
def sympy2gem_rational(node, self):
return gem.Division(*(map(self, node.as_numer_denom())))
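# Hedged recursion-helper sketch (not part of the original module): the
# handlers above call `self` on child nodes and read `self.bindings` for
# symbols, so a minimal driver is a callable that carries a bindings dict.
class _Sympy2GemDriver:
    def __init__(self, bindings):
        # maps sympy/symengine Symbols to gem expressions
        self.bindings = bindings

    def __call__(self, node):
        return sympy2gem(node, self)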
|
195831
|
import os
import platform
import numpy as np
from simtk import unit
import time
import pytest
from testsystems.relative import hif2a_ligand_pair
from md.builders import build_water_system
from md.minimizer import minimize_host_4d
from fe.free_energy import AbsoluteFreeEnergy
from md.states import CoordsVelBox
from md.ensembles import PotentialEnergyModel, NPTEnsemble
from md.thermostat.moves import UnadjustedLangevinMove
from md.barostat.moves import MonteCarloBarostat, CentroidRescaler
from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center
from md.utils import simulate_npt_traj
from md.thermostat.utils import sample_velocities
from timemachine.lib import LangevinIntegrator, custom_ops
from functools import partial
from timemachine.constants import BOLTZ, ENERGY_UNIT, DISTANCE_UNIT
def test_barostat_zero_interval():
pressure = 1.0 * unit.atmosphere
temperature = 300.0 * unit.kelvin
initial_waterbox_width = 2.5 * unit.nanometer
barostat_interval = 0
seed = 2021
np.random.seed(seed)
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
afe = AbsoluteFreeEnergy(mol_a, ff)
unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, complex_coords
)
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
bound_potentials = []
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
u_impls = []
for bp in bound_potentials:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
with pytest.raises(RuntimeError):
custom_ops.MonteCarloBarostat(
coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
0,
u_impls,
seed,
)
# Setting it to 1 should be valid.
baro = custom_ops.MonteCarloBarostat(
coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
1,
u_impls,
seed,
)
# Setting back to 0 should raise another error
with pytest.raises(RuntimeError):
baro.set_interval(0)
def get_platform_version() -> str:
release_path = "/etc/os-release"
if os.path.isfile(release_path):
# AWS Ubuntu 20.04 doesn't have version in uname...
with open(release_path, "r") as ifs:
for line in ifs.readlines():
if line.startswith("PRETTY_NAME="):
platform_version = line.strip()
else:
platform_version = platform.version()
return platform_version.lower()
def test_barostat_partial_group_idxs():
"""Verify that the barostat can handle a subset of the molecules
rather than all of them. This test only verify that it runs, not the behavior"""
temperature = 300.0 * unit.kelvin
initial_waterbox_width = 3.0 * unit.nanometer
timestep = 1.5 * unit.femtosecond
barostat_interval = 3
collision_rate = 1.0 / unit.picosecond
seed = 2021
np.random.seed(seed)
pressure = 1.0 * unit.atmosphere
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
afe = AbsoluteFreeEnergy(mol_a, ff)
unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, min_complex_coords
)
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
# Cut the number of groups in half
group_indices = group_indices[len(group_indices) // 2 :]
lam = 1.0
bound_potentials = []
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
u_impls = []
for bp in bound_potentials:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
integrator = LangevinIntegrator(
temperature.value_in_unit(unit.kelvin),
timestep.value_in_unit(unit.picosecond),
collision_rate.value_in_unit(unit.picosecond ** -1),
masses,
seed,
)
integrator_impl = integrator.impl()
v_0 = sample_velocities(masses * unit.amu, temperature)
baro = custom_ops.MonteCarloBarostat(
coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
barostat_interval,
u_impls,
seed,
)
ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
ctxt.multiple_steps(np.ones(1000) * lam)
def test_barostat_is_deterministic():
"""Verify that the barostat results in the same box size shift after 1000
    steps. This is important for debugging as well as for the ability to replicate
    simulations.
"""
platform_version = get_platform_version()
lam = 1.0
temperature = 300.0 * unit.kelvin
initial_waterbox_width = 3.0 * unit.nanometer
timestep = 1.5 * unit.femtosecond
barostat_interval = 3
collision_rate = 1.0 / unit.picosecond
seed = 2021
np.random.seed(seed)
    # OpenEye's AM1 charging values are OS-platform dependent. To ensure that we have deterministic
    # values, we check against our two most common OS versions, Ubuntu 18.04 and 20.04.
box_vol = 26.869380588831582
lig_charge_vals = np.array(
[1.4572377542719206, -0.37011462071257184, 1.1478267014520305, -4.920284514559682, 0.16985194917937935]
)
if "ubuntu" not in platform_version:
print(f"Test expected to run under ubuntu 20.04 or 18.04, got {platform_version}")
if "18.04" in platform_version:
box_vol = 26.711716908713402
lig_charge_vals[3] = -4.920166483601927
pressure = 1.0 * unit.atmosphere
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
afe = AbsoluteFreeEnergy(mol_a, ff)
unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, min_complex_coords
)
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
u_impls = []
# Look at the first five atoms and their assigned charges
ligand_charges = sys_params[-1][:, 0][len(min_complex_coords) :][:5]
np.testing.assert_array_almost_equal(lig_charge_vals, ligand_charges, decimal=5)
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
integrator = LangevinIntegrator(
temperature.value_in_unit(unit.kelvin),
timestep.value_in_unit(unit.picosecond),
collision_rate.value_in_unit(unit.picosecond ** -1),
masses,
seed,
)
integrator_impl = integrator.impl()
v_0 = sample_velocities(masses * unit.amu, temperature)
baro = custom_ops.MonteCarloBarostat(
coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
barostat_interval,
u_impls,
seed,
)
ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
ctxt.multiple_steps(np.ones(1000) * lam)
atm_box = ctxt.get_box()
np.testing.assert_almost_equal(compute_box_volume(atm_box), box_vol, decimal=5)
def test_barostat_varying_pressure():
temperature = 300.0 * unit.kelvin
initial_waterbox_width = 3.0 * unit.nanometer
timestep = 1.5 * unit.femtosecond
barostat_interval = 3
collision_rate = 1.0 / unit.picosecond
seed = 2021
np.random.seed(seed)
# Start out with a very large pressure
pressure = 1000.0 * unit.atmosphere
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
afe = AbsoluteFreeEnergy(mol_a, ff)
unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, min_complex_coords
)
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
lam = 1.0
u_impls = []
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
integrator = LangevinIntegrator(
temperature.value_in_unit(unit.kelvin),
timestep.value_in_unit(unit.picosecond),
collision_rate.value_in_unit(unit.picosecond ** -1),
masses,
seed,
)
integrator_impl = integrator.impl()
v_0 = sample_velocities(masses * unit.amu, temperature)
baro = custom_ops.MonteCarloBarostat(
coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
barostat_interval,
u_impls,
seed,
)
ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
ctxt.multiple_steps(np.ones(1000) * lam)
ten_atm_box = ctxt.get_box()
ten_atm_box_vol = compute_box_volume(ten_atm_box)
# Expect the box to shrink thanks to the barostat
assert compute_box_volume(complex_box) - ten_atm_box_vol > 0.4
# Set the pressure to 1 bar
baro.set_pressure((1 * unit.atmosphere).value_in_unit(unit.bar))
# Changing the barostat interval resets the barostat step.
baro.set_interval(2)
ctxt.multiple_steps(np.ones(2000) * lam)
atm_box = ctxt.get_box()
# Box will grow thanks to the lower pressure
assert compute_box_volume(atm_box) > ten_atm_box_vol
def test_molecular_ideal_gas():
"""
References
----------
OpenMM testIdealGas
https://github.com/openmm/openmm/blob/d8ef57fed6554ec95684e53768188e1f666405c9/tests/TestMonteCarloBarostat.h#L86-L140
"""
# simulation parameters
initial_waterbox_width = 3.0 * unit.nanometer
timestep = 1.5 * unit.femtosecond
collision_rate = 1.0 / unit.picosecond
n_moves = 10000
barostat_interval = 5
seed = 2021
# thermodynamic parameters
temperatures = np.array([300, 600, 1000]) * unit.kelvin
pressure = 100.0 * unit.bar # very high pressure, to keep the expected volume small
# generate an alchemical system of a waterbox + alchemical ligand:
# effectively discard ligands by running in AbsoluteFreeEnergy mode at lambda = 1.0
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
afe = AbsoluteFreeEnergy(mol_a, ff)
_unbound_potentials, _sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, min_complex_coords
)
# drop the nonbonded potential
unbound_potentials = _unbound_potentials[:-1]
sys_params = _sys_params[:-1]
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
volume_trajs = []
lam = 1.0
relative_tolerance = 1e-2
initial_relative_box_perturbation = 2 * relative_tolerance
n_molecules = complex_top.getNumResidues()
bound_potentials = []
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
u_impls = []
for bp in bound_potentials:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
# expected volume
md_pressure_unit = ENERGY_UNIT / DISTANCE_UNIT ** 3
pressure_in_md = (pressure * unit.AVOGADRO_CONSTANT_NA).value_in_unit(md_pressure_unit)
expected_volume_in_md = (n_molecules + 1) * BOLTZ * temperatures.value_in_unit(unit.kelvin) / pressure_in_md
for i, temperature in enumerate(temperatures):
# define a thermostat
integrator = LangevinIntegrator(
temperature.value_in_unit(unit.kelvin),
timestep.value_in_unit(unit.picosecond),
collision_rate.value_in_unit(unit.picosecond ** -1),
masses,
seed,
)
integrator_impl = integrator.impl()
v_0 = sample_velocities(masses * unit.amu, temperature)
# rescale the box to be approximately the desired box volume already
rescaler = CentroidRescaler(group_indices)
initial_volume = compute_box_volume(complex_box)
initial_center = compute_box_center(complex_box)
length_scale = ((1 + initial_relative_box_perturbation) * expected_volume_in_md[i] / initial_volume) ** (
1.0 / 3
)
new_coords = rescaler.scale_centroids(coords, initial_center, length_scale)
new_box = complex_box * length_scale
baro = custom_ops.MonteCarloBarostat(
new_coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
barostat_interval,
u_impls,
seed,
)
ctxt = custom_ops.Context(new_coords, v_0, new_box, integrator_impl, u_impls, barostat=baro)
vols = []
for move in range(n_moves // barostat_interval):
ctxt.multiple_steps(np.ones(barostat_interval))
new_box = ctxt.get_box()
volume = np.linalg.det(new_box)
vols.append(volume)
volume_trajs.append(vols)
equil_time = len(volume_trajs[0]) // 2 # TODO: don't hard-code this?
actual_volume_in_md = np.array([np.mean(volume_traj[equil_time:]) for volume_traj in volume_trajs])
np.testing.assert_allclose(actual=actual_volume_in_md, desired=expected_volume_in_md, rtol=relative_tolerance)
|
195833
|
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from vms.models import Dc
from gui.decorators import staff_required, ajax_required, admin_required, profile_required
from gui.utils import collect_view_data, redirect, get_query_string, Messages
from gui.dc.base.utils import get_dcs_extended
from gui.dc.base.forms import DcForm, DcSettingsForm, DefaultDcSettingsForm
from api.dc.utils import get_dc_or_404
def _dc_settings_msg_updated(request):
messages.success(request, _('Datacenter settings were successfully updated'))
def _dc_settings_msg_error(context):
msg = Messages()
msg.error(_('Datacenter settings were not updated. Please correct errors below'))
context['error'] = msg
@login_required
@admin_required
@profile_required
def dc_list(request):
"""
Datacenter management.
"""
context = collect_view_data(request, 'dc_list')
context['can_edit'] = can_edit = request.user.is_staff # DC owners have read-only rights
if can_edit:
pr = ('roles',)
context['all'] = _all = bool(request.GET.get('all', False))
context['form'] = DcForm(request, None, initial={'access': Dc.PRIVATE, 'owner': request.user.username})
context['settings_form'] = DcSettingsForm(request, None)
context['can_add'] = settings.VMS_DC_ENABLED
context['colspan'] = 9
else:
_all = False
pr = None # Groups are only visible by SuperAdmins
context['colspan'] = 8
context['qs'] = get_query_string(request, all=_all).urlencode()
context['dcs'] = get_dcs_extended(request, pr=pr)
return render(request, 'gui/dc/dc_list.html', context)
@login_required
@staff_required
@ajax_required
@require_POST
def dc_form(request):
"""
Ajax page for creating or updating datacenter.
"""
if request.POST['action'] == 'create':
dc = None
else:
dc = get_dc_or_404(request, request.POST['name'], api=False)
form = DcForm(request, dc, request.POST)
if form.is_valid():
status = form.save(args=(form.cleaned_data.get('name'),))
if status == 204:
return HttpResponse(None, status=status)
elif status in (200, 201):
messages.success(request, _('Datacenter settings were successfully updated'))
return redirect('dc_list', query_string=request.GET)
return render(request, 'gui/dc/dc_form.html', {'form': form})
@login_required
@staff_required
@ajax_required
@require_POST
def dc_settings_form(request):
"""
Ajax page for changing advanced datacenter settings.
"""
dc = get_dc_or_404(request, request.POST['dc'], api=False)
form = DcSettingsForm(request, dc, request.POST)
context = {'form': form}
if form.is_valid():
status = form.save(args=(dc.name,))
if status == 204:
return HttpResponse(None, status=status)
elif status in (200, 201):
_dc_settings_msg_updated(request)
return redirect('dc_list', query_string=request.GET)
_dc_settings_msg_error(context)
return render(request, 'gui/dc/dc_settings_form.html', context)
def _dc_settings_table(request, data=None):
"""Create basic context for rendering dc_settings_table.html"""
dc = request.dc
_all = bool(request.GET.get('all', False))
if _all:
form_class = DefaultDcSettingsForm
else:
form_class = DcSettingsForm
form = form_class(request, dc, data=data, init=True, table=True, disable_globals=_all and not dc.is_default())
return {
'all': _all,
'form': form,
'qs': get_query_string(request, all=_all).urlencode(),
'msg_global_setting': _('Global setting')
}
@login_required
@staff_required
def dc_settings(request):
"""
    Standalone page for datacenter settings. When used in the default DC we display all Danube Cloud settings.
"""
context = collect_view_data(request, 'dc_settings')
context.update(_dc_settings_table(request))
context['form'].set_mon_zabbix_server_login_error()
return render(request, 'gui/dc/dc_settings.html', context)
@login_required
@staff_required
@ajax_required
@require_POST
def dc_settings_table(request):
"""
Ajax page for changing all datacenter settings.
"""
context = _dc_settings_table(request, data=request.POST)
form = context['form']
if form.is_valid():
status = form.save(args=(request.dc.name,), action='update')
if status == 204:
return HttpResponse(None, status=status)
elif status in (200, 201):
_dc_settings_msg_updated(request)
return redirect('dc_settings', query_string=request.GET)
_dc_settings_msg_error(context)
form.set_mon_zabbix_server_login_error()
return render(request, 'gui/dc/dc_settings_table.html', context)
|
195907
|
from construct import Bytes, Int8ul, Int64ul, Padding
from construct import Struct as cStruct
from .account_flags import ACCOUNT_FLAGS_LAYOUT
MARKET_LAYOUT = cStruct(
Padding(5),
"account_flags" / ACCOUNT_FLAGS_LAYOUT,
"own_address" / Bytes(32),
"vault_signer_nonce" / Int64ul,
"base_mint" / Bytes(32),
"quote_mint" / Bytes(32),
"base_vault" / Bytes(32),
"base_deposits_total" / Int64ul,
"base_fees_accrued" / Int64ul,
"quote_vault" / Bytes(32),
"quote_deposits_total" / Int64ul,
"quote_fees_accrued" / Int64ul,
"quote_dust_threshold" / Int64ul,
"request_queue" / Bytes(32),
"event_queue" / Bytes(32),
"bids" / Bytes(32),
"asks" / Bytes(32),
"base_lot_size" / Int64ul,
"quote_lot_size" / Int64ul,
"fee_rate_bps" / Int64ul,
"referrer_rebate_accrued" / Int64ul,
Padding(7),
)
MINT_LAYOUT = cStruct(Padding(44), "decimals" / Int8ul, Padding(37))
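# Hedged usage sketch (commented out; real input would be raw account data
# fetched over RPC). All fields above are fixed-size, so a zero-filled buffer
# of MARKET_LAYOUT.sizeof() bytes parses cleanly:
# raw = bytes(MARKET_LAYOUT.sizeof())
# market = MARKET_LAYOUT.parse(raw)
# print(market.base_lot_size, market.quote_lot_size)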
|
195977
|
import os
import re
from setuptools import setup
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
    'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows :: Windows 95/98/2000',
'Topic :: System :: Systems Administration'
]
base_dir = os.path.dirname(__file__)
DUNDER_ASSIGN_RE = re.compile(r"""^__\w+__\s*=\s*['"].+['"]$""")
about = {}
with open(os.path.join(base_dir, "wmi.py"), "rb") as f:
for line in f.read().decode("utf-8").splitlines():
if DUNDER_ASSIGN_RE.search(line):
exec(line, about)
changes = ""
TO_STRIP = set([":class:", ":mod:", ":meth:", ":func:", ":doc:"])
with open(os.path.join(base_dir, "README.rst"), "rb") as f:
readme = f.read().decode("utf-8")
for s in TO_STRIP:
readme = readme.replace(s, "")
install_requires = [
"pywin32"
]
extras_require = {
"tests": [
"pytest",
"tox"
],
"docs": ["sphinx"],
"package": [
# Wheel building and PyPI uploading
"wheel",
"twine",
],
}
extras_require["dev"] = (
extras_require["tests"]
+ extras_require["docs"]
+ extras_require["package"]
)
extras_require["all"] = list(
{req for extra, reqs in extras_require.items() for req in reqs}
)
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description="{}\n\n{}".format(readme, changes),
long_description_content_type = "text/x-rst",
author=about["__author__"],
author_email=about["__email__"],
url=about["__url__"],
license=about["__license__"],
py_modules = ["wmi"],
install_requires=install_requires,
extras_require=extras_require,
scripts = ["wmitest.py", "wmiweb.py", "wmitest.cmd", "wmitest.master.ini"],
data_files = ["readme.rst"]
)
|
195994
|
import pytest
from leapp.libraries import stdlib
from leapp.libraries.actor import scankernel
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.libraries.stdlib import api
TARGET_KERNEL_VERSION = '1.2.3-4.el8.x86_64'
TARGET_RT_KERNEL_VERSION = '1.2.3-4.rt56.7.el8.x86_64'
TARGET_KERNEL = 'kernel-{}'.format(TARGET_KERNEL_VERSION)
TARGET_RT_KERNEL = 'kernel-{}'.format(TARGET_RT_KERNEL_VERSION)
OLD_KERNEL = 'kernel-0.1.2-3.el7.x86_64'
OLD_RT_KERNEL = 'kernel-rt-0.1.2-3.rt4.5.el7.x86_64'
class MockedRun(object):
def __init__(self, stdouts):
# stdouts should be dict of list of strings: { str: [str1,str2,...]}
self._stdouts = stdouts
def __call__(self, *args, **kwargs):
for key in ('kernel', 'kernel-rt'):
if key in args[0]:
return {'stdout': self._stdouts.get(key, [])}
return {'stdout': []}
@pytest.mark.parametrize('is_rt,exp_version,stdouts', [
(False, TARGET_KERNEL_VERSION, {'kernel': [OLD_KERNEL, TARGET_KERNEL]}),
(False, TARGET_KERNEL_VERSION, {'kernel': [TARGET_KERNEL, OLD_KERNEL]}),
(False, TARGET_KERNEL_VERSION, {
'kernel': [TARGET_KERNEL, OLD_KERNEL],
'kernel-rt': [TARGET_RT_KERNEL, OLD_RT_KERNEL],
}),
(True, TARGET_RT_KERNEL_VERSION, {'kernel-rt': [OLD_RT_KERNEL, TARGET_RT_KERNEL]}),
(True, TARGET_RT_KERNEL_VERSION, {'kernel-rt': [TARGET_RT_KERNEL, OLD_RT_KERNEL]}),
(True, TARGET_RT_KERNEL_VERSION, {
'kernel': [TARGET_KERNEL, OLD_KERNEL],
'kernel-rt': [TARGET_RT_KERNEL, OLD_RT_KERNEL],
}),
])
def test_scaninstalledkernel(monkeypatch, is_rt, exp_version, stdouts):
result = []
    old_kver = '0.1.2-3.rt4.5.el7.x86_64' if is_rt else '0.1.2-3.el7.x86_64'
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(kernel=old_kver))
monkeypatch.setattr(api, 'produce', result.append)
monkeypatch.setattr(scankernel, 'run', MockedRun(stdouts))
scankernel.process()
assert len(result) == 1 and result[0].version == exp_version
def test_scaninstalledkernel_missing_rt(monkeypatch):
result = []
old_kver = '0.1.2-3.rt4.5.el7.x86_64'
stdouts = {'kernel': [TARGET_KERNEL], 'kernel-rt': [OLD_RT_KERNEL]}
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(kernel=old_kver))
monkeypatch.setattr(api, 'current_logger', logger_mocked())
monkeypatch.setattr(api, 'produce', result.append)
monkeypatch.setattr(scankernel, 'run', MockedRun(stdouts))
scankernel.process()
assert api.current_logger.warnmsg
assert len(result) == 1 and result[0].version == TARGET_KERNEL_VERSION
def test_scaninstalledkernel_missing(monkeypatch):
result = []
old_kver = '0.1.2-3.rt4.5.el7.x86_64'
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(kernel=old_kver))
monkeypatch.setattr(api, 'current_logger', logger_mocked())
monkeypatch.setattr(api, 'produce', result.append)
monkeypatch.setattr(scankernel, 'run', MockedRun({}))
scankernel.process()
assert api.current_logger.warnmsg
assert api.current_logger.errmsg
assert not result
|
195998
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy.ndimage
import scipy.signal
import shutil
import display_pyutils
# Load the FOCUS package
import sys
sys.path.append('/home/allie/projects/focus') # Just to remember where this path is from!
IM_DIR = '/home/allie/workspace/images'
def apply_averaging_filter(x, filter_size=5):
    # use the function argument, not the global `signal`
    return np.convolve(x, np.ones(filter_size,) / float(filter_size), mode='valid')
def apply_median_filter(x, filter_size=5):
return scipy.signal.medfilt(x, filter_size)
if __name__ == '__main__':
results_dirs = sorted(glob.glob('/home/allie/workspace/server_sync/*/*'))
fignum = 0
plt.close('all')
anomalousness_to_save = []
pars_to_save = []
for results_dir in reversed(results_dirs):
fignum+=1
print(results_dir)
print(os.path.join(results_dir, 'anomaly_ratings.npy'))
try:
anomalousness = np.load(os.path.join(results_dir, 'anomaly_ratings.npy'))
signal = anomalousness/(1.0-anomalousness)
# Smooth temporally
signal = apply_averaging_filter(signal, 100)
pars = pickle.load(open(os.path.join(results_dir, 'pars.pickle'), 'rb'))
feats_file = pars.paths.files.infile_features
plt.figure(fignum)
plt.fill_between(range(len(signal)), signal, facecolor=display_pyutils.GOOD_COLOR_CYCLE[0], alpha=1.0) # alpha=0.5
signal_sorted = np.sort(signal)
bottom_ninetyfive_percent = signal_sorted[:int(np.floor(len(signal_sorted) * 0.95))]
y_max = np.median(bottom_ninetyfive_percent) + 3*np.std(bottom_ninetyfive_percent)
plt.ylim([0, y_max])
videonum_as_str = os.path.basename(feats_file)
lambd = pars.algorithm.discriminability.lambd
max_buffer_size = pars.algorithm.discriminability.max_buffer_size
title = 'video: {}\nlambda: {}\nmax_buffer_size:{}'.format(videonum_as_str, lambd, max_buffer_size)
plt.title(title)
print('Saving figure to {}.png in workspace'.format(plt.gcf().number))
display_pyutils.save_fig_to_workspace()
results_figure_name = os.path.join(results_dir, 'anomaly_rating.png')
display_pyutils.savefig(results_figure_name)
print('Saving figure to {}'.format(results_figure_name))
thresholded_anom_results = (signal > (np.median(signal) + 2 * np.std(bottom_ninetyfive_percent))).astype(float) * signal
plt.clf()
plt.fill_between(range(len(thresholded_anom_results)), thresholded_anom_results, facecolor=display_pyutils.GOOD_COLOR_CYCLE[1], alpha=1.0, label='anomalous: {:.4g}%'.format(100.0 * np.sum(thresholded_anom_results > 0) / len(thresholded_anom_results)))
plt.legend()
plt.ylim([0, y_max])
plt.title(title)
print('Saving figure to {}'.format(results_figure_name.replace('rating', 'rating_thresholded')))
display_pyutils.savefig(results_figure_name.replace('rating', 'rating_thresholded'))
if videonum_as_str.find('1101') != -1 and lambd == 10:
print('results_dir of interest: {}'.format(results_dir))
anomalousness_to_save += [anomalousness]
pars_to_save += [pars]
anomalous_frames = [os.path.join('/home/allie/projects/aladdin/videos/LGW_20071101_E1_CAM1frames', 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
destination_frames = [os.path.join('/home/allie/workspace/etc/1101_results', 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
for src, dest in zip(anomalous_frames, destination_frames):
shutil.copyfile(src, dest)
video_id = '1108'
if videonum_as_str.find(video_id) != -1 and lambd == 10:
print('results_dir of interest: {}'.format(results_dir))
anomalousness_to_save += [anomalousness]
pars_to_save += [pars]
destination_dir = '/home/allie/workspace/etc/{}_results'.format(video_id)
os.mkdir(destination_dir)
anomalous_frames = [os.path.join('/home/allie/projects/aladdin/videos/LGW_2007{}_E1_CAM1frames'.format(video_id), 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
destination_frames = [os.path.join(destination_dir, 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
for src, dest in zip(anomalous_frames, destination_frames):
shutil.copyfile(src, dest)
except Exception as exc:
print(exc)
print('continuing...')
|
196047
|
import numpy as np
import numpy.random as nr
from rlkit.exploration_strategies.base import RawExplorationStrategy
from rlkit.core.serializable import Serializable
class OUStrategy(RawExplorationStrategy, Serializable):
"""
This strategy implements the Ornstein-Uhlenbeck process, which adds
time-correlated noise to the actions taken by the deterministic policy.
The OU process satisfies the following stochastic differential equation:
dxt = theta*(mu - xt)*dt + sigma*dWt
where Wt denotes the Wiener process
Based on the rllab implementation.
"""
def __init__(
self,
action_space,
mu=0,
theta=0.15,
max_sigma=0.3,
min_sigma=0.3,
decay_period=100000,
):
assert len(action_space.shape) == 1
Serializable.quick_init(self, locals())
if min_sigma is None:
min_sigma = max_sigma
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self._max_sigma = max_sigma
self._min_sigma = min_sigma
self._decay_period = decay_period
self.dim = np.prod(action_space.low.shape)
self.low = action_space.low
self.high = action_space.high
self.state = np.ones(self.dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
self.state = x + dx
return self.state
def get_action_from_raw_action(self, action, t=0, **kwargs):
ou_state = self.evolve_state()
self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, t * 1.0 / self._decay_period
)
return np.clip(action + ou_state, self.low, self.high)
def get_actions_from_raw_actions(self, actions, t=0, **kwargs):
noise = (
self.state
+ self.theta * (self.mu - self.state)
+ self.sigma * nr.randn(*actions.shape)
)
return np.clip(actions + noise, self.low, self.high)
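# Hedged usage sketch (commented out; `Box` stands in for a gym-style action
# space with a 1-D shape and `low`/`high` bounds, which this class assumes):
# space = Box(low=-np.ones(3), high=np.ones(3), shape=(3,))
# ou = OUStrategy(action_space=space)
# noisy_action = ou.get_action_from_raw_action(np.zeros(3), t=0)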
|
196091
|
import numpy as np
from inferelator.utils import Validator as check
from inferelator import utils
from inferelator.regression import base_regression
from inferelator.distributed.inferelator_mp import MPControl
from sklearn.base import BaseEstimator
from inferelator.regression.base_regression import _MultitaskRegressionWorkflowMixin
import copy
import inspect
def sklearn_gene(x, y, model, min_coef=None, **kwargs):
"""
Use a scikit-learn model for regression
:param x: Feature array
:type x: np.ndarray [N x K]
:param y: Response array
:type y: np.ndarray [N x 1]
:param model: Instance of a scikit BaseEstimator-derived model
:type model: BaseEstimator
:param min_coef: A minimum coefficient value to include in the model. Any values smaller will be set to 0.
:type min_coef: numeric
:return: A dict of results for this gene
:rtype: dict
"""
assert check.argument_type(x, np.ndarray)
assert check.argument_type(y, np.ndarray)
assert check.argument_is_subclass(model, BaseEstimator)
(N, K) = x.shape
# Fit the model
model.fit(x, y, **kwargs)
# Get all model coefficients [K, ]
try:
coefs = model.coef_
except AttributeError:
coefs = model.estimator_.coef_
# Set coefficients below threshold to 0
if min_coef is not None:
coefs[np.abs(coefs) < min_coef] = 0. # Threshold coefficients
coef_nonzero = coefs != 0 # Create a boolean array where coefficients are nonzero [K, ]
# If there are non-zero coefficients, redo the linear regression with them alone
# And calculate beta_resc
if coef_nonzero.sum() > 0:
x = x[:, coef_nonzero]
utils.make_array_2d(y)
betas = base_regression.recalculate_betas_from_selected(x, y)
betas_resc = base_regression.predict_error_reduction(x, y, betas)
return dict(pp=coef_nonzero,
betas=betas,
betas_resc=betas_resc)
else:
return dict(pp=np.repeat(True, K).tolist(),
betas=np.zeros(K),
betas_resc=np.zeros(K))
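# Hedged usage sketch (commented out; the data and model choice below are
# illustrative assumptions, not from the source):
# from sklearn.linear_model import Lasso
# result = sklearn_gene(x, y, Lasso(alpha=0.1), min_coef=1e-6)
# result['pp'], result['betas'], result['betas_resc']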
class SKLearnRegression(base_regression.BaseRegression):
def __init__(self, x, y, model, random_state=None, **kwargs):
self.params = kwargs
if random_state is not None:
self.params["random_state"] = random_state
self.min_coef = self.params.pop("min_coef", None)
self.model = model(**self.params)
super(SKLearnRegression, self).__init__(x, y)
def regress(self):
"""
        Execute the scikit-learn regression model
:return: list
Returns a list of regression results that base_regression's pileup_data can process
"""
if MPControl.is_dask():
from inferelator.distributed.dask_functions import sklearn_regress_dask
return sklearn_regress_dask(self.X, self.Y, self.model, self.G, self.genes, self.min_coef)
def regression_maker(j):
level = 0 if j % 100 == 0 else 2
utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=self.genes[j], i=j, total=self.G), level=level)
data = sklearn_gene(self.X.values,
utils.scale_vector(self.Y.get_gene_data(j, force_dense=True, flatten=True)),
copy.copy(self.model),
min_coef=self.min_coef)
data['ind'] = j
return data
return MPControl.map(regression_maker, range(self.G), tell_children=False)
class SKLearnWorkflowMixin(base_regression._RegressionWorkflowMixin):
"""
Use any scikit-learn regression module
"""
_sklearn_model = None
_sklearn_model_params = None
_sklearn_add_random_state = False
def __init__(self, *args, **kwargs):
self._sklearn_model_params = {}
super(SKLearnWorkflowMixin, self).__init__(*args, **kwargs)
def set_regression_parameters(self, model=None, add_random_state=None, **kwargs):
"""
Set parameters to use a sklearn model for regression
:param model: A scikit-learn model class
:type model: BaseEstimator subclass
:param add_random_state: Flag to include workflow random seed as "random_state" in the model
:type add_random_state: bool
:param kwargs: Any arguments which should be passed to the scikit-learn model class instantiation
:type kwargs: any
"""
if model is not None and not inspect.isclass(model):
raise ValueError("Pass an uninstantiated scikit-learn model (i.e. LinearRegression, not LinearRegression()")
self._set_with_warning("_sklearn_model", model)
self._set_without_warning("_sklearn_add_random_state", add_random_state)
self._sklearn_model_params.update(kwargs)
def run_bootstrap(self, bootstrap):
x = self.design.get_bootstrap(bootstrap)
y = self.response.get_bootstrap(bootstrap)
utils.Debug.vprint('Calculating betas using SKLearn model {m}'.format(m=self._sklearn_model.__name__), level=0)
return SKLearnRegression(x,
y,
self._sklearn_model,
random_state=self.random_seed if self._sklearn_add_random_state else None,
**self._sklearn_model_params).run()
class SKLearnByTaskMixin(_MultitaskRegressionWorkflowMixin, SKLearnWorkflowMixin):
"""
This runs BBSR regression on tasks defined by the AMUSR regression (MTL) workflow
"""
def run_bootstrap(self, bootstrap_idx):
betas, betas_resc = [], []
# Select the appropriate bootstrap from each task and stash the data into X and Y
for k in range(self._n_tasks):
x = self._task_design[k].get_bootstrap(self._task_bootstraps[k][bootstrap_idx])
y = self._task_response[k].get_bootstrap(self._task_bootstraps[k][bootstrap_idx])
utils.Debug.vprint('Calculating task {k} using {n}'.format(k=k, n=self._sklearn_model.__name__), level=0)
t_beta, t_br = SKLearnRegression(x,
y,
self._sklearn_model,
random_state=self.random_seed if self._sklearn_add_random_state else None,
**self._sklearn_model_params).run()
betas.append(t_beta)
betas_resc.append(t_br)
return betas, betas_resc
|
196095
|
from typing import Any, Tuple, List
import logging
from collections import namedtuple
from itertools import chain
log = logging.getLogger(__name__)
try:
import win32com.client
import pythoncom
from pythoncom import VT_BYREF, VT_R8, VT_I4
except ImportError as e:
    message = ('To use the DynaCool Driver, please install win32com.'
               ' Installation can be done with pip install pypiwin32')
log.exception(message)
raise ImportError(message)
CmdArgs = namedtuple('CmdArgs', 'cmd args')
# The length of a command header, aka a command keyword
# Every command sent from the driver via the server must have a
# keyword of exactly this length ('?' NOT included)
CMD_HEADER_LENGTH = 4
class CommandHandler:
"""
This is the class that gets called by the server.py
This class is responsible for making the actual calls into the instrument
firmware. The idea is that this class get a SCPI-like string from the
server, e.g. 'TEMP?' or 'TEMP 300, 10, 1' and then makes the corresponding
MultiVu API call (or returns an error message to the server).
"""
# Variable types
_variants = {'double': win32com.client.VARIANT(VT_BYREF | VT_R8, 0.0),
'long': win32com.client.VARIANT(VT_BYREF | VT_I4, 0)}
def __init__(self, inst_type: str = 'dynacool') -> None:
self.inst_type = inst_type
pythoncom.CoInitialize()
client_id = f'QD.MULTIVU.{inst_type.upper()}.1'
try:
self._mvu = win32com.client.Dispatch(client_id)
except pythoncom.com_error:
error_mssg = ('Could not connect to Multivu Application. Please '
'make sure that the MultiVu Application is running.')
log.exception(error_mssg)
raise ValueError(error_mssg)
_variants = CommandHandler._variants
# Hard-code what we know about the MultiVu API
self._gets = {'TEMP': CmdArgs(cmd=self._mvu.GetTemperature,
args=[_variants['double'],
_variants['long']]),
'CHAT': CmdArgs(cmd=self._mvu.GetChamberTemp,
args=[_variants['double'],
_variants['long']]),
'GLTS': CmdArgs(cmd=self._mvu.GetLastTempSetpoint,
args=[_variants['double'],
_variants['double'],
_variants['long']]),
'GLFS': CmdArgs(cmd=self._mvu.GetFieldSetpoints,
args=[_variants['double'],
_variants['double'],
_variants['long'],
_variants['long']]),
'CHAM': CmdArgs(cmd=self._mvu.GetChamber,
args=[_variants['long']]),
'FELD': CmdArgs(cmd=self._mvu.GetField,
args=[_variants['double'],
_variants['long']]),
'*IDN': CmdArgs(cmd=self.make_idn_string, args=[])}
self._sets = {'TEMP': self._mvu.SetTemperature,
'FELD': self._mvu.SetField}
# validate the commands
for cmd in chain(self._gets, self._sets):
if len(cmd) != CMD_HEADER_LENGTH:
raise ValueError(f'Invalid command length: {cmd}.'
f' Must have length {CMD_HEADER_LENGTH}')
def make_idn_string(self) -> str:
return f'0, QuantumDesign, {self.inst_type}, N/A, N/A'
def preparser(self, cmd_str: str) -> Tuple[CmdArgs, bool]:
"""
Parse the raw SCPI-like input string into a CmdArgs tuple containing
the corresponding MultiVu API function and a boolean indicating whether
we expect the MultiVu function to modify its input (i.e. be a query)
Args:
cmd_str: A SCPI-like string, e.g. 'TEMP?' or 'TEMP 300, 0.1, 1'
Returns:
A tuple of a CmdArgs tuple and a bool indicating whether this was
a query
"""
def err_func() -> int:
return -2
cmd_head = cmd_str[:CMD_HEADER_LENGTH]
if cmd_head not in set(self._gets.keys()).union(set(self._sets.keys())):
cmd = err_func
args: List[Any] = []
is_query = False
elif cmd_str.endswith('?'):
cmd = self._gets[cmd_head].cmd
args = self._gets[cmd_head].args
is_query = True
else:
cmd = self._sets[cmd_head]
args = list(float(arg) for arg in cmd_str[5:].split(', '))
is_query = False
return CmdArgs(cmd=cmd, args=args), is_query
@staticmethod
def postparser(error_code: int, vals: List[Any]) -> str:
"""
Parse the output of the MultiVu API call into a string that the server
can send back to the client
Args:
error_code: the error code returned from the MultiVu call
vals: A list of the returned values (empty in case of a set cmd)
"""
response = f'{error_code}'
for val in vals:
response += f', {val}'
return response
def __call__(self, cmd: str) -> str:
cmd_and_args, is_query = self.preparser(cmd)
log.debug(f'Parsed {cmd} into {cmd_and_args}')
# Actually perform the call into the MultiVu API
error_code = cmd_and_args.cmd(*cmd_and_args.args)
# return values in case we did a query
if is_query:
# read out the mutated values
# (win32 reverses the order)
vals = list(arg.value for arg in cmd_and_args.args)
vals.reverse()
# reset the value variables for good measures
for arg in cmd_and_args.args:
arg.value = 0
else:
vals = []
response_message = self.postparser(error_code, vals)
return response_message
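# Hedged usage sketch (commented out; requires a running MultiVu application):
# handler = CommandHandler('dynacool')
# handler('TEMP?')            # query; returns '<error_code>, <values...>'
# handler('TEMP 300, 10, 1')  # set temperature: setpoint, rate, mode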
|
196096
|
from bson.json_util import dumps, loads
class DataHandler:
@staticmethod
def from_json(json_data):
"""Decode the data from JSON"""
return loads(json_data) # Decode JSON data
@staticmethod
def to_json(data):
"""Encodes the data to JSON"""
return dumps(data) # Encode to JSON
@staticmethod
def is_device_found(devices_count):
"""Checks if any device found"""
return devices_count > 0
|
196142
|
import contextlib
try:
from asgiref.local import Local
except ImportError:
from threading import local as Local # noqa: N812
_thread_locals = Local()
@contextlib.contextmanager
def set_request(request):
"""
    Context manager that sets the request for the current thread or async context
"""
_thread_locals._request = request
yield
def get_request():
"""
Retrieve request from current instance
"""
return getattr(_thread_locals, "_request", None)
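# Hedged middleware sketch (illustrative, not part of this module): wrap each
# request so get_request() works for the duration of its handling.
def request_middleware(get_response):
    def middleware(request):
        with set_request(request):
            return get_response(request)
    return middleware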
|
196151
|
import os
from typing import Union
from utils.ordered_yaml import OrderedYaml
ordered_yaml = OrderedYaml()
class Config(object):
SLACK = 'slack'
NOTIFICATION_WEBHOOK = 'notification_webhook'
WORKFLOWS = 'workflows'
CONFIG_FILE_NAME = 'config.yml'
def __init__(self, config_dir: str, profiles_dir: str) -> None:
self.config_dir = config_dir
self.profiles_dir = profiles_dir
self.config_dict = self._load_configuration()
def _load_configuration(self) -> dict:
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir)
config_file_path = os.path.join(self.config_dir, self.CONFIG_FILE_NAME)
if not os.path.exists(config_file_path):
return {}
return ordered_yaml.load(config_file_path)
@property
def anonymous_tracking_enabled(self) -> bool:
return self.config_dict.get('anonymous_usage_tracking', True)
@property
def slack_notification_webhook(self) -> Union[str, None]:
slack_config = self.config_dict.get(self.SLACK)
if slack_config is not None:
return slack_config.get(self.NOTIFICATION_WEBHOOK)
return None
@property
def is_slack_workflow(self) -> bool:
slack_config = self.config_dict.get(self.SLACK)
if slack_config is not None:
workflows = slack_config.get(self.WORKFLOWS)
if workflows is True:
return True
return False
@property
def target_dir(self) -> str:
target_path = self.config_dict.get('target-path')
if not target_path:
return os.getcwd()
return target_path
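# Hedged sketch of a config.yml matching the keys read above (values are
# illustrative assumptions):
#
#   anonymous_usage_tracking: true
#   slack:
#     notification_webhook: https://hooks.slack.com/services/...
#     workflows: false
#   target-path: /path/to/target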
|
196178
|
import re
from collections import defaultdict
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For each user, if the group with the exact scope of permissions exists,
add the user to it, else create a new group with this scope of permissions
and add the user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
counter = get_counter_value(Group)
mapping = create_permissions_mapping(User)
for perms, users in mapping.items():
group = get_group_with_given_permissions(perms, groups)
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, counter, Group)
group.user_set.add(*users)
counter += 1
def get_counter_value(Group):
"""Get the number of next potential group."""
pattern = r"^Group (\d+)$"
group = Group.objects.filter(name__iregex=pattern).order_by("name").last()
if not group:
return 1
return int(re.match(pattern, group.name).group(1)) + 1
def create_permissions_mapping(User):
"""Create mapping permissions to users and potential new group name."""
mapping = defaultdict(set)
users = User.objects.filter(user_permissions__isnull=False).distinct().iterator()
for user in users:
permissions = user.user_permissions.all().order_by("pk")
perm_pks = tuple([perm.pk for perm in permissions])
mapping[perm_pks].add(user.pk)
user.user_permissions.clear()
return mapping
def get_group_with_given_permissions(permissions, groups):
"""Get group with given set of permissions."""
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, counter, Group):
"""Create new group with given set of permissions."""
group_name = f"Group {counter:03d}"
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0040_auto_20200415_0443"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
196222
|
import math
import pytest
from ..rcbd import RandomizedCompleteBlockDesign, RandomizedCompleteBlockDesign_MissingValues
class TestRCBD:
def test_rcbd_1(self):
exp = RandomizedCompleteBlockDesign(
[
[73, 68, 74, 71, 67],
[73, 67, 75, 72, 70],
[75, 68, 78, 73, 68],
[73, 71, 75, 75, 69],
]
)
abs_tol = 10 ** -3
assert math.isclose(exp.f_treatments, 2.3761, abs_tol=abs_tol)
assert math.isclose(exp.p_treatments, 0.1211, abs_tol=abs_tol)
def test_rcbd_2(self):
exp = RandomizedCompleteBlockDesign(
[
[9.3, 9.4, 9.6, 10.0],
[9.4, 9.3, 9.8, 9.9],
[9.2, 9.4, 9.5, 9.7],
[9.7, 9.6, 10.0, 10.2],
]
)
abs_tol = 10 ** -3
assert math.isclose(exp.f_treatments, 14.4375, abs_tol=abs_tol)
assert math.isclose(exp.p_treatments, 0.0009, abs_tol=abs_tol)
class TestRCBDMissing:
def test_rcbd_missing_1(self):
exp = RandomizedCompleteBlockDesign_MissingValues(
[
[18.5, 11.7, 15.4, 16.5],
[15.7, float("nan"), 16.6, 18.6],
[16.2, 12.9, 15.5, 12.7],
[14.1, 14.4, 20.3, 15.7],
[13.0, 16.9, 18.4, 16.5],
[13.6, 12.5, 41.5, 18.0],
]
)
abs_tol = 10 ** -3
assert math.isclose(exp.f_treatments, 0.7561, abs_tol=abs_tol)
assert math.isclose(exp.f_blocks, 2.0859, abs_tol=abs_tol)
def test_rcbd_missing_2(self):
exp = RandomizedCompleteBlockDesign_MissingValues(
[[12, 14, 12], [10, float("nan"), 8], [float("nan"), 15, 10]]
)
assert math.isclose(exp.f_treatments, 4.7500, abs_tol=10 ** -3)
assert math.isclose(exp.f_blocks, 7.7500, abs_tol=10 ** -3)
def test_rcbd_missing_3(self):
exp = RandomizedCompleteBlockDesign_MissingValues(
[
[90.3, 89.2, 98.2, 93.9, 87.4, 97.9],
[92.5, 89.5, 90.6, float("nan"), 87, 95.8],
[85.5, 90.8, 89.6, 86.2, 88, 93.4],
[82.5, 89.5, 85.6, 87.4, 78.9, 90.7],
]
)
abs_tol = 10 ** -3
assert math.isclose(exp.f_treatments, 7.6241, abs_tol=abs_tol)
assert math.isclose(exp.f_blocks, 5.2181, abs_tol=abs_tol)
def test_rcbd_missing_throw_error(self):
with pytest.raises(Exception):
# 3 missing, throw exception
RandomizedCompleteBlockDesign_MissingValues(
[
[18.5, 11.7, 15.4, 16.5],
[15.7, float("nan"), 16.6, 18.6],
[16.2, 12.9, 15.5, 12.7],
[14.1, 14.4, float("nan"), 15.7],
[13.0, 16.9, 18.4, 16.5],
[13.6, float("nan"), 41.5, 18.0],
]
)
def test_rcbd_multiple_comparisons(self):
exp = RandomizedCompleteBlockDesign(
[
[9.3, 9.4, 9.6, 10.0],
[9.4, 9.3, 9.8, 9.9],
[9.2, 9.4, 9.5, 9.7],
[9.7, 9.6, 10.0, 10.2],
]
)
exp.multiple_comparisons()
|
196231
|
import click
from globus_sdk import AuthAPIError
from globus_cli.login_manager import LoginManager
from globus_cli.parsing import command
from globus_cli.termio import (
FORMAT_TEXT_RECORD,
formatted_print,
is_verbose,
print_command_hint,
)
@command(
"whoami",
disable_options=["map_http_status"],
short_help="Show the currently logged-in identity",
adoc_output="""Note: this output is not affected by sessions in any way. For information
on which of your identities are in session use *globus session show*
If no options are given the default output is just the preferred
username of the logged in identity.
If *--linked-identities* is given the output will be each username in the
logged-in user's identity set.
If *--verbose* is given, the following fields will be output, either in
a record format or a table format if *--linked-identities* is also given.
- 'Username'
- 'Name'
- 'ID'
- 'Email'
""",
adoc_examples="""Display multiple fields of the current user's information:
[source,bash]
----
$ globus whoami -v
----
Display each username in the current user's identity set:
[source,bash]
----
$ globus whoami --linked-identities
----
""",
)
@click.option(
"--linked-identities",
is_flag=True,
help="Also show identities linked to the currently logged-in primary identity.",
)
@LoginManager.requires_login(LoginManager.AUTH_RS)
def whoami_command(*, login_manager, linked_identities):
"""
Display information for the currently logged-in user.
"""
auth_client = login_manager.get_auth_client()
# get userinfo from auth.
# if we get back an error the user likely needs to log in again
try:
res = auth_client.oauth2_userinfo()
except AuthAPIError:
click.echo(
"Unable to get user information. Please try logging in again.", err=True
)
click.get_current_context().exit(1)
print_command_hint(
"For information on which identities are in session see\n"
" globus session show\n"
)
# --linked-identities either displays all usernames or a table if verbose
if linked_identities:
try:
formatted_print(
res["identity_set"],
fields=[
("Username", "username"),
("Name", "name"),
("ID", "sub"),
("Email", "email"),
],
simple_text=(
None
if is_verbose()
else "\n".join([x["username"] for x in res["identity_set"]])
),
)
except KeyError:
click.echo(
"Your current login does not have the consents required "
"to view your full identity set. Please log in again "
"to agree to the required consents.",
err=True,
)
# Default output is the top level data
else:
formatted_print(
res,
text_format=FORMAT_TEXT_RECORD,
fields=[
("Username", "preferred_username"),
("Name", "name"),
("ID", "sub"),
("Email", "email"),
],
simple_text=(None if is_verbose() else res["preferred_username"]),
)
|
196262
|
import datetime
from typing import TYPE_CHECKING
from sqlalchemy import (Boolean,
Column,
DateTime,
ForeignKey,
Integer,
String,
Text)
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship
from database.base_class import Base
if TYPE_CHECKING:
from .users import User
class Category(Base):
id = Column(Integer, primary_key=True, index=True)
name = Column(String, index=True)
description = Column(String, index=True)
owner_id = Column(Integer, ForeignKey("user.id"))
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, onupdate=func.now())
owner = relationship("User", back_populates="categories")
recipes = relationship("Recipe", back_populates="category")
class Recipe(Base):
id = Column(Integer, primary_key=True, index=True)
name = Column(String, index=True)
description = Column(Text, nullable=False)
is_public = Column(Boolean, default=False)
owner_id = Column(Integer, ForeignKey("user.id"))
category_id = Column(Integer, ForeignKey("category.id"))
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, onupdate=func.now())
ingredients = relationship("Ingredient", back_populates="recipe")
category = relationship("Category", back_populates="recipes")
owner = relationship("User", back_populates="recipes")
class Ingredient(Base):
id = Column(Integer, primary_key=True, index=True)
name = Column(String, index=True)
recipe_id = Column(Integer, ForeignKey("recipe.id"))
recipe = relationship("Recipe", back_populates="ingredients")
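# Usage sketch (editorial addition; kept as a comment because the companion
# `User` model from .users must also be imported before the "owner" and
# "categories" relationships can be configured):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import Session
#   engine = create_engine("sqlite://")
#   Base.metadata.create_all(engine)
#   with Session(engine) as session:
#       soup = Recipe(name="Soup", description="Warm and simple.", is_public=True)
#       soup.ingredients.append(Ingredient(name="Carrot"))
#       session.add(soup)
#       session.commit()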
|
196279
|
import copy
from zeep import xsd
from .base.custom_fields import CustomFields
from .factory import createFromUri, Creator
class Document(CustomFields):
def __init__(self, polarion, project, uri=None, location=None):
"""
Create a Document.
:param polarion: Polarion client object
:param project: Polarion Project object
:param uri: Polarion uri (first possibility to get a document)
:param location: Document location (second possibility to get a document)
"""
super().__init__(polarion, project, uri=uri)
self._uri = uri
self._project = project
self._polarion = polarion
        if self._uri is not None:
            service = self._polarion.getService('Tracker')
            self._polarion_document = service.getModuleByUri(self._uri)
            if self._polarion_document is None or self._polarion_document.unresolvable:
                raise Exception(
                    f'Cannot find document at URI {self._uri} in project {self._project.id}')
        elif location is not None:
            service = self._polarion.getService('Tracker')
            self._polarion_document = service.getModuleByLocation(self._project.id, location)
            if self._polarion_document is None or self._polarion_document.unresolvable:
                raise Exception(
                    f'Cannot find document at location {location} in project {self._project.id}')
            self._uri = self._polarion_document.uri
        else:
            raise Exception('Provide either an uri or a location to load the document')
        self._buildFromPolarion()
def _buildFromPolarion(self):
if self._polarion_document is not None and self._polarion_document.unresolvable is False:
self._original_polarion = copy.deepcopy(self._polarion_document)
for attr, value in self._polarion_document.__dict__.items():
for key in value:
setattr(self, key, value[key])
def _reloadFromPolarion(self):
service = self._polarion.getService('Tracker')
self._polarion_document = service.getModuleByUri(self._uri)
self._buildFromPolarion()
def getWorkitemUris(self):
"""
Get the uris of all workitems in the document.
:return: string[]
"""
service = self._polarion.getService('Tracker')
workitems = service.getModuleWorkItemUris(self._uri, None, True)
return workitems
def getWorkitems(self):
"""
Get all complete workitems.
That may take some time on a large document.
:return: Workitem[]
"""
workitems = []
workitem_uris = self.getWorkitemUris()
for workitem_uri in workitem_uris:
workitems.append(createFromUri(self._polarion, self._project, workitem_uri))
return workitems
def getTopLevelWorkitem(self):
"""
Get the top level workitem, which is usually the title.
:return: Workitem
"""
return createFromUri(self._polarion, self._project, self.getWorkitemUris()[0])
def getChildren(self, workitem):
"""
Gets the children of a workitem in the document.
:param workitem: Workitem to get children for
:return: List of workitems
"""
workitem_children = []
if workitem.linkedWorkItemsDerived is not None:
document_uris = self.getWorkitemUris()
children = (w for w in workitem.linkedWorkItemsDerived.LinkedWorkItem if
w.role.id == self.structureLinkRole.id and w.workItemURI in document_uris)
for child in children:
workitem_children.append(createFromUri(self._polarion, self._project, child.workItemURI))
return workitem_children
def getParent(self, workitem):
"""
Gets the parent of a workitem in the document.
:param workitem: Workitem to get parent for
:return: Parent workitem, None if no parent
"""
parent = None
if workitem.linkedWorkItems is not None:
document_uris = self.getWorkitemUris()
parent_uri = [w for w in workitem.linkedWorkItems.LinkedWorkItem if
w.role.id == self.structureLinkRole.id and w.workItemURI in document_uris][0]
parent = createFromUri(self._polarion, self._project, parent_uri.workItemURI)
return parent
def addHeading(self, title, parent_workitem=None):
"""
Adds a heading to a document
:param title: Title of the heading
:param parent_workitem: Parent workitem in the document hierarchy, set to None to create it on top level
:return: Heading workitem
"""
heading = self._project.createWorkitem('heading')
heading.title = title
heading.save()
heading.moveToDocument(self, parent_workitem)
return heading
def isCustomFieldAllowed(self, _):
"""
Checks if the custom field of a given key is allowed.
The Polarion interface to get allowed custom fields only supports work items.
:return: If the field is allowed
:rtype: bool
"""
return True
def reuse(self, target_project_id, target_location, target_name, target_title, link_role='derived_from',
derived_fields=None):
"""
Reuse this document in a different project.
:param target_project_id: The target project id
:param target_location: Location of the target document
:param target_name: The target document's name
:param target_title: Title of the target document
:param link_role: Link role of the derived documents
:param derived_fields: List of fields to be derived in the target document
:return: The new document
"""
if derived_fields is None:
derived_fields = ['title', 'description']
service = self._polarion.getService('Tracker')
new_uri = service.reuseDocument(self._uri, target_project_id, target_location, target_name, target_title, True,
link_role, derived_fields)
return createFromUri(self._polarion, self._project, new_uri)
def update(self, revision=None, auto_suspect=False):
"""
Update a reused document to a revision of the source document.
:param revision: Source document revision
:param auto_suspect: If set to True, changed workitems will mark their links as suspect
"""
service = self._polarion.getService('Tracker')
service.updateDerivedDocument(self._uri, revision if revision is not None else xsd.const.Nil, auto_suspect)
def save(self):
"""
        Update the document in Polarion
"""
updated_item = {}
for attr, value in self._polarion_document.__dict__.items():
for key in value:
current_value = getattr(self, key)
prev_value = getattr(self._original_polarion, key)
if current_value != prev_value:
updated_item[key] = current_value
if len(updated_item) > 0:
updated_item['uri'] = self._uri
service = self._polarion.getService('Tracker')
service.updateModule(updated_item)
self._reloadFromPolarion()
def delete(self):
"""
Deletes a document
"""
service = self._polarion.getService('Tracker')
service.deleteModule(self.uri)
def __repr__(self):
return f'Polarion document {self.title} in {self.moduleFolder}'
def __str__(self):
return f'Polarion document {self.title} in {self.moduleFolder}'
class DocumentCreator(Creator):
def createFromUri(self, polarion, project, uri):
return Document(polarion, project, uri)
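# Usage sketch (editorial addition): `polarion` and `project` are assumed to be
# an authenticated client and Project object from this library.
#
#   doc = Document(polarion, project, location='Testing/My Spec')
#   top = doc.getTopLevelWorkitem()
#   for child in doc.getChildren(top):
#       print(child)
#   doc.addHeading('New chapter', parent_workitem=top)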
|
196287
|
class CountdownCancelAll:
def __init__(self):
self.symbol = ""
self.countdownTime = 0
@staticmethod
def json_parse(json_data):
result = CountdownCancelAll()
result.symbol = json_data.get_string("symbol")
result.countdownTime = json_data.get_int("countdownTime")
return result
|
196297
|
import typing
from scipy.interpolate import interp1d
import numpy as np
import slippy
from slippy.core import _SubModelABC
from slippy.core.materials import _IMMaterial
from slippy.core.influence_matrix_utils import bccg, plan_convolve
# TODO add from_offset option to get the displacement from the offset
class TangentialPartialSlip(_SubModelABC):
""" Solves the partial slip problem
Parameters
----------
name: str
The name of the sub model, used for debugging
direction: {'x', 'y'}
The direction of applied load or displacement, only 'x' and 'y' are currently supported
    load, displacement: float or sequence of floats
        At most one may be supplied: either the total load or the rigid body displacement. Suitable values are:
        - float: indicating a constant load/displacement
        - 2 by n array: of time points and load/displacement values
        If an array is supplied and it is too short, it is extrapolated; this produces a
        warning. If neither is supplied, this sub-model requires rigid_body_displacement to be provided by a further
        sub-model
periodic_axes: 2 element sequence of bool, optional (False, False)
True for each axis which the solution should be periodic in, should match solving step
tol: float, optional (1e-7)
The tolerance used to declare convergence for the bccg iterations
max_it: int, optional (None)
The maximum number of iterations for the bccg iterations, defaults to the same as the number of contact nodes
"""
def __init__(self, name: str, direction: str,
load: typing.Union[float, typing.Sequence] = None,
displacement: typing.Union[float, typing.Sequence] = None,
periodic_axes: typing.Sequence[bool] = (False, False),
tol: float = 1e-7, max_it: int = None):
requires = {'maximum_tangential_force', 'contact_nodes', 'time'}
if load is None and displacement is None:
self.displacement_from_sub_model = True
requires.add('rigid_body_displacement_' + direction)
self.update_displacement = False
else:
self.displacement_from_sub_model = False
provides = {'slip_distance', 'stick_nodes', 'loads_x', 'loads_y', 'total_displacement_x',
'total_displacement_y'}
super().__init__(name, requires, provides)
self.load_controlled = False
if load is not None:
if displacement is not None:
raise ValueError("Either the load or the displacement can be set, not both")
try:
self.load = float(load)
self.update_load = False
self.load_upd = None
except TypeError:
self.load = None
self.load_upd = interp1d(load[0, :], load[1, :], fill_value='extrapolate')
self.update_load = True
self.load_controlled = True
if displacement is not None:
try:
self.displacement = float(displacement)
self.update_displacement = False
self.displacement_upd = None
except TypeError:
self.displacement = None
self.displacement_upd = interp1d(displacement[0, :], displacement[1, :], fill_value='extrapolate')
self.update_displacement = True
self.component = direction * 2
self._last_span = None
self._pre_solve_checks = False
self._im_1 = None
self._im_2 = None
self._im_total = None
self._periodic_axes = periodic_axes
self._tol = tol
self._max_it = max_it
self.previous_result = None
def _check(self, span):
# check that both are im materials and store ims
if isinstance(self.model.surface_1.material, _IMMaterial) and \
isinstance(self.model.surface_2.material, _IMMaterial):
im_1 = self.model.surface_1.material.influence_matrix([self.component],
[self.model.surface_1.grid_spacing] * 2,
span)[self.component]
im_2 = self.model.surface_2.material.influence_matrix([self.component],
[self.model.surface_1.grid_spacing] * 2,
span)[self.component]
self._im_1 = im_1
self._im_2 = im_2
self._im_total = im_1 + im_2
self._pre_solve_checks = True
else:
raise ValueError("This sub model only supports influence matrix based materials")
def solve(self, current_state: dict) -> dict:
span = current_state['maximum_tangential_force'].shape
if not self._pre_solve_checks or span != self._last_span:
self._check(span)
self._last_span = span
domain = current_state['contact_nodes']
conv_func = plan_convolve(self._im_total, self._im_total, domain,
circular=self._periodic_axes)
        # if the displacement is provided by another sub-model, or a fixed
        # displacement was set, we only need one set of bccg iterations:
if not self.load_controlled:
if self.update_displacement:
set_displacement = self.displacement_upd(current_state['time'])
elif self.displacement_from_sub_model:
set_displacement = current_state['rigid_body_displacement_' + self.component[0]]
else:
set_displacement = self.displacement
try:
set_displacement = float(set_displacement)*np.ones_like(current_state['maximum_tangential_force'])
except TypeError:
pass
x0 = self.previous_result if self.previous_result is not None else \
current_state['maximum_tangential_force']/2
min_pressure = np.array(-1*current_state['maximum_tangential_force'][domain])
loads_in_domain, failed = bccg(conv_func, set_displacement[domain], self._tol,
self._max_it, x0[domain],
min_pressure,
current_state['maximum_tangential_force'][domain])
loads_in_domain = slippy.asnumpy(loads_in_domain)
full_loads = np.zeros_like(current_state['maximum_tangential_force'])
full_loads[domain] = loads_in_domain
stick_nodes = np.logical_and(domain, full_loads < (0.99 * current_state['maximum_tangential_force']))
current_state['stick_nodes'] = stick_nodes
tangential_deformation = slippy.asnumpy(conv_func(loads_in_domain, True))
current_state['loads_' + self.component[0]] = full_loads
if 'total_displacement_' + self.component[0] in current_state:
current_state['total_displacement_' + self.component[0]] += tangential_deformation
else:
current_state['total_displacement_' + self.component[0]] = tangential_deformation
slip_distance = set_displacement-tangential_deformation
slip_distance[stick_nodes] = 0
slip_distance[np.logical_not(domain)] = 0
current_state['slip_distance'] = slip_distance
return current_state
else:
raise NotImplementedError('Load controlled partial slip is not yet implemented')
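# Construction sketch (editorial addition): a displacement-controlled sub-model
# driven by a ramp over time; the surrounding slippy model and solving step are
# assumed and omitted here.
#
#   import numpy as np
#   ramp = np.array([[0.0, 1.0],      # time points
#                    [0.0, 1e-6]])    # rigid body displacement at those times
#   sub_model = TangentialPartialSlip('partial_slip', direction='x',
#                                     displacement=ramp)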
|
196298
|
from rpython.rlib import jit, rgc
from rpython.rlib.buffer import RawBuffer
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rarithmetic import ovfcheck, widen
from rpython.rlib.unroll import unrolling_iterable
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import (
GetSetProperty, TypeDef, make_weakref_descr)
from pypy.module._file.interp_file import W_File
@unwrap_spec(typecode='text')
def w_array(space, w_cls, typecode, __args__):
if len(__args__.arguments_w) > 1:
raise oefmt(space.w_TypeError, "array() takes at most 2 arguments")
if len(typecode) != 1:
raise oefmt(space.w_TypeError,
"array() argument 1 must be char, not str")
typecode = typecode[0]
if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)):
if __args__.keywords:
raise oefmt(space.w_TypeError,
"array.array() does not take keyword arguments")
for tc in unroll_typecodes:
if typecode == tc:
a = space.allocate_instance(types[tc].w_class, w_cls)
a.__init__(space)
break
else:
raise oefmt(space.w_ValueError,
"bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or "
"d)")
if len(__args__.arguments_w) > 0:
w_initializer = __args__.arguments_w[0]
w_initializer_type = space.type(w_initializer)
if w_initializer_type is space.w_bytes:
a.descr_fromstring(space, w_initializer)
elif w_initializer_type is space.w_list:
a.descr_fromlist(space, w_initializer)
else:
a.extend(w_initializer, True)
return a
def descr_itemsize(space, self):
return space.newint(self.itemsize)
def descr_typecode(space, self):
return space.newtext(self.typecode)
arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens=['comp_func'],
reds='auto')
EQ, NE, LT, LE, GT, GE = range(6)
def compare_arrays(space, arr1, arr2, comp_op):
if not (isinstance(arr1, W_ArrayBase) and isinstance(arr2, W_ArrayBase)):
return space.w_NotImplemented
if comp_op == EQ and arr1.len != arr2.len:
return space.w_False
if comp_op == NE and arr1.len != arr2.len:
return space.w_True
lgt = min(arr1.len, arr2.len)
for i in range(lgt):
arr_eq_driver.jit_merge_point(comp_func=comp_op)
w_elem1 = arr1.w_getitem(space, i)
w_elem2 = arr2.w_getitem(space, i)
if comp_op == EQ:
res = space.eq_w(w_elem1, w_elem2)
if not res:
return space.w_False
elif comp_op == NE:
res = space.is_true(space.ne(w_elem1, w_elem2))
if res:
return space.w_True
elif comp_op == LT or comp_op == GT:
if comp_op == LT:
res = space.is_true(space.lt(w_elem1, w_elem2))
else:
res = space.is_true(space.gt(w_elem1, w_elem2))
if res:
return space.w_True
elif not space.eq_w(w_elem1, w_elem2):
return space.w_False
else:
if comp_op == LE:
res = space.is_true(space.le(w_elem1, w_elem2))
else:
res = space.is_true(space.ge(w_elem1, w_elem2))
if not res:
return space.w_False
elif not space.eq_w(w_elem1, w_elem2):
return space.w_True
# we have some leftovers
if comp_op == EQ:
return space.w_True
elif comp_op == NE:
return space.w_False
if arr1.len == arr2.len:
if comp_op == LT or comp_op == GT:
return space.w_False
return space.w_True
if comp_op == LT or comp_op == LE:
if arr1.len < arr2.len:
return space.w_True
return space.w_False
if arr1.len > arr2.len:
return space.w_True
return space.w_False
index_count_jd = jit.JitDriver(
greens = ['count', 'arrclass', 'tp_item'],
reds = 'auto', name = 'array.index_or_count')
def index_count_array(arr, w_val, count=False):
space = arr.space
tp_item = space.type(w_val)
arrclass = arr.__class__
cnt = 0
for i in range(arr.len):
index_count_jd.jit_merge_point(
tp_item=tp_item, count=count,
arrclass=arrclass)
w_item = arr.w_getitem(space, i)
if space.eq_w(w_item, w_val):
if count:
cnt += 1
else:
return i
if count:
return cnt
return -1
UNICODE_ARRAY = lltype.Ptr(lltype.Array(lltype.UniChar,
hints={'nolength': True}))
class W_ArrayBase(W_Root):
_attrs_ = ('space', 'len', 'allocated', '_lifeline_', '_buffer')
def __init__(self, space):
self.space = space
self.len = 0
self.allocated = 0
self._buffer = lltype.nullptr(rffi.CCHARP.TO)
@rgc.must_be_light_finalizer
def __del__(self):
if self._buffer:
lltype.free(self._buffer, flavor='raw')
def setlen(self, size, zero=False, overallocate=True):
if size > 0:
if size > self.allocated or size < self.allocated / 2:
if overallocate:
if size < 9:
some = 3
else:
some = 6
some += size >> 3
else:
some = 0
self.allocated = size + some
byte_size = self.allocated * self.itemsize
if zero:
new_buffer = lltype.malloc(
rffi.CCHARP.TO, byte_size, flavor='raw',
add_memory_pressure=True, zero=True)
else:
new_buffer = lltype.malloc(
rffi.CCHARP.TO, byte_size, flavor='raw',
add_memory_pressure=True)
copy_bytes = min(size, self.len) * self.itemsize
rffi.c_memcpy(rffi.cast(rffi.VOIDP, new_buffer),
rffi.cast(rffi.VOIDP, self._buffer),
copy_bytes)
else:
self.len = size
return
else:
assert size == 0
self.allocated = 0
new_buffer = lltype.nullptr(rffi.CCHARP.TO)
if self._buffer:
lltype.free(self._buffer, flavor='raw')
self._buffer = new_buffer
self.len = size
def _fromiterable(self, w_seq):
# used by fromsequence().
# a more careful case if w_seq happens to be a very large
# iterable: don't copy the items into some intermediate list
w_iterator = self.space.iter(w_seq)
tp = self.space.type(w_iterator)
while True:
unpack_driver.jit_merge_point(selfclass=self.__class__,
tp=tp, self=self,
w_iterator=w_iterator)
space = self.space
try:
w_item = space.next(w_iterator)
except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break # done
self.descr_append(space, w_item)
def _charbuf_start(self):
return self._buffer
def _buffer_as_unsigned(self):
return rffi.cast(lltype.Unsigned, self._buffer)
def _charbuf_stop(self):
keepalive_until_here(self)
def delitem(self, space, i, j):
if i < 0:
i += self.len
if i < 0:
i = 0
if j < 0:
j += self.len
if j < 0:
j = 0
if j > self.len:
j = self.len
if i >= j:
return None
oldbuffer = self._buffer
self._buffer = lltype.malloc(rffi.CCHARP.TO,
(self.len - (j - i)) * self.itemsize, flavor='raw',
add_memory_pressure=True)
if i:
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, self._buffer),
rffi.cast(rffi.VOIDP, oldbuffer),
i * self.itemsize
)
if j < self.len:
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, rffi.ptradd(self._buffer,
i * self.itemsize)),
rffi.cast(rffi.VOIDP, rffi.ptradd(oldbuffer,
j * self.itemsize)),
(self.len - j) * self.itemsize
)
self.len -= j - i
self.allocated = self.len
if oldbuffer:
lltype.free(oldbuffer, flavor='raw')
def readbuf_w(self, space):
return ArrayBuffer(self, True)
def writebuf_w(self, space):
return ArrayBuffer(self, False)
def descr_append(self, space, w_x):
""" append(x)
Append new value x to the end of the array.
"""
raise NotImplementedError
def descr_extend(self, space, w_x):
""" extend(array or iterable)
Append items to the end of the array.
"""
self.extend(w_x)
def descr_count(self, space, w_x):
""" count(x)
Return number of occurrences of x in the array.
"""
cnt = index_count_array(self, w_x, count=True)
return space.newint(cnt)
def descr_index(self, space, w_x):
""" index(x)
Return index of first occurrence of x in the array.
"""
res = index_count_array(self, w_x, count=False)
if res >= 0:
return space.newint(res)
raise oefmt(space.w_ValueError, "array.index(x): x not in list")
def descr_reverse(self, space):
""" reverse()
Reverse the order of the items in the array.
"""
raise NotImplementedError
def descr_remove(self, space, w_val):
""" remove(x)
Remove the first occurrence of x in the array.
"""
w_idx = self.descr_index(space, w_val)
self.descr_pop(space, space.int_w(w_idx))
@unwrap_spec(i=int)
def descr_pop(self, space, i=-1):
""" pop([i])
Return the i-th element and delete it from the array. i defaults to -1.
"""
raise NotImplementedError
@unwrap_spec(idx=int)
def descr_insert(self, space, idx, w_val):
""" insert(i,x)
Insert a new item x into the array before position i.
"""
raise NotImplementedError
def descr_tolist(self, space):
""" tolist() -> list
Convert array to an ordinary list with the same items.
"""
w_l = space.newlist([])
for i in range(self.len):
w_l.append(self.w_getitem(space, i))
return w_l
def descr_fromlist(self, space, w_lst):
""" fromlist(list)
Append items to array from list.
"""
if not space.isinstance_w(w_lst, space.w_list):
raise oefmt(space.w_TypeError, "arg must be list")
s = self.len
try:
self.fromsequence(w_lst)
except OperationError:
self.setlen(s)
raise
def descr_tostring(self, space):
""" tostring() -> string
Convert the array to an array of machine values and return the string
representation.
"""
size = self.len
if size == 0:
return space.newbytes('')
cbuf = self._charbuf_start()
s = rffi.charpsize2str(cbuf, size * self.itemsize)
self._charbuf_stop()
return self.space.newbytes(s)
def descr_fromstring(self, space, w_s):
""" fromstring(string)
Appends items from the string, interpreting it as an array of machine
        values, as if it had been read from a file using the fromfile() method.
"""
if self is w_s:
raise oefmt(space.w_ValueError,
"array.fromstring(x): x cannot be self")
s = space.getarg_w('s#', w_s)
if len(s) % self.itemsize != 0:
raise oefmt(self.space.w_ValueError,
"string length not a multiple of item size")
oldlen = self.len
new = len(s) / self.itemsize
if not new:
return
self.setlen(oldlen + new)
cbuf = self._charbuf_start()
copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize),
0, len(s))
self._charbuf_stop()
@unwrap_spec(w_f=W_File, n=int)
def descr_fromfile(self, space, w_f, n):
""" fromfile(f, n)
Read n objects from the file object f and append them to the end of the
array. Also called as read.
"""
try:
size = ovfcheck(self.itemsize * n)
except OverflowError:
raise MemoryError
w_item = space.call_method(w_f, 'read', space.newint(size))
item = space.bytes_w(w_item)
if len(item) < size:
n = len(item) % self.itemsize
elems = max(0, len(item) - (len(item) % self.itemsize))
if n != 0:
item = item[0:elems]
self.descr_fromstring(space, space.newbytes(item))
raise oefmt(space.w_EOFError, "not enough items in file")
self.descr_fromstring(space, w_item)
@unwrap_spec(w_f=W_File)
def descr_tofile(self, space, w_f):
""" tofile(f)
Write all items (as machine values) to the file object f. Also
called as write.
"""
w_s = self.descr_tostring(space)
space.call_method(w_f, 'write', w_s)
def descr_fromunicode(self, space, w_ustr):
""" fromunicode(ustr)
Extends this array with data from the unicode string ustr.
The array must be a type 'u' array; otherwise a ValueError
is raised. Use array.fromstring(ustr.decode(...)) to
append Unicode data to an array of some other type.
"""
# XXX the following probable bug is not emulated:
# CPython accepts a non-unicode string or a buffer, and then
        # behaves just like fromstring(), except that it strangely truncates
# string arguments at multiples of the unicode byte size.
# Let's only accept unicode arguments for now.
if self.typecode == 'u':
self.fromsequence(w_ustr)
else:
raise oefmt(space.w_ValueError,
"fromunicode() may only be called on type 'u' arrays")
def descr_tounicode(self, space):
""" tounicode() -> unicode
Convert the array to a unicode string. The array must be
a type 'u' array; otherwise a ValueError is raised. Use
array.tostring().decode() to obtain a unicode string from
an array of some other type.
"""
if self.typecode == 'u':
buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned())
return space.newunicode(rffi.wcharpsize2unicode(buf, self.len))
else:
raise oefmt(space.w_ValueError,
"tounicode() may only be called on type 'u' arrays")
def descr_buffer_info(self, space):
""" buffer_info() -> (address, length)
Return a tuple (address, length) giving the current memory address and
the length in items of the buffer used to hold array's contents
The length should be multiplied by the itemsize attribute to calculate
the buffer length in bytes.
"""
w_ptr = space.newint(self._buffer_as_unsigned())
w_len = space.newint(self.len)
return space.newtuple([w_ptr, w_len])
def descr_reduce(self, space):
""" Return state information for pickling.
"""
if self.len > 0:
w_s = self.descr_tostring(space)
args = [space.newtext(self.typecode), w_s]
else:
args = [space.newtext(self.typecode)]
try:
w_dict = space.getattr(self, space.newtext('__dict__'))
except OperationError:
w_dict = space.w_None
return space.newtuple([space.type(self), space.newtuple(args), w_dict])
def descr_copy(self, space):
""" copy(array)
Return a copy of the array.
"""
w_a = self.constructor(self.space)
w_a.setlen(self.len, overallocate=False)
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, w_a._buffer_as_unsigned()),
rffi.cast(rffi.VOIDP, self._buffer_as_unsigned()),
self.len * self.itemsize
)
return w_a
def descr_byteswap(self, space):
""" byteswap()
Byteswap all items of the array. If the items in the array are
not 1, 2, 4, or 8 bytes in size, RuntimeError is raised.
"""
if self.itemsize not in [1, 2, 4, 8]:
raise oefmt(space.w_RuntimeError,
"byteswap not supported for this array")
if self.len == 0:
return
bytes = self._charbuf_start()
tmp = [bytes[0]] * self.itemsize
for start in range(0, self.len * self.itemsize, self.itemsize):
stop = start + self.itemsize - 1
for i in range(self.itemsize):
tmp[i] = bytes[start + i]
for i in range(self.itemsize):
bytes[stop - i] = tmp[i]
self._charbuf_stop()
def descr_len(self, space):
return space.newint(self.len)
def descr_eq(self, space, w_arr2):
"x.__eq__(y) <==> x==y"
return compare_arrays(space, self, w_arr2, EQ)
def descr_ne(self, space, w_arr2):
"x.__ne__(y) <==> x!=y"
return compare_arrays(space, self, w_arr2, NE)
def descr_lt(self, space, w_arr2):
"x.__lt__(y) <==> x<y"
return compare_arrays(space, self, w_arr2, LT)
def descr_le(self, space, w_arr2):
"x.__le__(y) <==> x<=y"
return compare_arrays(space, self, w_arr2, LE)
def descr_gt(self, space, w_arr2):
"x.__gt__(y) <==> x>y"
return compare_arrays(space, self, w_arr2, GT)
def descr_ge(self, space, w_arr2):
"x.__ge__(y) <==> x>=y"
return compare_arrays(space, self, w_arr2, GE)
# Basic get/set/append/extend methods
def descr_getitem(self, space, w_idx):
"x.__getitem__(y) <==> x[y]"
if not space.isinstance_w(w_idx, space.w_slice):
idx, stop, step = space.decode_index(w_idx, self.len)
assert step == 0
return self.w_getitem(space, idx)
else:
return self.getitem_slice(space, w_idx)
def descr_getslice(self, space, w_i, w_j):
return space.getitem(self, space.newslice(w_i, w_j, space.w_None))
def descr_setitem(self, space, w_idx, w_item):
"x.__setitem__(i, y) <==> x[i]=y"
if space.isinstance_w(w_idx, space.w_slice):
self.setitem_slice(space, w_idx, w_item)
else:
self.setitem(space, w_idx, w_item)
def descr_setslice(self, space, w_start, w_stop, w_item):
self.setitem_slice(space,
space.newslice(w_start, w_stop, space.w_None),
w_item)
def descr_delitem(self, space, w_idx):
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
if step != 1:
# I don't care about efficiency of that so far
w_lst = self.descr_tolist(space)
space.delitem(w_lst, w_idx)
self.setlen(0)
self.fromsequence(w_lst)
return
return self.delitem(space, start, stop)
def descr_delslice(self, space, w_start, w_stop):
self.descr_delitem(space, space.newslice(w_start, w_stop,
space.w_None))
def descr_iter(self, space):
return space.newseqiter(self)
def descr_add(self, space, w_other):
if (not isinstance(w_other, W_ArrayBase)
or w_other.typecode != self.typecode):
return space.w_NotImplemented
a = self.constructor(space)
a.setlen(self.len + w_other.len, overallocate=False)
if self.len:
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, a._buffer),
rffi.cast(rffi.VOIDP, self._buffer),
self.len * self.itemsize
)
if w_other.len:
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, rffi.ptradd(a._buffer,
self.len * self.itemsize)),
rffi.cast(rffi.VOIDP, w_other._buffer),
w_other.len * self.itemsize
)
keepalive_until_here(self)
keepalive_until_here(a)
return a
def descr_inplace_add(self, space, w_other):
if (not isinstance(w_other, W_ArrayBase)
or w_other.typecode != self.typecode):
return space.w_NotImplemented
oldlen = self.len
otherlen = w_other.len
self.setlen(oldlen + otherlen)
if otherlen:
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, rffi.ptradd(self._buffer,
oldlen * self.itemsize)),
rffi.cast(rffi.VOIDP, w_other._buffer),
otherlen * self.itemsize
)
keepalive_until_here(self)
keepalive_until_here(w_other)
return self
def _mul_helper(self, space, w_repeat, is_inplace):
try:
repeat = space.getindex_w(w_repeat, space.w_OverflowError)
except OperationError as e:
if e.match(space, space.w_TypeError):
return space.w_NotImplemented
raise
if is_inplace:
a = self
start = 1
else:
a = self.constructor(space)
start = 0
if repeat <= start:
if repeat <= 0:
a.setlen(0, overallocate=False)
return a
oldlen = self.len
try:
newlen = ovfcheck(oldlen * repeat)
except OverflowError:
raise MemoryError
#
srcbuf = self._buffer
srcsize = self.len * self.itemsize
for i in range(srcsize):
if srcbuf[i] != '\x00':
break
else:
# the source is entirely zero: initialize the target
# with zeroes too
a.setlen(newlen, zero=True, overallocate=False)
return a
#
a.setlen(newlen, overallocate=False)
srcbuf = self._buffer # reload this, in case self is a
if oldlen == 1:
self._repeat_single_item(a, start, repeat)
else:
dstbuf = a._buffer
if start == 1:
dstbuf = rffi.ptradd(dstbuf, srcsize)
for r in range(start, repeat):
rffi.c_memcpy(rffi.cast(rffi.VOIDP, dstbuf),
rffi.cast(rffi.VOIDP, srcbuf),
srcsize)
dstbuf = rffi.ptradd(dstbuf, srcsize)
keepalive_until_here(self)
keepalive_until_here(a)
return a
def descr_mul(self, space, w_repeat):
return self._mul_helper(space, w_repeat, False)
def descr_inplace_mul(self, space, w_repeat):
return self._mul_helper(space, w_repeat, True)
def descr_radd(self, space, w_other):
return self.descr_add(space, w_other)
def descr_rmul(self, space, w_repeat):
return self.descr_mul(space, w_repeat)
# Misc methods
def descr_repr(self, space):
if self.len == 0:
return space.newtext("array('%s')" % self.typecode)
elif self.typecode == "c":
r = space.repr(self.descr_tostring(space))
s = "array('%s', %s)" % (self.typecode, space.text_w(r))
return space.newtext(s)
elif self.typecode == "u":
r = space.repr(self.descr_tounicode(space))
s = "array('%s', %s)" % (self.typecode, space.text_w(r))
return space.newtext(s)
else:
r = space.repr(self.descr_tolist(space))
s = "array('%s', %s)" % (self.typecode, space.text_w(r))
return space.newtext(s)
W_ArrayBase.typedef = TypeDef(
'array.array', None, None, "read-write",
__new__ = interp2app(w_array),
__len__ = interp2app(W_ArrayBase.descr_len),
__eq__ = interp2app(W_ArrayBase.descr_eq),
__ne__ = interp2app(W_ArrayBase.descr_ne),
__lt__ = interp2app(W_ArrayBase.descr_lt),
__le__ = interp2app(W_ArrayBase.descr_le),
__gt__ = interp2app(W_ArrayBase.descr_gt),
__ge__ = interp2app(W_ArrayBase.descr_ge),
__getitem__ = interp2app(W_ArrayBase.descr_getitem),
__getslice__ = interp2app(W_ArrayBase.descr_getslice),
__setitem__ = interp2app(W_ArrayBase.descr_setitem),
__setslice__ = interp2app(W_ArrayBase.descr_setslice),
__delitem__ = interp2app(W_ArrayBase.descr_delitem),
__delslice__ = interp2app(W_ArrayBase.descr_delslice),
__iter__ = interp2app(W_ArrayBase.descr_iter),
__add__ = interpindirect2app(W_ArrayBase.descr_add),
__iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add),
__mul__ = interpindirect2app(W_ArrayBase.descr_mul),
__imul__ = interpindirect2app(W_ArrayBase.descr_inplace_mul),
__radd__ = interp2app(W_ArrayBase.descr_radd),
__rmul__ = interp2app(W_ArrayBase.descr_rmul),
__repr__ = interp2app(W_ArrayBase.descr_repr),
itemsize = GetSetProperty(descr_itemsize),
typecode = GetSetProperty(descr_typecode),
__weakref__ = make_weakref_descr(W_ArrayBase),
append = interpindirect2app(W_ArrayBase.descr_append),
extend = interp2app(W_ArrayBase.descr_extend),
count = interpindirect2app(W_ArrayBase.descr_count),
index = interpindirect2app(W_ArrayBase.descr_index),
reverse = interpindirect2app(W_ArrayBase.descr_reverse),
remove = interpindirect2app(W_ArrayBase.descr_remove),
pop = interpindirect2app(W_ArrayBase.descr_pop),
insert = interpindirect2app(W_ArrayBase.descr_insert),
tolist = interp2app(W_ArrayBase.descr_tolist),
fromlist = interp2app(W_ArrayBase.descr_fromlist),
tostring = interp2app(W_ArrayBase.descr_tostring),
fromstring = interp2app(W_ArrayBase.descr_fromstring),
tofile = interp2app(W_ArrayBase.descr_tofile),
fromfile = interp2app(W_ArrayBase.descr_fromfile),
fromunicode = interp2app(W_ArrayBase.descr_fromunicode),
tounicode = interp2app(W_ArrayBase.descr_tounicode),
buffer_info = interp2app(W_ArrayBase.descr_buffer_info),
__copy__ = interp2app(W_ArrayBase.descr_copy),
__reduce__ = interp2app(W_ArrayBase.descr_reduce),
byteswap = interp2app(W_ArrayBase.descr_byteswap),
)
class TypeCode(object):
def __init__(self, itemtype, unwrap, canoverflow=False, signed=False,
method='__int__', errorname=None):
if errorname is None:
errorname = unwrap[:-2]
self.itemtype = itemtype
self.bytes = rffi.sizeof(itemtype)
self.arraytype = lltype.Array(itemtype, hints={'nolength': True})
self.arrayptrtype = lltype.Ptr(self.arraytype)
self.unwrap = unwrap
self.signed = signed
self.canoverflow = canoverflow
self.w_class = None
self.method = method
self.errorname = errorname
def _freeze_(self):
# hint for the annotator: track individual constant instances
return True
if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG):
# 32 bits: UINT can't safely overflow into a C long (rpython int)
# via int_w, handle it like ULONG below
_UINTTypeCode = \
TypeCode(rffi.UINT, 'bigint_w')
else:
_UINTTypeCode = \
TypeCode(rffi.UINT, 'int_w', True)
types = {
'c': TypeCode(lltype.Char, 'bytes_w', method=''),
'u': TypeCode(lltype.UniChar, 'unicode_w', method=''),
'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True),
'B': TypeCode(rffi.UCHAR, 'int_w', True),
'h': TypeCode(rffi.SHORT, 'int_w', True, True),
'H': TypeCode(rffi.USHORT, 'int_w', True),
'i': TypeCode(rffi.INT, 'int_w', True, True),
'I': _UINTTypeCode,
'l': TypeCode(rffi.LONG, 'int_w', True, True),
'L': TypeCode(rffi.ULONG, 'bigint_w', # Overflow handled by
errorname="integer"), # rbigint.touint() which
# corresponds to the
# C-type unsigned long
'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'),
'd': TypeCode(lltype.Float, 'float_w', method='__float__'),
}
for k, v in types.items():
v.typecode = k
unroll_typecodes = unrolling_iterable(types.keys())
class ArrayBuffer(RawBuffer):
_immutable_ = True
def __init__(self, w_array, readonly):
self.w_array = w_array
self.readonly = readonly
def getlength(self):
return self.w_array.len * self.w_array.itemsize
def getitem(self, index):
w_array = self.w_array
data = w_array._charbuf_start()
char = data[index]
w_array._charbuf_stop()
return char
def setitem(self, index, char):
w_array = self.w_array
data = w_array._charbuf_start()
data[index] = char
w_array._charbuf_stop()
def getslice(self, start, stop, step, size):
if size == 0:
return ''
if step == 1:
data = self.w_array._charbuf_start()
try:
return rffi.charpsize2str(rffi.ptradd(data, start), size)
finally:
self.w_array._charbuf_stop()
return RawBuffer.getslice(self, start, stop, step, size)
def get_raw_address(self):
return self.w_array._charbuf_start()
unpack_driver = jit.JitDriver(name='unpack_array',
greens=['selfclass', 'tp'],
reds=['self', 'w_iterator'])
def make_array(mytype):
W_ArrayBase = globals()['W_ArrayBase']
class W_Array(W_ArrayBase):
itemsize = mytype.bytes
typecode = mytype.typecode
_attrs_ = W_ArrayBase._attrs_
def get_buffer(self):
return rffi.cast(mytype.arrayptrtype, self._buffer)
def item_w(self, w_item):
space = self.space
unwrap = getattr(space, mytype.unwrap)
try:
item = unwrap(w_item)
except OperationError as e:
if space.isinstance_w(w_item, space.w_float):
# Odd special case from cpython
raise
if mytype.method != '' and e.match(space, space.w_TypeError):
try:
item = unwrap(space.call_method(w_item, mytype.method))
except OperationError:
raise oefmt(space.w_TypeError,
"array item must be " + mytype.errorname)
else:
raise
if mytype.unwrap == 'bigint_w':
try:
item = item.touint()
except (ValueError, OverflowError):
raise oefmt(space.w_OverflowError,
"unsigned %d-byte integer out of range",
mytype.bytes)
return rffi.cast(mytype.itemtype, item)
if mytype.unwrap == 'bytes_w' or mytype.unwrap == 'unicode_w':
if len(item) != 1:
raise oefmt(space.w_TypeError, "array item must be char")
item = item[0]
return rffi.cast(mytype.itemtype, item)
#
# "regular" case: it fits in an rpython integer (lltype.Signed)
# or it is a float
return self.item_from_int_or_float(item)
def item_from_int_or_float(self, item):
result = rffi.cast(mytype.itemtype, item)
if mytype.canoverflow:
if rffi.cast(lltype.Signed, result) != item:
# overflow. build the correct message
if item < 0:
msg = ('signed %d-byte integer is less than minimum' %
mytype.bytes)
else:
msg = ('signed %d-byte integer is greater than maximum'
% mytype.bytes)
if not mytype.signed:
msg = 'un' + msg # 'signed' => 'unsigned'
raise OperationError(self.space.w_OverflowError,
self.space.newtext(msg))
return result
def fromsequence(self, w_seq):
space = self.space
oldlen = self.len
newlen = oldlen
# optimized case for arrays of integers or floats
if mytype.unwrap == 'int_w':
lst = space.listview_int(w_seq)
elif mytype.unwrap == 'float_w':
lst = space.listview_float(w_seq)
else:
lst = None
if lst is not None:
self.setlen(oldlen + len(lst))
try:
buf = self.get_buffer()
for num in lst:
buf[newlen] = self.item_from_int_or_float(num)
newlen += 1
except OperationError:
self.setlen(newlen)
raise
keepalive_until_here(self)
return
# this is the common case: w_seq is a list or a tuple
lst_w = space.listview_no_unpack(w_seq)
if lst_w is not None:
self.setlen(oldlen + len(lst_w))
buf = self.get_buffer()
try:
for w_num in lst_w:
# note: self.item_w() might invoke arbitrary code.
# In case it resizes the same array, then strange
# things may happen, but as we don't reload 'buf'
# we know that one is big enough for all items
# (so at least we avoid crashes)
buf[newlen] = self.item_w(w_num)
newlen += 1
except OperationError:
if buf == self.get_buffer():
self.setlen(newlen)
raise
keepalive_until_here(self)
return
self._fromiterable(w_seq)
def extend(self, w_iterable, accept_different_array=False):
space = self.space
if isinstance(w_iterable, W_Array):
oldlen = self.len
new = w_iterable.len
self.setlen(self.len + new)
i = 0
buf = self.get_buffer()
srcbuf = w_iterable.get_buffer()
while i < new:
if oldlen + i >= self.len:
self.setlen(oldlen + i + 1)
buf[oldlen + i] = srcbuf[i]
i += 1
keepalive_until_here(w_iterable)
self.setlen(oldlen + i)
elif (not accept_different_array
and isinstance(w_iterable, W_ArrayBase)):
raise oefmt(space.w_TypeError,
"can only extend with array of same kind")
else:
self.fromsequence(w_iterable)
def w_getitem(self, space, idx):
item = self.get_buffer()[idx]
keepalive_until_here(self)
if mytype.typecode in 'bBhHil':
item = rffi.cast(lltype.Signed, item)
return space.newint(item)
if mytype.typecode in 'IL':
return space.newint(item)
elif mytype.typecode in 'fd':
item = float(item)
return space.newfloat(item)
elif mytype.typecode == 'c':
return space.newbytes(item)
elif mytype.typecode == 'u':
return space.newunicode(item)
assert 0, "unreachable"
# interface
def descr_append(self, space, w_x):
x = self.item_w(w_x)
index = self.len
self.setlen(index + 1)
self.get_buffer()[index] = x
keepalive_until_here(self)
# List interface
def descr_reverse(self, space):
b = self.get_buffer()
for i in range(self.len / 2):
b[i], b[self.len - i - 1] = b[self.len - i - 1], b[i]
keepalive_until_here(self)
def descr_pop(self, space, i):
if i < 0:
i += self.len
if i < 0 or i >= self.len:
raise oefmt(space.w_IndexError, "pop index out of range")
w_val = self.w_getitem(space, i)
b = self.get_buffer()
while i < self.len - 1:
b[i] = b[i + 1]
i += 1
keepalive_until_here(self)
self.setlen(self.len - 1)
return w_val
def descr_insert(self, space, idx, w_val):
if idx < 0:
idx += self.len
if idx < 0:
idx = 0
if idx > self.len:
idx = self.len
val = self.item_w(w_val)
self.setlen(self.len + 1)
i = self.len - 1
b = self.get_buffer()
while i > idx:
b[i] = b[i - 1]
i -= 1
b[i] = val
keepalive_until_here(self)
def getitem_slice(self, space, w_idx):
start, stop, step, size = space.decode_index4(w_idx, self.len)
w_a = mytype.w_class(self.space)
w_a.setlen(size, overallocate=False)
assert step != 0
j = 0
buf = w_a.get_buffer()
srcbuf = self.get_buffer()
for i in range(start, stop, step):
buf[j] = srcbuf[i]
j += 1
keepalive_until_here(self)
keepalive_until_here(w_a)
return w_a
def setitem(self, space, w_idx, w_item):
idx, stop, step = space.decode_index(w_idx, self.len)
if step != 0:
raise oefmt(self.space.w_TypeError,
"can only assign array to array slice")
item = self.item_w(w_item)
self.get_buffer()[idx] = item
keepalive_until_here(self)
def setitem_slice(self, space, w_idx, w_item):
if not isinstance(w_item, W_Array):
raise oefmt(space.w_TypeError,
"can only assign to a slice array")
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
assert step != 0
if w_item.len != size or self is w_item:
if start == self.len and step > 0:
# we actually want simply extend()
self.extend(w_item)
else:
# XXX this is a giant slow hack
w_lst = self.descr_tolist(space)
w_item = space.call_method(w_item, 'tolist')
space.setitem(w_lst, w_idx, w_item)
self.setlen(0)
self.fromsequence(w_lst)
else:
j = 0
buf = self.get_buffer()
srcbuf = w_item.get_buffer()
for i in range(start, stop, step):
buf[i] = srcbuf[j]
j += 1
keepalive_until_here(w_item)
keepalive_until_here(self)
def _repeat_single_item(self, a, start, repeat):
# <a performance hack>
assert isinstance(a, W_Array)
item = self.get_buffer()[0]
dstbuf = a.get_buffer()
for r in range(start, repeat):
dstbuf[r] = item
mytype.w_class = W_Array
W_Array.constructor = W_Array
name = 'ArrayType' + mytype.typecode
W_Array.__name__ = 'W_' + name
for mytype in types.values():
make_array(mytype)
del mytype
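# App-level behaviour sketch (editorial addition): what this module implements,
# as seen from Python 2 user code; not part of the interpreter source.
#
#   >>> from array import array
#   >>> a = array('i', [1, 2, 3])
#   >>> a.append(4); a.tolist()
#   [1, 2, 3, 4]
#   >>> len(a.tostring()) == 4 * a.itemsize
#   True
#   >>> a.byteswap()  # reverses the byte order of every item in place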
|
196329
|
from rpython.rlib.debug import debug_print
from rpython.rlib.jit import Counters, JitHookInterface
class DebugPrinter(object):
# _immutable_fields_ = "enabled?",
enabled = False
def enableDebugPrint(self):
self.enabled = True
def debugPrint(self, *args):
if self.enabled:
debug_print(*args)
debugPrinter = DebugPrinter()
debugPrint = debugPrinter.debugPrint
enableDebugPrint = debugPrinter.enableDebugPrint
class TyphonJitHooks(JitHookInterface):
def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops,
operations):
if True:
return
reasonString = Counters.counter_names[reason]
print "Aborted trace:", greenkey_repr, reasonString, "operations", len(operations)
def after_compile(self, debug_info):
if True:
return
print "Compiled:", debug_info.get_greenkey_repr(), "operations", len(debug_info.operations)
def after_compile_bridge(self, debug_info):
if True:
return
print "Compiled bridge: operations", len(debug_info.operations)
|
196358
|
import unittest
import numpy as np
from codecarbon.external.hardware import RAM
# TODO: need help: test multiprocess case
class TestRAM(unittest.TestCase):
def test_ram_diff(self):
ram = RAM(tracking_mode="process")
for array_size in [
# (10, 10), # too small to be noticed
# (100, 100), # too small to be noticed
(1000, 1000), # ref for atol
(10, 1000, 1000),
(20, 1000, 1000),
(100, 1000, 1000),
(200, 1000, 1000),
(1000, 1000, 1000),
(2000, 1000, 1000),
]:
with self.subTest(array_size=array_size):
ref_W = ram.total_power().W
array = np.ones(array_size, dtype=np.int8)
new_W = ram.total_power().W
n_gb = array.nbytes / (1000 ** 3)
n_gb_W = (new_W - ref_W) / ram.power_per_GB
is_close = np.isclose(n_gb, n_gb_W, atol=1e-3)
self.assertTrue(
is_close,
msg=f"{array_size}, {n_gb}, {n_gb_W}, {is_close}",
)
del array
|
196361
|
from __future__ import unicode_literals
from mopidy_auto import Extension
def test_get_default_config():
ext = Extension()
config = ext.get_default_config()
assert '[auto]' in config
assert 'enabled = true' in config
def test_get_config_schema():
ext = Extension()
schema = ext.get_config_schema()
assert 'admin_key' in schema
assert 'base_path' in schema
assert 'max_tracks' in schema
for index in range(3):
assert "s{}_start".format(index) in schema
assert "s{}_folder".format(index) in schema
assert "s{}_max_volume".format(index) in schema
    # TODO Write more tests
|
196365
|
import typing as t
from jinja2 import BaseLoader, ChoiceLoader, Environment, PackageLoader
from jinja2.ext import Extension
from pydantic import BaseSettings, Field, IPvAnyAddress, root_validator
from pydantic.color import Color
class DebugToolbarSettings(BaseSettings):
DEFAULT_PANELS: t.List[str] = Field(
[
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.pydantic.PydanticPanel",
"debug_toolbar.panels.routes.RoutesPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
],
description=(
"Specifies the full Python path to each panel that you "
"want included in the toolbar."
),
)
PANELS: t.List[str] = Field(
[],
description=(
"A list of the full Python paths to each panel that you "
"want to append to `DEFAULT_PANELS`."
),
)
DISABLE_PANELS: t.Sequence[str] = Field(
["debug_toolbar.panels.redirects.RedirectsPanel"],
description=(
"A list of the full Python paths to each panel that you "
"want disabled (but still displayed) by default."
),
)
ALLOWED_IPS: t.Optional[t.Sequence[IPvAnyAddress]] = Field(
None,
description=(
"If it's set, the Debug Toolbar is shown only "
"if your IP address is listed."
),
)
JINJA_ENV: Environment = Field(
Environment(),
description="The Jinja environment instance used to render the toolbar.",
)
JINJA_LOADERS: t.List[BaseLoader] = Field(
[],
description=(
"Jinja `BaseLoader` subclasses used to load templates "
"from the file system or other locations."
),
)
JINJA_EXTENSIONS: t.Sequence[t.Union[str, t.Type[Extension]]] = Field(
[],
description=(
"Load the extensions from the list and bind them to the Jinja environment."
),
)
API_URL: str = Field(
"/_debug_toolbar",
description="URL prefix to use for toolbar endpoints.",
)
STATIC_URL: str = Field(
f"{API_URL.default}/static", # type: ignore
description="URL to use when referring to toolbar static files.",
)
SHOW_TOOLBAR_CALLBACK: str = Field(
"debug_toolbar.middleware.show_toolbar",
description=(
"This is the dotted path to a function used for "
"determining whether the toolbar should show or not."
),
)
INSERT_BEFORE: str = Field(
"</body>",
description=(
"The toolbar searches for this string in the HTML "
"and inserts itself just before."
),
)
SHOW_COLLAPSE: bool = Field(
False,
description="If changed to `True`, the toolbar will be collapsed by default.",
)
ROOT_TAG_EXTRA_ATTRS: str = Field(
"",
description=(
"This setting is injected in the root template div "
"in order to avoid conflicts with client-side frameworks"
),
)
RESULTS_CACHE_SIZE: int = Field(
25,
description="The toolbar keeps up to this many results in memory.",
)
PROFILER_OPTIONS: t.Dict[str, t.Any] = Field(
{"interval": 0.0001},
description="A list of arguments can be supplied to the Profiler.",
)
SETTINGS: t.Sequence[BaseSettings] = Field(
[],
description=(
"pydantic's `BaseSettings` instances to be "
"displayed on the `SettingsPanel`."
),
)
LOGGING_COLORS: t.Dict[str, Color] = Field(
{
"CRITICAL": Color("rgba(255, 0, 0, .4)"),
"ERROR": Color("rgba(255, 0, 0, .2)"),
"WARNING": Color("rgba(255, 165, 0, .2)"),
"INFO": Color("rgba(135, 206, 235, .2)"),
"DEBUG": Color("rgba(128, 128, 128, .2)"),
},
description="Color palette used to apply colors based on the log level.",
)
SQL_WARNING_THRESHOLD: int = Field(
500,
description=(
"The SQL panel highlights queries that took more that this amount of "
"time, in milliseconds, to execute."
),
)
class Config:
title = "Debug Toolbar"
env_prefix = "DT_"
case_sensitive = True
def __init__(self, **settings: t.Any) -> None:
super().__init__(**settings)
loaders = self.JINJA_LOADERS + [PackageLoader("debug_toolbar", "templates")]
self.JINJA_ENV.loader = ChoiceLoader(loaders)
self.JINJA_ENV.trim_blocks = True
self.JINJA_ENV.lstrip_blocks = True
for extension in self.JINJA_EXTENSIONS:
self.JINJA_ENV.add_extension(extension)
@root_validator(pre=True)
def ci(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
return {k.upper(): v for k, v in values.items()}
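# Usage sketch (editorial addition): keys are upper-cased by the `ci` validator,
# and `DT_`-prefixed environment variables are honoured via `env_prefix`.
if __name__ == "__main__":
    settings = DebugToolbarSettings(show_collapse=True)
    assert settings.SHOW_COLLAPSE is True   # lower-case key still matches
    assert settings.API_URL == "/_debug_toolbar"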
|
196416
|
from kernels import wave_kernel
from geom_utils import src_rec
from wave_utils import (wf_as_src, wavefield, otf_dft, extended_src_weights,
extented_src, wavefield_subsampled, weighted_norm)
from sensitivity import grad_expr, lin_src
from utils import weight_fun, opt_op
from devito import Operator, Function
from devito.tools import as_tuple
def name(model):
return "tti" if model.is_tti else ""
# Forward propagation
def forward(model, src_coords, rcv_coords, wavelet, space_order=8, save=False,
q=None, return_op=False, freq_list=None, dft_sub=None,
ws=None, t_sub=1, **kwargs):
"""
Low level propagator, to be used through `interface.py`
Compute forward wavefield u = A(m)^{-1}*f and related quantities (u(xrcv))
"""
# Number of time steps
nt = as_tuple(q)[0].shape[0] if wavelet is None else wavelet.shape[0]
# Setting forward wavefield
u = wavefield(model, space_order, save=save, nt=nt, t_sub=t_sub)
# Expression for saving wavefield if time subsampling is used
u_save, eq_save = wavefield_subsampled(model, u, nt, t_sub)
# Add extended source
q = q or wf_as_src(u, w=0)
q = extented_src(model, ws, wavelet, q=q)
# Set up PDE expression and rearrange
pde = wave_kernel(model, u, q=q)
# Setup source and receiver
geom_expr, _, rcv = src_rec(model, u, src_coords=src_coords, nt=nt,
rec_coords=rcv_coords, wavelet=wavelet)
# On-the-fly Fourier
dft, dft_modes = otf_dft(u, freq_list, model.critical_dt, factor=dft_sub)
# Create operator and run
subs = model.spacing_map
op = Operator(pde + dft + geom_expr + eq_save,
subs=subs, name="forward"+name(model),
opt=opt_op(model))
op.cfunction
if return_op:
return op, u, rcv
summary = op()
# Output
return rcv, dft_modes or (u_save if t_sub > 1 else u), summary
def adjoint(model, y, src_coords, rcv_coords, space_order=8, q=0, dft_sub=None,
save=False, ws=None, norm_v=False, w_fun=None, freq_list=None):
"""
Low level propagator, to be used through `interface.py`
Compute adjoint wavefield v = adjoint(F(m))*y
and related quantities (||v||_w, v(xsrc))
"""
# Number of time steps
nt = as_tuple(q)[0].shape[0] if y is None else y.shape[0]
# Setting adjoint wavefield
v = wavefield(model, space_order, save=save, nt=nt, fw=False)
# Set up PDE expression and rearrange
pde = wave_kernel(model, v, q=q, fw=False)
# On-the-fly Fourier
dft, dft_modes = otf_dft(v, freq_list, model.critical_dt, factor=dft_sub)
# Setup source and receiver
geom_expr, _, rcv = src_rec(model, v, src_coords=rcv_coords, nt=nt,
rec_coords=src_coords, wavelet=y, fw=False)
# Extended source
wsrc, ws_expr = extended_src_weights(model, ws, v)
# Wavefield norm
nv_t, nv_s = ([], [])
if norm_v:
weights = weight_fun(w_fun, model, src_coords)
norm_v, (nv_t, nv_s) = weighted_norm(v, weight=weights)
# Create operator and run
subs = model.spacing_map
op = Operator(pde + ws_expr + nv_t + dft + geom_expr + nv_s,
subs=subs, name="adjoint"+name(model),
opt=opt_op(model))
op.cfunction
# Run operator
summary = op()
# Output
if wsrc:
return wsrc, summary
if norm_v:
return rcv, dft_modes or v, norm_v.data[0], summary
return rcv, v, summary
def gradient(model, residual, rcv_coords, u, return_op=False, space_order=8,
w=None, freq=None, dft_sub=None, isic=False):
"""
Low level propagator, to be used through `interface.py`
    Compute the action of the adjoint Jacobian onto a residual: J' * δd.
"""
    # Setting adjoint wavefield
v = wavefield(model, space_order, fw=False)
# Set up PDE expression and rearrange
pde = wave_kernel(model, v, fw=False)
# Setup source and receiver
geom_expr, _, _ = src_rec(model, v, src_coords=rcv_coords,
wavelet=residual, fw=False)
# Setup gradient wrt m
gradm = Function(name="gradm", grid=model.grid)
g_expr = grad_expr(gradm, u, v, model, w=w, freq=freq, dft_sub=dft_sub, isic=isic)
# Create operator and run
subs = model.spacing_map
op = Operator(pde + geom_expr + g_expr,
subs=subs, name="gradient"+name(model),
opt=opt_op(model))
try:
op.cfunction
    except Exception:  # fall back to the 'advanced' optimization mode if compilation fails
op = Operator(pde + geom_expr + g_expr,
subs=subs, name="gradient"+name(model),
opt='advanced')
op.cfunction
if return_op:
return op, gradm, v
summary = op()
# Output
return gradm, summary
def born(model, src_coords, rcv_coords, wavelet, space_order=8, save=False,
q=None, return_op=False, isic=False, freq_list=None, dft_sub=None,
ws=None, t_sub=1, nlind=False):
"""
Low level propagator, to be used through `interface.py`
    Compute linearized wavefield U = J(m) * δm
and related quantities.
"""
nt = wavelet.shape[0]
# Setting wavefield
u = wavefield(model, space_order, save=save, nt=nt, t_sub=t_sub)
ul = wavefield(model, space_order, name="l")
# Expression for saving wavefield if time subsampling is used
u_save, eq_save = wavefield_subsampled(model, u, nt, t_sub)
# Extended source
q = q or wf_as_src(u, w=0)
q = extented_src(model, ws, wavelet, q=q)
# Set up PDE expression and rearrange
pde = wave_kernel(model, u, q=q)
if model.dm == 0:
pdel = []
else:
pdel = wave_kernel(model, ul, q=lin_src(model, u, isic=isic))
# Setup source and receiver
geom_expr, _, rcvnl = src_rec(model, u, rec_coords=rcv_coords if nlind else None,
src_coords=src_coords, wavelet=wavelet)
geom_exprl, _, rcvl = src_rec(model, ul, rec_coords=rcv_coords, nt=nt)
# On-the-fly Fourier
dft, dft_modes = otf_dft(u, freq_list, model.critical_dt, factor=dft_sub)
# Create operator and run
subs = model.spacing_map
op = Operator(pde + geom_expr + geom_exprl + pdel + dft + eq_save,
subs=subs, name="born"+name(model),
opt=opt_op(model))
op.cfunction
outrec = (rcvl, rcvnl) if nlind else rcvl
if return_op:
return op, u, outrec
summary = op()
# Output
return outrec, dft_modes or (u_save if t_sub > 1 else u), summary
# Forward propagation
def forward_grad(model, src_coords, rcv_coords, wavelet, v, space_order=8,
q=None, ws=None, isic=False, w=None, freq=None, **kwargs):
"""
Low level propagator, to be used through `interface.py`
Compute forward wavefield u = A(m)^{-1}*f and related quantities (u(xrcv))
"""
# Number of time steps
nt = as_tuple(q)[0].shape[0] if wavelet is None else wavelet.shape[0]
# Setting forward wavefield
u = wavefield(model, space_order, save=False)
# Add extended source
q = q or wf_as_src(u, w=0)
q = extented_src(model, ws, wavelet, q=q)
# Set up PDE expression and rearrange
pde = wave_kernel(model, u, q=q)
# Setup source and receiver
geom_expr, _, rcv = src_rec(model, u, src_coords=src_coords, nt=nt,
rec_coords=rcv_coords, wavelet=wavelet)
# Setup gradient wrt m
gradm = Function(name="gradm", grid=model.grid)
g_expr = grad_expr(gradm, v, u, model, w=w, isic=isic, freq=freq)
# Create operator and run
subs = model.spacing_map
op = Operator(pde + geom_expr + g_expr,
subs=subs, name="forward_grad"+name(model),
opt=opt_op(model))
summary = op()
# Output
return rcv, gradm, summary
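# --- Hedged usage sketch (not part of the original file). These propagators
# expect a Devito `model` plus source/receiver coordinate arrays and a wavelet
# from the surrounding codebase, so the calls are shown as comments only:
#   rcv, u, summary = forward(model, src_coords, rcv_coords, wavelet, save=True)
#   g, summary = gradient(model, residual, rcv_coords, u)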
|
196471
|
import datetime
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class BetterAuthor(Author):
write_speed = models.IntegerField()
class Book(models.Model):
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title'),
)
ordering = ['id']
def __unicode__(self):
return self.title
class BookWithCustomPK(models.Model):
my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
def __unicode__(self):
return u'%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
name = models.CharField(max_length=100)
class BookWithOptionalAltEditor(models.Model):
author = models.ForeignKey(Author)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title', 'alt_editor'),
)
def __unicode__(self):
return self.title
class AlternateBook(Book):
notes = models.CharField(max_length=100)
def __unicode__(self):
return u'%s - %s' % (self.title, self.notes)
class AuthorMeeting(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
created = models.DateField(editable=False)
def __unicode__(self):
return self.name
class CustomPrimaryKey(models.Model):
my_pk = models.CharField(max_length=10, primary_key=True)
some_field = models.CharField(max_length=100)
# models for inheritance tests.
class Place(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class Owner(models.Model):
auto_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
place = models.ForeignKey(Place)
def __unicode__(self):
return "%s at %s" % (self.name, self.place)
class Location(models.Model):
place = models.ForeignKey(Place, unique=True)
# this is purely for testing the data doesn't matter here :)
lat = models.CharField(max_length=100)
lon = models.CharField(max_length=100)
class OwnerProfile(models.Model):
owner = models.OneToOneField(Owner, primary_key=True)
age = models.PositiveIntegerField()
def __unicode__(self):
return "%s is %d" % (self.owner.name, self.age)
class Restaurant(Place):
serves_pizza = models.BooleanField()
def __unicode__(self):
return self.name
class Product(models.Model):
slug = models.SlugField(unique=True)
def __unicode__(self):
return self.slug
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __unicode__(self):
return u"%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
serves_tacos = models.BooleanField()
class ClassyMexicanRestaurant(MexicanRestaurant):
restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
tacos_are_yummy = models.BooleanField()
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
class Repository(models.Model):
name = models.CharField(max_length=25)
def __unicode__(self):
return self.name
class Revision(models.Model):
repository = models.ForeignKey(Repository)
revision = models.CharField(max_length=40)
class Meta:
unique_together = (("repository", "revision"),)
def __unicode__(self):
return u"%s (%s)" % (self.revision, unicode(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
name = models.CharField(max_length=128)
class Membership(models.Model):
person = models.ForeignKey(Person)
date_joined = models.DateTimeField(default=datetime.datetime.now)
karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
name = models.CharField(max_length=100)
class Player(models.Model):
team = models.ForeignKey(Team, null=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
class Poet(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Poem(models.Model):
poet = models.ForeignKey(Poet)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __unicode__(self):
        return self.title
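# --- Hedged usage sketch (not part of the original file; illustrative only):
#   from django.forms.models import inlineformset_factory
#   RevisionFormSet = inlineformset_factory(Repository, Revision)
#   # With unique_together = ("repository", "revision"), the formset rejects
#   # duplicate revisions submitted for the same repository.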
|
196473
|
class TestStackiBoxInfo:
def test_no_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info")
assert result.status == "SUCCESS"
assert result.data["changed"] == False
assert len(result.data["boxes"]) == 2
def test_with_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info", name="default")
assert result.status == "SUCCESS"
assert result.data["changed"] == False
assert len(result.data["boxes"]) == 1
assert result.data["boxes"][0]["name"] == "default"
def test_bad_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info", name="foo")
assert result.status == "FAILED!"
assert result.data["changed"] == False
assert "error" in result.data["msg"]
assert "not a valid box" in result.data["msg"]
|
196480
|
from __future__ import print_function
import time
start = time.perf_counter()  # time.clock() was deprecated and removed in Python 3.8
table = {}  # renamed from `map` to avoid shadowing the built-in
for i in range(1, 2000001):
    table[i] = i
total = 0  # renamed from `sum` to avoid shadowing the built-in
for i in range(1, 2000001):
    total = total + table[i]
print(total)
for i in range(1, 2000001):
    del table[i]
print("elapsed: " + str(time.perf_counter() - start))
|
196499
|
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
Loss Prediction Module in PyTorch (https://arxiv.org/abs/1905.03677)
Code adapted from: https://github.com/Mephisto405/Learning-Loss-for-Active-Learning
'''
class LossNet(nn.Module):
def __init__(self, feature_sizes=None, num_channels=None, interm_dim=128):
super(LossNet, self).__init__()
if feature_sizes is None:
feature_sizes = [32, 16, 8, 4]
if num_channels is None:
num_channels = [64, 128, 256, 512]
self.GAP1 = nn.AvgPool2d(feature_sizes[0])
self.GAP2 = nn.AvgPool2d(feature_sizes[1])
self.GAP3 = nn.AvgPool2d(feature_sizes[2])
self.GAP4 = nn.AvgPool2d(feature_sizes[3])
self.FC1 = nn.Linear(num_channels[0], interm_dim)
self.FC2 = nn.Linear(num_channels[1], interm_dim)
self.FC3 = nn.Linear(num_channels[2], interm_dim)
self.FC4 = nn.Linear(num_channels[3], interm_dim)
self.linear = nn.Linear(4 * interm_dim, 1)
def forward(self, features):
out1 = self.GAP1(features[0])
out1 = out1.view(out1.size(0), -1)
out1 = F.relu(self.FC1(out1))
out2 = self.GAP2(features[1])
out2 = out2.view(out2.size(0), -1)
out2 = F.relu(self.FC2(out2))
out3 = self.GAP3(features[2])
out3 = out3.view(out3.size(0), -1)
out3 = F.relu(self.FC3(out3))
out4 = self.GAP4(features[3])
out4 = out4.view(out4.size(0), -1)
out4 = F.relu(self.FC4(out4))
out = self.linear(torch.cat((out1, out2, out3, out4), 1))
return out
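# --- Hedged usage sketch (not part of the original file). Shapes follow the
# default feature_sizes=[32, 16, 8, 4] and num_channels=[64, 128, 256, 512]:
if __name__ == '__main__':
    feats = [torch.randn(2, 64, 32, 32), torch.randn(2, 128, 16, 16),
             torch.randn(2, 256, 8, 8), torch.randn(2, 512, 4, 4)]
    print(LossNet()(feats).shape)  # torch.Size([2, 1])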
|
196503
|
import logging
from icrawl_plugin import IHostCrawler
from utils.dockerutils import exec_dockerps
from utils.features import DockerPSFeature
logger = logging.getLogger('crawlutils')
class DockerpsHostCrawler(IHostCrawler):
def get_feature(self):
return 'dockerps'
def crawl(self, **kwargs):
logger.debug('Crawling %s' % (self.get_feature()))
for inspect in exec_dockerps():
yield (inspect['Id'], DockerPSFeature._make([
inspect['State']['Running'],
0,
inspect['Image'],
[],
inspect['Config']['Cmd'],
inspect['Name'],
inspect['Id'],
]), 'dockerps')
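# --- Hedged usage sketch (not part of the original file; requires a running
# Docker daemon and the surrounding crawler framework, so comments only):
#   crawler = DockerpsHostCrawler()
#   for container_id, feature, feature_type in crawler.crawl():
#       print(container_id, feature_type)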
|
196580
|
import os
import time
import pickle
import random
import numpy as np
import tensorflow as tf
import sys
from input import DataInput, DataInputTest
from model import Model
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=256, help="inference batch size")
args = parser.parse_args()
random.seed(1234)
np.random.seed(1234)
tf.set_random_seed(1234)
predict_batch_size = args.batch_size
predict_ads_num = 1
with open('dataset.pkl', 'rb') as f:
train_set = pickle.load(f)
test_set = pickle.load(f)
cate_list = pickle.load(f)
user_count, item_count, cate_count = pickle.load(f)
best_auc = 0.0
def _auc_arr(score):
score_p = score[:,0]
score_n = score[:,1]
#print "============== p ============="
#print score_p
#print "============== n ============="
#print score_n
score_arr = []
for s in score_p.tolist():
score_arr.append([0, 1, s])
for s in score_n.tolist():
score_arr.append([1, 0, s])
return score_arr
def _test(sess, model):
print('Round Batch size Recommendations / sec')
total_time = 0
perf_total = []
score_append = np.empty((predict_batch_size, predict_ads_num, 1), float)
iteration = 0
    # warm up: run a few batches before timing
for _, uij in DataInputTest(test_set, predict_batch_size):
score_ = model.test(sess,uij)
score_append = np.append(score_append, score_, axis = 0)
iteration += 1
if iteration == 5:
np.save('inference_' + str(predict_batch_size) +'.npy', score_append)
break
# start testing
time_st = time.time()
iteration = 0
for _, uij in DataInputTest(test_set, predict_batch_size):
if len(uij[0]) != predict_batch_size:
break
s_time = time.time()
score_ = model.test(sess, uij)
e_time = time.time()
total_time += e_time - s_time
iteration += 1
if iteration % 1000 == 0:
time_dur = time.time() - time_st
perf = predict_batch_size * iteration / time_dur
print(' %2i %4i %10.1f' % (iteration, predict_batch_size, perf ))
# break
# elif iteration % 100 == 0:
# time_dur = time.time() - time_st
# perf = predict_batch_size * iteration / time_dur
# print(' %2i %4i %10.1f' % (iteration, predict_batch_size, perf ))
time_dur = time.time() - time_st
perf = predict_batch_size * iteration / time_dur
print("Average performance is %10.1f for batch size=" % perf, predict_batch_size)
print("Total recommendations: %d" % len(test_set))
print("Approximate accelerator time in seconds: %.3f" % total_time)
print("Approximate accelerator performance in recommendations/second: %.3f" % (len(test_set)/total_time))
devices = ['/gpu:0']
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num, 1, devices)
model.restore(sess, 'save_path/ckpt')
_test(sess, model)
|
196597
|
from fac.commands import Command, Arg
class PackUnpackCommand(Command):
arguments = [
Arg('mods', nargs='+', help="mods patterns to affect"),
Arg('-R', '--replace', action='store_true',
help="replace existing file/directory when packing/unpacking"),
Arg('-K', '--keep', action='store_true',
help="keep existing directory/file after packing/unpacking"),
]
def run(self, args):
pack = self.name == 'pack'
for mod_pattern in args.mods:
mod_pattern = self.manager.resolve_mod_name(mod_pattern)
mods = self.manager.find_mods(mod_pattern, packed=not pack)
if not mods:
print("No %sable found for %s." % (self.name,
mod_pattern))
continue
for mod in mods:
dup_mod = self.manager.get_mod(mod.name, mod.version,
packed=pack)
if dup_mod and not args.replace:
print("%s is already %sed. Use -R to replace it." % (
mod.name, self.name
))
continue
if pack:
mod.pack(replace=args.replace, keep=args.keep)
else:
mod.unpack(replace=args.replace, keep=args.keep)
print("%s is now %sed" % (mod.name, self.name))
class PackCommand(PackUnpackCommand):
"""Pack mods."""
name = 'pack'
class UnpackCommand(PackUnpackCommand):
"""Unpack mods."""
name = 'unpack'
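# --- Hedged usage sketch (not part of the original file). Typical CLI
# invocations, assuming the standard `fac` entry point dispatches to the
# commands above:
#   fac pack some-mod        # zip an unpacked mod directory
#   fac unpack -R some-mod   # unpack, replacing any existing directory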
|
196605
|
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
from mmcv.ops.carafe import CARAFEPack
from ..builder import NECKS
@NECKS.register_module()
class FPN_CARAFE(nn.Module):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
choice for upsample methods during the top-down pathway.
    It can reproduce the performance of the ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures
Please refer to https://arxiv.org/abs/1905.02188 for more details.
Args:
in_channels (list[int]): Number of channels for each input feature map.
out_channels (int): Output channels of feature pyramids.
num_outs (int): Number of output stages.
start_level (int): Start level of feature pyramids.
(Default: 0)
end_level (int): End level of feature pyramids.
(Default: -1 indicates the last level).
norm_cfg (dict): Dictionary to construct and config norm layer.
        act_cfg (dict): Config dict for the activation layer in ConvModule.
            (Default: None indicates w/o activation).
order (dict): Order of components in ConvModule.
        upsample_cfg (dict): Dictionary to construct and config the upsample
            layer; its ``type`` key selects the upsample method.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1)):
super(FPN_CARAFE, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
self.upsample = self.upsample_cfg.get('type')
self.relu = nn.ReLU(inplace=False)
self.order = order
assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
assert self.upsample in [
'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
]
if self.upsample in ['deconv', 'pixel_shuffle']:
assert hasattr(
self.upsample_cfg,
'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.upsample_modules = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample == 'deconv':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsample_cfg_.update(channels=out_channels, scale_factor=2)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsample_cfg_.update(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsample_module = build_upsample_layer(upsample_cfg_)
self.upsample_modules.append(upsample_module)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_out_levels = (
num_outs - self.backbone_end_level + self.start_level)
if extra_out_levels >= 1:
for i in range(extra_out_levels):
in_channels = (
self.in_channels[self.backbone_end_level -
1] if i == 0 else out_channels)
extra_l_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsampler_cfg_ = dict(
channels=out_channels,
scale_factor=2,
**self.upsample_cfg)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsampler_cfg_ = dict(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample
upsample_module = build_upsample_layer(upsampler_cfg_)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
self.fpn_convs.append(extra_fpn_conv)
self.lateral_convs.append(extra_l_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of module."""
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
xavier_init(m, distribution='uniform')
for m in self.modules():
if isinstance(m, CARAFEPack):
m.init_weights()
def slice_as(self, src, dst):
"""Slice ``src`` as ``dst``
Note:
``src`` should have the same or larger size than ``dst``.
Args:
src (torch.Tensor): Tensors to be sliced.
dst (torch.Tensor): ``src`` will be sliced to have the same
size as ``dst``.
Returns:
torch.Tensor: Sliced tensor.
"""
assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
return src
else:
return src[:, :, :dst.size(2), :dst.size(3)]
def tensor_add(self, a, b):
"""Add tensors ``a`` and ``b`` that might have different sizes."""
if a.size() == b.size():
c = a + b
else:
c = a + self.slice_as(b, a)
return c
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = []
for i, lateral_conv in enumerate(self.lateral_convs):
if i <= self.backbone_end_level - self.start_level:
input = inputs[min(i + self.start_level, len(inputs) - 1)]
else:
input = laterals[-1]
lateral = lateral_conv(input)
laterals.append(lateral)
# build top-down path
for i in range(len(laterals) - 1, 0, -1):
if self.upsample is not None:
upsample_feat = self.upsample_modules[i - 1](laterals[i])
else:
upsample_feat = laterals[i]
laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
# build outputs
num_conv_outs = len(self.fpn_convs)
outs = []
for i in range(num_conv_outs):
out = self.fpn_convs[i](laterals[i])
outs.append(out)
return tuple(outs)
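# --- Hedged usage sketch (not part of the original file; cannot run here
# directly because of the relative `..builder` import, so comments only).
# Channel sizes are illustrative ResNet-50 defaults; 'nearest' upsampling
# avoids the compiled CARAFE op:
#   neck = FPN_CARAFE(in_channels=[256, 512, 1024, 2048], out_channels=256,
#                     num_outs=5, upsample_cfg=dict(type='nearest'))
#   neck.init_weights()
#   feats = [torch.randn(1, c, s, s)
#            for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
#   outs = neck(feats)  # 5 maps, each with 256 channels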
|
196613
|
from __future__ import absolute_import
import sys
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
class AttentionRecognitionHead(nn.Module):
"""
input: [b x 16 x 64 x in_planes]
output: probability sequence: [b x T x num_classes]
"""
def __init__(self, num_classes, in_planes, sDim, attDim, max_len_labels):
super(AttentionRecognitionHead, self).__init__()
self.num_classes = num_classes # this is the output classes. So it includes the <EOS>.
self.in_planes = in_planes
self.sDim = sDim
self.attDim = attDim
self.max_len_labels = max_len_labels
self.decoder = DecoderUnit(sDim=sDim, xDim=in_planes, yDim=num_classes, attDim=attDim)
def forward(self, x):
x, targets, lengths = x
batch_size = x.size(0)
# Decoder
state = torch.zeros(1, batch_size, self.sDim).cuda()
outputs = []
for i in range(max(lengths)):
if i == 0:
y_prev = torch.zeros((batch_size)).fill_(self.num_classes).cuda() # the last one is used as the <BOS>.
else:
y_prev = targets[:,i-1].cuda()
output, state = self.decoder(x, state, y_prev)
outputs.append(output)
outputs = torch.cat([_.unsqueeze(1) for _ in outputs], 1)
return outputs
# inference stage.
def sample(self, x):
x, _, _ = x
batch_size = x.size(0)
# Decoder
state = torch.zeros(1, batch_size, self.sDim)
predicted_ids, predicted_scores = [], []
for i in range(self.max_len_labels):
if i == 0:
y_prev = torch.zeros((batch_size)).fill_(self.num_classes)
else:
y_prev = predicted
output, state = self.decoder(x, state, y_prev)
output = F.softmax(output, dim=1)
score, predicted = output.max(1)
predicted_ids.append(predicted.unsqueeze(1))
predicted_scores.append(score.unsqueeze(1))
predicted_ids = torch.cat(predicted_ids, 1)
predicted_scores = torch.cat(predicted_scores, 1)
# return predicted_ids.squeeze(), predicted_scores.squeeze()
return predicted_ids, predicted_scores
def beam_search(self, x, beam_width, eos):
def _inflate(tensor, times, dim):
repeat_dims = [1] * tensor.dim()
repeat_dims[dim] = times
return tensor.repeat(*repeat_dims)
# https://github.com/IBM/pytorch-seq2seq/blob/fede87655ddce6c94b38886089e05321dc9802af/seq2seq/models/TopKDecoder.py
batch_size, l, d = x.size()
# inflated_encoder_feats = _inflate(encoder_feats, beam_width, 0) # ABC --> AABBCC -/-> ABCABC
inflated_encoder_feats = x.unsqueeze(1).permute((1,0,2,3)).repeat((beam_width,1,1,1)).permute((1,0,2,3)).contiguous().view(-1, l, d)
# Initialize the decoder
state = torch.zeros(1, batch_size * beam_width, self.sDim).cuda()
pos_index = (torch.Tensor(range(batch_size)) * beam_width).long().view(-1, 1).cuda()
# Initialize the scores
sequence_scores = torch.Tensor(batch_size * beam_width, 1).cuda()
sequence_scores.fill_(-float('Inf'))
sequence_scores.index_fill_(0, torch.Tensor([i * beam_width for i in range(0, batch_size)]).long().cuda(), 0.0)
# sequence_scores.fill_(0.0)
# Initialize the input vector
y_prev = torch.zeros((batch_size * beam_width)).fill_(self.num_classes).cuda()
# Store decisions for backtracking
stored_scores = list()
stored_predecessors = list()
stored_emitted_symbols = list()
for i in range(self.max_len_labels):
output, state = self.decoder(inflated_encoder_feats, state, y_prev)
log_softmax_output = F.log_softmax(output, dim=1)
sequence_scores = _inflate(sequence_scores, self.num_classes, 1)
sequence_scores += log_softmax_output
scores, candidates = sequence_scores.view(batch_size, -1).topk(beam_width, dim=1)
# Reshape input = (bk, 1) and sequence_scores = (bk, 1)
y_prev = (candidates % self.num_classes).view(batch_size * beam_width)
sequence_scores = scores.view(batch_size * beam_width, 1)
# Update fields for next timestep
            # floor division: plain `/` on integer tensors returns floats on newer PyTorch
            predecessors = (candidates // self.num_classes + pos_index.expand_as(candidates)).view(batch_size * beam_width, 1).long()
state = state.index_select(1, predecessors.squeeze())
            # Update sequence scores and erase scores for the <eos> symbol so that they aren't expanded
stored_scores.append(sequence_scores.clone())
eos_indices = y_prev.view(-1, 1).eq(eos)
if eos_indices.nonzero().dim() > 0:
sequence_scores.masked_fill_(eos_indices, -float('inf'))
# Cache results for backtracking
stored_predecessors.append(predecessors)
stored_emitted_symbols.append(y_prev)
# Do backtracking to return the optimal values
        #====== backtrack ======#
# Initialize return variables given different types
p = list()
l = [[self.max_len_labels] * beam_width for _ in range(batch_size)] # Placeholder for lengths of top-k sequences
# the last step output of the beams are not sorted
# thus they are sorted here
sorted_score, sorted_idx = stored_scores[-1].view(batch_size, beam_width).topk(beam_width)
# initialize the sequence scores with the sorted last step beam scores
s = sorted_score.clone()
batch_eos_found = [0] * batch_size # the number of EOS found
# in the backward loop below for each batch
t = self.max_len_labels - 1
# initialize the back pointer with the sorted order of the last step beams.
# add pos_index for indexing variable with b*k as the first dimension.
t_predecessors = (sorted_idx + pos_index.expand_as(sorted_idx)).view(batch_size * beam_width)
while t >= 0:
# Re-order the variables with the back pointer
current_symbol = stored_emitted_symbols[t].index_select(0, t_predecessors)
t_predecessors = stored_predecessors[t].index_select(0, t_predecessors).squeeze()
eos_indices = stored_emitted_symbols[t].eq(eos).nonzero()
if eos_indices.dim() > 0:
for i in range(eos_indices.size(0)-1, -1, -1):
# Indices of the EOS symbol for both variables
# with b*k as the first dimension, and b, k for
# the first two dimensions
idx = eos_indices[i]
b_idx = int(idx[0] / beam_width)
# The indices of the replacing position
# according to the replacement strategy noted above
res_k_idx = beam_width - (batch_eos_found[b_idx] % beam_width) - 1
batch_eos_found[b_idx] += 1
res_idx = b_idx * beam_width + res_k_idx
# Replace the old information in return variables
# with the new ended sequence information
t_predecessors[res_idx] = stored_predecessors[t][idx[0]]
current_symbol[res_idx] = stored_emitted_symbols[t][idx[0]]
s[b_idx, res_k_idx] = stored_scores[t][idx[0], [0]]
l[b_idx][res_k_idx] = t + 1
# record the back tracked results
p.append(current_symbol)
t -= 1
# Sort and re-order again as the added ended sequences may change
# the order (very unlikely)
s, re_sorted_idx = s.topk(beam_width)
for b_idx in range(batch_size):
l[b_idx] = [l[b_idx][k_idx.item()] for k_idx in re_sorted_idx[b_idx,:]]
re_sorted_idx = (re_sorted_idx + pos_index.expand_as(re_sorted_idx)).view(batch_size*beam_width)
# Reverse the sequences and re-order at the same time
# It is reversed because the backtracking happens in reverse time order
p = [step.index_select(0, re_sorted_idx).view(batch_size, beam_width, -1) for step in reversed(p)]
p = torch.cat(p, -1)[:,0,:]
return p, torch.ones_like(p)
class AttentionUnit(nn.Module):
def __init__(self, sDim, xDim, attDim):
super(AttentionUnit, self).__init__()
self.sDim = sDim
self.xDim = xDim
self.attDim = attDim
self.sEmbed = nn.Linear(sDim, attDim)
self.xEmbed = nn.Linear(xDim, attDim)
self.wEmbed = nn.Linear(attDim, 1)
# self.init_weights()
def init_weights(self):
init.normal_(self.sEmbed.weight, std=0.01)
init.constant_(self.sEmbed.bias, 0)
init.normal_(self.xEmbed.weight, std=0.01)
init.constant_(self.xEmbed.bias, 0)
init.normal_(self.wEmbed.weight, std=0.01)
init.constant_(self.wEmbed.bias, 0)
def forward(self, x, sPrev):
sPrev = sPrev.cuda()
batch_size, T, _ = x.size() # [b x T x xDim]
x = x.contiguous().view(-1, self.xDim) # [(b x T) x xDim]
xProj = self.xEmbed(x) # [(b x T) x attDim]
xProj = xProj.view(batch_size, T, -1) # [b x T x attDim]
sPrev = sPrev.squeeze(0)
sProj = self.sEmbed(sPrev) # [b x attDim]
sProj = torch.unsqueeze(sProj, 1) # [b x 1 x attDim]
sProj = sProj.expand(batch_size, T, self.attDim) # [b x T x attDim]
sumTanh = torch.tanh(sProj + xProj)
sumTanh = sumTanh.view(-1, self.attDim)
vProj = self.wEmbed(sumTanh) # [(b x T) x 1]
vProj = vProj.view(batch_size, T)
alpha = F.softmax(vProj, dim=1) # attention weights for each sample in the minibatch
return alpha
class DecoderUnit(nn.Module):
def __init__(self, sDim, xDim, yDim, attDim):
super(DecoderUnit, self).__init__()
self.sDim = sDim
self.xDim = xDim
self.yDim = yDim
self.attDim = attDim
self.emdDim = attDim
self.attention_unit = AttentionUnit(sDim, xDim, attDim)
self.tgt_embedding = nn.Embedding(yDim+1, self.emdDim) # the last is used for <BOS>
self.gru = nn.GRU(input_size=xDim+self.emdDim, hidden_size=sDim, batch_first=True)
self.fc = nn.Linear(sDim, yDim)
# self.init_weights()
def init_weights(self):
init.normal_(self.tgt_embedding.weight, std=0.01)
init.normal_(self.fc.weight, std=0.01)
init.constant_(self.fc.bias, 0)
def forward(self, x, sPrev, yPrev):
sPrev = sPrev.cuda()
# x: feature sequence from the image decoder.
batch_size, T, _ = x.size()
alpha = self.attention_unit(x, sPrev)
context = torch.bmm(alpha.unsqueeze(1), x).squeeze(1)
yPrev = yPrev.cuda()
yProj = self.tgt_embedding(yPrev.long())
self.gru.flatten_parameters()
output, state = self.gru(torch.cat([yProj, context], 1).unsqueeze(1), sPrev)
output = output.squeeze(1)
output = self.fc(output)
return output, state
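# --- Hedged usage sketch (not part of the original file; the module calls
# .cuda() internally, so a GPU is required and the calls are shown as
# comments only; shapes are illustrative):
#   head = AttentionRecognitionHead(num_classes=37, in_planes=512,
#                                   sDim=512, attDim=512, max_len_labels=25)
#   feats = torch.randn(2, 64, 512).cuda()
#   ids, scores = head.beam_search(feats, beam_width=5, eos=36)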
|
196617
|
import multiprocessing as mp
import logging
import traceback
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim, xfail_with_cuda_python
def child_test():
from numba import cuda, int32, void
from numba.core import config
import io
import numpy as np
import threading
# Enable PTDS before we make any CUDA driver calls. Enabling it first
# ensures that PTDS APIs are used because the CUDA driver looks up API
# functions on first use and memoizes them.
config.CUDA_PER_THREAD_DEFAULT_STREAM = 1
# Set up log capture for the Driver API so we can see what API calls were
# used.
logbuf = io.StringIO()
handler = logging.StreamHandler(logbuf)
cudadrv_logger = logging.getLogger('numba.cuda.cudadrv.driver')
cudadrv_logger.addHandler(handler)
cudadrv_logger.setLevel(logging.DEBUG)
# Set up data for our test, and copy over to the device
N = 2 ** 16
N_THREADS = 10
N_ADDITIONS = 4096
# Seed the RNG for repeatability
np.random.seed(1)
x = np.random.randint(low=0, high=1000, size=N, dtype=np.int32)
r = np.zeros_like(x)
# One input and output array for each thread
xs = [cuda.to_device(x) for _ in range(N_THREADS)]
rs = [cuda.to_device(r) for _ in range(N_THREADS)]
# Compute the grid size and get the [per-thread] default stream
n_threads = 256
n_blocks = N // n_threads
stream = cuda.default_stream()
# A simple multiplication-by-addition kernel. What it does exactly is not
# too important; only that we have a kernel that does something.
@cuda.jit(void(int32[::1], int32[::1]))
def f(r, x):
i = cuda.grid(1)
        if i >= len(r):  # bounds guard; `>=` since index len(r) is already out of range
            return
# Accumulate x into r
for j in range(N_ADDITIONS):
r[i] += x[i]
# This function will be used to launch the kernel from each thread on its
# own unique data.
def kernel_thread(n):
f[n_blocks, n_threads, stream](rs[n], xs[n])
# Create threads
threads = [threading.Thread(target=kernel_thread, args=(i,))
for i in range(N_THREADS)]
# Start all threads
for thread in threads:
thread.start()
# Wait for all threads to finish, to ensure that we don't synchronize with
# the device until all kernels are scheduled.
for thread in threads:
thread.join()
# Synchronize with the device
cuda.synchronize()
# Check output is as expected
expected = x * N_ADDITIONS
for i in range(N_THREADS):
np.testing.assert_equal(rs[i].copy_to_host(), expected)
# Return the driver log output to the calling process for checking
handler.flush()
return logbuf.getvalue()
def child_test_wrapper(result_queue):
try:
output = child_test()
success = True
# Catch anything raised so it can be propagated
except: # noqa: E722
output = traceback.format_exc()
success = False
result_queue.put((success, output))
@skip_on_cudasim('Streams not supported on the simulator')
class TestPTDS(CUDATestCase):
@xfail_with_cuda_python
def test_ptds(self):
# Run a test with PTDS enabled in a child process
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
proc = ctx.Process(target=child_test_wrapper, args=(result_queue,))
proc.start()
proc.join()
success, output = result_queue.get()
# Ensure the child process ran to completion before checking its output
if not success:
self.fail(output)
# Functions with a per-thread default stream variant that we expect to
# see in the output
ptds_functions = ('cuMemcpyHtoD_v2_ptds', 'cuLaunchKernel_ptsz',
'cuMemcpyDtoH_v2_ptds')
for fn in ptds_functions:
with self.subTest(fn=fn, expected=True):
self.assertIn(fn, output)
# Non-PTDS versions of the functions that we should not see in the
# output:
legacy_functions = ('cuMemcpyHtoD_v2', 'cuLaunchKernel',
'cuMemcpyDtoH_v2')
for fn in legacy_functions:
with self.subTest(fn=fn, expected=False):
# Ensure we only spot these function names appearing without a
# _ptds or _ptsz suffix by checking including the end of the
# line in the log
fn_at_end = f'{fn}\n'
self.assertNotIn(fn_at_end, output)
if __name__ == '__main__':
unittest.main()
|
196669
|
from __future__ import annotations
from pydantic import BaseModel
class OsuBeatmapRequestForm(BaseModel):
Filenames: list[str]
Ids: list[int]
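# --- Hedged usage sketch (not part of the original file; assumes pydantic v1):
if __name__ == "__main__":
    form = OsuBeatmapRequestForm(Filenames=["map.osu"], Ids=[1, 2])
    print(form.json())  # {"Filenames": ["map.osu"], "Ids": [1, 2]}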
|
196690
|
from typing import List

class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if len(strs) == 0:
return ''
def getCommonPrefix(s1, s2):
result = []
for i in range(min(len(s1), len(s2))):
if s1[i] == s2[i]:
result.append(s1[i])
else:
break
return ''.join(result)
commonPrefix = strs[0]
for i in range(1, len(strs)):
commonPrefix = getCommonPrefix(commonPrefix, strs[i])
return commonPrefix
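# --- Hedged usage sketch (not part of the original file):
if __name__ == "__main__":
    assert Solution().longestCommonPrefix(["flower", "flow", "flight"]) == "fl"
    assert Solution().longestCommonPrefix(["dog", "racecar", "car"]) == ""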
|
196710
|
import xml.etree.ElementTree as ET
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
def is_bazel_run():
return "TEST_WORKSPACE" in os.environ
def read_file(path):
    with open(path, "r") as f:
        return f.read()
def parse_xml(xml_str):
return ET.fromstring(xml_str)
def xpath_list(xml_str, xpath):
return parse_xml(xml_str).findall(xpath)
# unfortunately ElementTree doesn't handle returning xpath attribute values, this is a workaround.
def xpath_attribute_list(xml_str, xpath, attribute_name):
return list(map(lambda e: e.get(attribute_name), parse_xml(xml_str).findall(xpath)))
def generated_file_path(relative_path):
return relative_path \
if is_bazel_run() \
else os.path.join(dir_path, "../../bazel-bin/%s" % relative_path)
def load_archive(intellij_files_archive_path):
intellij_files_archive_path = generated_file_path(intellij_files_archive_path)
entries = read_file(intellij_files_archive_path) \
.split("__SYMLINK_DIVIDER__\n", 1)[0] \
.split("__SHA1_DIVIDER__\n", 1)[1] \
.split("\n__FILE_DIVIDER__\n")
relative_path_to_content = {}
for entry in entries:
parts = entry.split("\n", 1)
relative_path_to_content[parts[0]] = parts[1]
return relative_path_to_content
def find_all_plain_jar_libraries(iml_content):
return list(map(lambda e: e.find("./library/CLASSES/root").get("url"),
filter(lambda e: e.get("type") == "module-library" and "scope" not in e.keys(),
parse_xml(iml_content).findall("./component/orderEntry"))))
def find_all_test_jar_libraries(iml_content):
return list(map(lambda e: e.find("./library/CLASSES/root").get("url"),
filter(lambda e: e.get("type") == "module-library" and e.get("scope") == "TEST",
parse_xml(iml_content).findall("./component/orderEntry"))))
def junit5_jars():
return [
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_apiguardian_apiguardian_api/jar/apiguardian-api-1.0.0.jar!/",
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_junit_jupiter_junit_jupiter_api/jar/junit-jupiter-api-5.0.1.jar!/",
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_junit_platform_junit_platform_commons/jar/junit-platform-commons-1.0.1.jar!/",
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_junit_platform_junit_platform_engine/jar/junit-platform-engine-1.0.1.jar!/",
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_junit_platform_junit_platform_launcher/jar/junit-platform-launcher-1.0.1.jar!/",
"jar://${BAZEL_INFO_EXECUTION_ROOT}/external/org_opentest4j_opentest4j/jar/opentest4j-1.0.0.jar!/"]
|
196746
|
from django.db import models
from multiselectfield.db import fields as multiselect
from .choices import MEDAL_TYPES, MEDIA_CHOICES, YEAR_IN_SCHOOL_CHOICES
class ModelA(models.Model):
year_in_school = models.CharField(max_length=2, blank=True, choices=YEAR_IN_SCHOOL_CHOICES)
class ModelB(models.Model):
year_in_school = models.CharField(max_length=2, blank=True, choices=YEAR_IN_SCHOOL_CHOICES[:-1])
media = models.CharField(max_length=10, blank=True, choices=MEDIA_CHOICES)
class ModelC(models.Model):
medals = multiselect.MultiSelectField(blank=True, choices=MEDAL_TYPES)
media = models.CharField(max_length=10, blank=True, choices=MEDIA_CHOICES)
|
196766
|
from threading import Semaphore
class ZeroEvenOdd:
def __init__(self, n):
self.n = n
self.zero_gate = Semaphore(1)
self.even_gate = Semaphore(0)
self.odd_gate = Semaphore(0)
# printNumber(x) outputs "x", where x is an integer.
def zero(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(self.n):
self.zero_gate.acquire()
printNumber(0)
if i & 1:
self.even_gate.release()
else:
self.odd_gate.release()
def even(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(2, self.n + 1, 2):
self.even_gate.acquire()
printNumber(i)
self.zero_gate.release()
def odd(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n + 1, 2):
self.odd_gate.acquire()
printNumber(i)
self.zero_gate.release()
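# --- Hedged usage sketch (not part of the original file). Three threads
# cooperate via the semaphores; `print` serves as the printNumber callback:
if __name__ == "__main__":
    from threading import Thread
    zeo = ZeroEvenOdd(5)
    workers = [Thread(target=zeo.zero, args=(print,)),
               Thread(target=zeo.even, args=(print,)),
               Thread(target=zeo.odd, args=(print,))]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    # prints 0 1 0 2 0 3 0 4 0 5, one number per line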
|
196815
|
from __future__ import absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
def Factory(settings, Model):
if( not isinstance(settings, KratosMultiphysics.Parameters) ):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyMassConservationCheckProcess( Model, settings["Parameters"] )
class ApplyMassConservationCheckProcess(KratosMultiphysics.Process):
def __init__(self, Model, settings):
KratosMultiphysics.Process.__init__(self)
default_parameters = KratosMultiphysics.Parameters( """
{
"model_part_name" : "default_model_part_name",
"perform_corrections" : true,
"correction_frequency_in_time_steps" : 20,
"write_to_log_file" : true,
"log_file_name" : "mass_conservation.log"
} """ )
settings.ValidateAndAssignDefaults(default_parameters)
self._fluid_model_part = Model[settings["model_part_name"].GetString()]
self._write_to_log = settings["write_to_log_file"].GetBool()
self._my_log_file = settings["log_file_name"].GetString()
self._is_printing_rank = ( self._fluid_model_part.GetCommunicator().MyPID() == 0 )
self.mass_conservation_check_process = KratosFluid.MassConservationCheckProcess(self._fluid_model_part, settings)
KratosMultiphysics.Logger.PrintInfo("ApplyMassConservationCheckProcess","Construction finished.")
def ExecuteInitialize(self):
first_lines_string = self.mass_conservation_check_process.Initialize()
# writing first line in file
if ( self._write_to_log and self._is_printing_rank ):
with open(self._my_log_file, "w") as logFile:
logFile.write( first_lines_string )
KratosMultiphysics.Logger.PrintInfo("ApplyMassConservationCheckProcess","Initialization finished (initial volumes calculated).")
def ExecuteBeforeSolutionLoop(self):
self.mass_conservation_check_process.ExecuteBeforeSolutionLoop()
def ExecuteInitializeSolutionStep(self):
self.mass_conservation_check_process.ExecuteInitializeSolutionStep()
def ExecuteFinalizeSolutionStep(self):
log_line_string = self.mass_conservation_check_process.ExecuteInTimeStep()
        # append a line to the log file at each time step
if ( self._write_to_log and self._is_printing_rank ):
with open(self._my_log_file, "a+") as logFile:
logFile.write( log_line_string )
|
196817
|
import sys
sys.path.append("Mask_RCNN")
import os
import glob
import osmmodelconfig
import skimage
import math
import imagestoosm.config as osmcfg
import model as modellib
import visualize as vis
import numpy as np
import csv
import QuadKey.quadkey as quadkey
import shapely.geometry as geometry
import shapely.affinity as affinity
import matplotlib.pyplot as plt
import cv2
import scipy.optimize
import time
from skimage import draw
from skimage import io
showFigures = False
def toDegrees(rad):
return rad * 180/math.pi
def writeOSM( osmFileName,featureName, simpleContour,tilePixel, qkRoot) :
with open(osmFileName,"wt",encoding="ascii") as f:
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
f.write("<osm version=\"0.6\">\n")
id = -1
for pt in simpleContour :
geo = quadkey.TileSystem.pixel_to_geo( (pt[0,0]+tilePixel[0],pt[0,1]+tilePixel[1]),qkRoot.level)
f.write(" <node id=\"{}\" lat=\"{}\" lon=\"{}\" />\n".format(id,geo[0],geo[1]))
id -= 1
f.write(" <way id=\"{}\" visible=\"true\">\n".format(id))
id = -1
for pt in simpleContour :
f.write(" <nd ref=\"{}\" />\n".format(id))
id -= 1
f.write(" <nd ref=\"{}\" />\n".format(-1))
f.write(" <tag k=\"{}\" v=\"{}\" />\n".format("leisure","pitch"))
f.write(" <tag k=\"{}\" v=\"{}\" />\n".format("sport",featureName))
f.write(" </way>\n")
f.write("</osm>\n")
def writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth) :
nPts = int(finalShape.length)
if ( nPts > 5000) :
nPts = 5000
fitContour = np.zeros((nPts,1,2), dtype=np.int32)
if ( nPts > 3):
for t in range(0,nPts) :
pt = finalShape.interpolate(t)
fitContour[t,0,0] = pt.x
fitContour[t,0,1] = pt.y
fitContour = [ fitContour ]
fitContour = [ cv2.approxPolyDP(cnt,2,True) for cnt in fitContour]
image = np.copy(imageNoMasks)
cv2.drawContours(image, fitContour,-1, (0,255,0), 2)
if ( showFigures ):
fig.add_subplot(2,2,3)
plt.title(featureName + " " + str(r['scores'][i]) + " Fit")
plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
while ( os.path.exists( "anomaly/add/{0:06d}.osm".format(wayNumber) )) :
wayNumber += 1
debugFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.jpg".format(wayNumber))
io.imsave(debugFileName,image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth],quality=100)
osmFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.osm".format(wayNumber))
writeOSM( osmFileName,featureName, fitContour[0],tilePixel, qkRoot)
if (showFigures ):
plt.show(block=False)
plt.pause(0.05)
return wayNumber
ROOT_DIR_ = os.path.dirname(os.path.realpath(sys.argv[0]))
MODEL_DIR = os.path.join(ROOT_DIR_, "logs")
class InferenceConfig(osmmodelconfig.OsmModelConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
ROOT_DIR = ROOT_DIR_
inference_config = InferenceConfig()
fullTrainingDir = os.path.join( ROOT_DIR_, osmcfg.trainDir,"*")
fullImageList = []
for imageDir in glob.glob(fullTrainingDir):
if ( os.path.isdir( os.path.join( fullTrainingDir, imageDir) )):
id = os.path.split(imageDir)[1]
fullImageList.append( id)
# Training dataset
dataset_full = osmmodelconfig.OsmImagesDataset(ROOT_DIR_)
dataset_full.load(fullImageList, inference_config.IMAGE_SHAPE[0], inference_config.IMAGE_SHAPE[1])
dataset_full.prepare()
inference_config.display()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]
print(model_path)
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
print("Reading in OSM data")
# load up the OSM features into hash of arrays of polygons, in pixels
features = {}
for classDir in os.listdir(osmcfg.rootOsmDir) :
classDirFull = os.path.join( osmcfg.rootOsmDir,classDir)
for fileName in os.listdir(classDirFull) :
fullPath = os.path.join( osmcfg.rootOsmDir,classDir,fileName)
with open(fullPath, "rt") as csvfile:
            csvreader = csv.reader(csvfile, delimiter='\t')
            pts = []
            for row in csvreader:
                latLon = (float(row[0]), float(row[1]))
                pixel = quadkey.TileSystem.geo_to_pixel(latLon, osmcfg.tileZoom)
pts.append(pixel)
feature = {
"geometry" : geometry.Polygon(pts),
"filename" : fullPath
}
if ( (classDir in features) == False) :
features[classDir] = []
features[classDir].append( feature )
# make the output dirs, a fresh start is possible just by deleting anomaly
if ( not os.path.isdir("anomaly")) :
os.mkdir("anomaly")
if ( not os.path.isdir("anomaly/add")) :
os.mkdir("anomaly/add")
if ( not os.path.isdir("anomaly/replace")) :
os.mkdir("anomaly/replace")
if ( not os.path.isdir("anomaly/overlap")) :
os.mkdir("anomaly/overlap")
fig = {}
if ( showFigures):
fig = plt.figure()
wayNumber = 0
startTime = time.time()
count = 1
for image_index in dataset_full.image_ids :
currentTime = time.time()
howLong = currentTime-startTime
secPerImage = howLong/count
imagesLeft = len(dataset_full.image_ids)-count
timeLeftHrs = (imagesLeft*secPerImage)/3600.0
print("Processing {} of {} {:2.1f} hrs left".format(count,len(dataset_full.image_ids),timeLeftHrs))
count += 1
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_full, inference_config,image_index, use_mini_mask=False)
info = dataset_full.image_info[image_index]
# get the pixel location for this training image.
metaFileName = os.path.join( inference_config.ROOT_DIR, osmcfg.trainDir,info['id'],info['id']+".txt")
quadKeyStr = ""
with open(metaFileName) as metafile:
quadKeyStr = metafile.readline()
quadKeyStr = quadKeyStr.strip()
qkRoot = quadkey.from_str(quadKeyStr)
tilePixel = quadkey.TileSystem.geo_to_pixel(qkRoot.to_geo(), qkRoot.level)
# run the network
results = model.detect([image], verbose=0)
r = results[0]
maxImageSize = 256*3
featureMask = np.zeros((maxImageSize, maxImageSize), dtype=np.uint8)
pts = []
pts.append( ( tilePixel[0]+0,tilePixel[1]+0 ) )
pts.append( ( tilePixel[0]+0,tilePixel[1]+maxImageSize ) )
pts.append( ( tilePixel[0]+maxImageSize,tilePixel[1]+maxImageSize ) )
pts.append( ( tilePixel[0]+maxImageSize,tilePixel[1]+0 ) )
imageBoundingBoxPoly = geometry.Polygon(pts)
foundFeatures = {}
for featureType in osmmodelconfig.featureNames.keys() :
foundFeatures[featureType ] = []
for feature in features[featureType] :
if ( imageBoundingBoxPoly.intersects( feature['geometry']) ) :
xs, ys = feature['geometry'].exterior.coords.xy
outOfRangeCount = len([ x for x in xs if x < tilePixel[0] or x >= tilePixel[0]+maxImageSize ])
outOfRangeCount += len([ y for y in ys if y < tilePixel[1] or y >= tilePixel[1]+maxImageSize ])
if ( outOfRangeCount == 0) :
foundFeatures[featureType ].append( feature)
# draw black lines showing where osm data is
for featureType in osmmodelconfig.featureNames.keys() :
for feature in foundFeatures[featureType] :
xs, ys = feature['geometry'].exterior.coords.xy
xs = [ x-tilePixel[0] for x in xs]
ys = [ y-tilePixel[1] for y in ys]
rr, cc = draw.polygon_perimeter(xs,ys,(maxImageSize,maxImageSize))
image[cc,rr] = 0
imageNoMasks = np.copy(image)
for i in range( len(r['class_ids'])) :
mask = r['masks'][:,:,i]
edgePixels = 15
outside = np.sum( mask[0:edgePixels,:]) + np.sum( mask[-edgePixels:-1,:]) + np.sum( mask[:,0:edgePixels]) + np.sum( mask[:,-edgePixels:-1])
image = np.copy(imageNoMasks)
if ( r['scores'][i] > 0.98 and outside == 0 ) :
featureFound = False
for featureType in osmmodelconfig.featureNames.keys() :
for feature in foundFeatures[featureType] :
classId = osmmodelconfig.featureNames[featureType]
if ( classId == r['class_ids'][i] ) :
xs, ys = feature['geometry'].exterior.coords.xy
xs = [ x-tilePixel[0] for x in xs]
ys = [ y-tilePixel[1] for y in ys]
xsClipped = [ min( max( x,0),maxImageSize) for x in xs]
ysClipped = [ min( max( y,0),maxImageSize) for y in ys]
featureMask.fill(0)
rr, cc = draw.polygon(xs,ys,(maxImageSize,maxImageSize))
featureMask[cc,rr] = 1
maskAnd = featureMask * mask
overlap = np.sum(maskAnd )
if ( outside == 0 and overlap > 0) :
featureFound = True
if ( featureFound == False) :
weight = 0.25
# get feature name
featureName = ""
for featureType in osmmodelconfig.featureNames.keys() :
if ( osmmodelconfig.featureNames[featureType] == r['class_ids'][i] ) :
featureName = featureType
#if ( r['class_ids'][i] == 1):
# vis.apply_mask(image,mask,[weight,0,0])
#if ( r['class_ids'][i] == 2):
# vis.apply_mask(image,mask,[weight,weight,0])
#if ( r['class_ids'][i] == 3):
# vis.apply_mask(image,mask,[0.0,0,weight])
mask = mask.astype(np.uint8)
mask = mask * 255
ret,thresh = cv2.threshold(mask,127,255,0)
im2, rawContours,h = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
bbLeft,bbTop,bbWidth,bbHeight = cv2.boundingRect(rawContours[0])
bbBuffer = 75
bbLeft = max(bbLeft-bbBuffer,0)
bbRight = min(bbLeft+2*bbBuffer+bbWidth,maxImageSize)
bbWidth = bbRight-bbLeft
bbTop = max(bbTop-bbBuffer,0)
bbBottom = min(bbTop+2*bbBuffer+bbHeight,maxImageSize-1)
bbHeight = bbBottom-bbTop
image = np.copy(imageNoMasks)
cv2.drawContours(image, rawContours,-1, (0,255,0), 2)
if ( showFigures ):
fig.add_subplot(2,2,1)
plt.title(featureName + " " + str(r['scores'][i]) + " Raw")
plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
simpleContour = [ cv2.approxPolyDP(cnt,5,True) for cnt in rawContours]
image = np.copy(imageNoMasks)
cv2.drawContours(image, simpleContour,-1, (0,255,0), 2)
if ( showFigures ):
fig.add_subplot(2,2,2)
plt.title(featureName + " " + str(r['scores'][i]) + " Simplify")
plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
simpleContour = simpleContour[0]
print(" {}".format(featureName))
if ( featureName == "baseball" and isinstance(simpleContour,np.ndarray) ):
while ( os.path.exists( "anomaly/add/{0:06d}.osm".format(wayNumber) )) :
wayNumber += 1
debugFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.jpg".format(wayNumber))
io.imsave(debugFileName,image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth],quality=100)
osmFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.osm".format(wayNumber))
writeOSM( osmFileName,featureName, simpleContour,tilePixel, qkRoot)
fitContour = simpleContour
if ( featureName == 'baseball' ) :
def makePie(paramsX):
centerX,centerY,width,angle = paramsX
pts = []
pts.append((0,0))
pts.append((width,0))
step = math.pi/10
r = step
while r < math.pi/2:
x = math.cos(r)*width
y = math.sin(r)*width
pts.append( (x,y) )
r += step
pts.append( (0,width))
pts.append( (0,0))
fitShape = geometry.LineString(pts)
fitShape = affinity.translate(fitShape, -width/2,-width/2 )
fitShape = affinity.rotate(fitShape,angle )
fitShape = affinity.translate(fitShape, centerX,centerY )
return fitShape
def fitPie(paramsX):
fitShape = makePie(paramsX)
huberCutoff = 5
sum = 0
for cnt in rawContours:
for pt in cnt:
p = geometry.Point(pt[0])
d = p.distance(fitShape)
if ( d < huberCutoff) :
sum += 0.5 * d * d
else:
sum += huberCutoff*(math.fabs(d)-0.5*huberCutoff)
return sum
cm = np.mean( rawContours[0],axis=0)
results = []
angleStepCount = 8
for angleI in range(angleStepCount):
centerX = cm[0,0]
centerY = cm[0,1]
width = math.sqrt(cv2.contourArea(rawContours[0]))
angle = 360 * float(angleI)/angleStepCount
x0 = np.array([centerX,centerY,width,angle ])
resultR = scipy.optimize.minimize(fitPie, x0, method='nelder-mead', options={'xtol': 1e-6,'maxiter':50 })
results.append(resultR)
bestScore = 1e100
bestResult = {}
for result in results:
if result.fun < bestScore :
bestScore = result.fun
bestResult = result
bestResult = scipy.optimize.minimize(fitPie, bestResult.x, method='nelder-mead', options={'xtol': 1e-6 })
finalShape = makePie(bestResult.x)
wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
for result in results:
angle = result.x[3]
angleDelta = int(math.fabs(result.x[3]-bestResult.x[3])) % 360
if result.fun < 1.2*bestScore and angleDelta > 45 :
result = scipy.optimize.minimize(fitPie, result.x, method='nelder-mead', options={'xtol': 1e-6 })
finalShape = makePie(result.x)
wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
else:
def makeRect(paramsX):
centerX,centerY,width,height,angle = paramsX
pts = [
(-width/2,height/2),
(width/2,height/2),
(width/2,-height/2),
(-width/2,-height/2),
(-width/2,height/2)]
fitShape = geometry.LineString(pts)
fitShape = affinity.rotate(fitShape, angle,use_radians=True )
fitShape = affinity.translate(fitShape, centerX,centerY )
return fitShape
def fitRect(paramsX):
fitShape = makeRect(paramsX)
sum = 0
for cnt in rawContours:
for pt in cnt:
p = geometry.Point(pt[0])
d = p.distance(fitShape)
sum += d*d
return sum
cm = np.mean( rawContours[0],axis=0)
result = {}
angleStepCount = 8
for angleI in range(angleStepCount):
centerX = cm[0,0]
centerY = cm[0,1]
width = math.sqrt(cv2.contourArea(rawContours[0]))
height = width
angle = 2*math.pi * float(angleI)/angleStepCount
x0 = np.array([centerX,centerY,width,height,angle ])
resultR = scipy.optimize.minimize(fitRect, x0, method='nelder-mead', options={'xtol': 1e-6,'maxiter':50 })
if ( angleI == 0):
result = resultR
if ( resultR.fun < result.fun):
result = resultR
#print("{} {}".format(angle * 180.0 / math.pi,resultR.fun ))
resultR = scipy.optimize.minimize(fitRect, resultR.x, method='nelder-mead', options={'xtol': 1e-6 })
#print(result)
finalShape = makeRect(result.x)
wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
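# --- Added sketch: the Huber-style objective used inside fitPie above, in isolation.
# Pure standard library; the 5 px cutoff mirrors the hard-coded huberCutoff and is
# an assumption, not a tuned value.
import math

def _huber(d, cutoff=5.0):
    # Quadratic penalty near the fitted shape, linear growth in the tails,
    # so a few stray contour points cannot dominate the Nelder-Mead fit.
    if abs(d) < cutoff:
        return 0.5 * d * d
    return cutoff * (abs(d) - 0.5 * cutoff)

# _huber(1.0) == 0.5 (quadratic regime), _huber(10.0) == 37.5 (linear regime)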
|
196824
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
class TensorStandardScaler:
"""Helper class for automatically normalizing inputs into the network.
"""
def __init__(self, x_dim, suffix):
"""Initializes a scaler.
Arguments:
x_dim (int): The dimensionality of the inputs into the scaler.
Returns: None.
"""
self.fitted = False
with tf.variable_scope("Scaler"):
self.mu = tf.get_variable(
name="scaler_mu" + suffix, shape=[1, x_dim], initializer=tf.constant_initializer(0.0),
trainable=False
)
self.sigma = tf.get_variable(
name="scaler_std" + suffix, shape=[1, x_dim], initializer=tf.constant_initializer(1.0),
trainable=False
)
        # cached copies for cache()/load_cache(); both shaped [1, x_dim] to match the variables
        self.cached_mu, self.cached_sigma = np.zeros([1, x_dim]), np.ones([1, x_dim])
def fit(self, data):
"""Runs two ops, one for assigning the mean of the data to the internal mean, and
another for assigning the standard deviation of the data to the internal standard deviation.
This function must be called within a 'with <session>.as_default()' block.
Arguments:
data (np.ndarray): A numpy array containing the input
Returns: None.
"""
mu = np.mean(data, axis=0, keepdims=True)
sigma = np.std(data, axis=0, keepdims=True)
sigma[sigma < 1e-12] = 1.0
self.mu.load(mu)
self.sigma.load(sigma)
self.fitted = True
self.cache()
def transform(self, data):
"""Transforms the input matrix data using the parameters of this scaler.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return (data - self.mu) / self.sigma
def inverse_transform(self, data):
"""Undoes the transformation performed by this scaler.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return self.sigma * data + self.mu
def get_vars(self):
"""Returns a list of variables managed by this object.
Returns: (list<tf.Variable>) The list of variables.
"""
return [self.mu, self.sigma]
def cache(self):
"""Caches current values of this scaler.
Returns: None.
"""
self.cached_mu = self.mu.eval()
self.cached_sigma = self.sigma.eval()
def load_cache(self):
"""Loads values from the cache
Returns: None.
"""
self.mu.load(self.cached_mu)
self.sigma.load(self.cached_sigma)
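# --- Added usage sketch (assumes TF1-style graph mode, matching the code above;
# the shapes and suffix are illustrative only):
# data = np.random.randn(100, 3).astype(np.float32)
# scaler = TensorStandardScaler(x_dim=3, suffix="_demo")
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     with sess.as_default():
#         scaler.fit(data)               # loads mean/std into the TF variables
#     z = scaler.transform(data)         # symbolic (data - mu) / sigma
#     print(sess.run(z).std(axis=0))     # ~1.0 per dimension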
|
196827
|
from __future__ import print_function
import numpy as np
import math
from scipy.special import logsumexp  # scipy.misc.logsumexp was removed in SciPy 1.3
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn import Linear
from torch.autograd import Variable
from ..utils.distributions import log_Bernoulli, log_Normal_diag, log_Normal_standard, log_Logistic_256
from ..utils.visual_evaluation import plot_histogram
from ..utils.nn import he_init, GatedDense, NonLinear
from .Model import Model
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#=======================================================================================================================
class VAE(Model):
def __init__(self, args):
super(VAE, self).__init__(args)
# encoder: q(z | x)
self.q_z_layers = nn.Sequential(
GatedDense(np.prod(self.args.input_size), 300),
GatedDense(300, 300)
)
self.q_z_mean = Linear(300, self.args.z1_size)
self.q_z_logvar = NonLinear(300, self.args.z1_size, activation=nn.Hardtanh(min_val=-6., max_val=2.))
# decoder: p(x | z)
self.p_x_layers = nn.Sequential(
GatedDense(self.args.z1_size, 300),
GatedDense(300, 300)
)
if self.args.input_type == 'binary':
self.p_x_mean = NonLinear(300, np.prod(self.args.input_size), activation=nn.Sigmoid())
elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
self.p_x_mean = NonLinear(300, np.prod(self.args.input_size), activation=nn.Sigmoid())
self.p_x_logvar = NonLinear(300, np.prod(self.args.input_size),
activation=nn.Hardtanh(min_val=-4.5, max_val=0))
# weights initialization
for m in self.modules():
if isinstance(m, nn.Linear):
he_init(m)
# add pseudo-inputs if VampPrior
if self.args.prior == 'vampprior':
self.add_pseudoinputs()
# AUXILIARY METHODS
def calculate_loss(self, x, beta=1., average=False):
'''
:param x: input image(s)
:param beta: a hyperparam for warmup
:param average: whether to average loss or not
:return: value of a loss function
'''
# pass through VAE
x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)
# RE
if self.args.input_type == 'binary':
RE = log_Bernoulli(x, x_mean, dim=1)
elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
else:
raise Exception('Wrong input type!')
# KL
log_p_z = self.log_p_z(z_q)
log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
KL = -(log_p_z - log_q_z)
loss = - RE + beta * KL
if average:
loss = torch.mean(loss)
RE = torch.mean(RE)
KL = torch.mean(KL)
return loss, RE, KL
def calculate_likelihood(self, X, dir, mode='test', S=5000, MB=100):
# set auxiliary variables for number of training and test sets
N_test = X.size(0)
# init list
likelihood_test = []
if S <= MB:
R = 1
else:
R = S / MB
S = MB
for j in range(N_test):
if j % 100 == 0:
print('{:.2f}%'.format(j / (1. * N_test) * 100))
# Take x*
x_single = X[j].unsqueeze(0)
a = []
for r in range(0, int(R)):
# Repeat it for all training points
x = x_single.expand(S, x_single.size(1))
a_tmp, _, _ = self.calculate_loss(x)
a.append(-a_tmp.cpu().data.numpy())
# calculate max
a = np.asarray(a)
a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
likelihood_x = logsumexp(a)
likelihood_test.append(likelihood_x - np.log(len(a)))
likelihood_test = np.array(likelihood_test)
plot_histogram(-likelihood_test, dir, mode)
return -np.mean(likelihood_test)
def calculate_lower_bound(self, X_full, MB=100):
# CALCULATE LOWER BOUND:
lower_bound = 0.
RE_all = 0.
KL_all = 0.
I = int(math.ceil(X_full.size(0) / MB))
for i in range(I):
x = X_full[i * MB: (i + 1) * MB].view(-1, np.prod(self.args.input_size))
loss, RE, KL = self.calculate_loss(x, average=True)
RE_all += RE.cpu().data[0]
KL_all += KL.cpu().data[0]
lower_bound += loss.cpu().data[0]
lower_bound /= I
return lower_bound
# ADDITIONAL METHODS
def generate_x(self, N=25):
if self.args.prior == 'standard':
z_sample_rand = Variable(torch.FloatTensor(N, self.args.z1_size).normal_())
if self.args.cuda:
z_sample_rand = z_sample_rand.cuda()
elif self.args.prior == 'vampprior':
means = self.means(self.idle_input)[0:N]
z_sample_gen_mean, z_sample_gen_logvar = self.q_z(means)
z_sample_rand = self.reparameterize(z_sample_gen_mean, z_sample_gen_logvar)
samples_rand, _ = self.p_x(z_sample_rand)
return samples_rand
def reconstruct_x(self, x):
x_mean, _, _, _, _ = self.forward(x)
return x_mean
# THE MODEL: VARIATIONAL POSTERIOR
def q_z(self, x):
x = self.q_z_layers(x)
z_q_mean = self.q_z_mean(x)
z_q_logvar = self.q_z_logvar(x)
return z_q_mean, z_q_logvar
# THE MODEL: GENERATIVE DISTRIBUTION
def p_x(self, z):
z = self.p_x_layers(z)
x_mean = self.p_x_mean(z)
if self.args.input_type == 'binary':
x_logvar = 0.
else:
x_mean = torch.clamp(x_mean, min=0. + 1. / 512., max=1. - 1. / 512.)
x_logvar = self.p_x_logvar(z)
return x_mean, x_logvar
# the prior
def log_p_z(self, z):
if self.args.prior == 'standard':
log_prior = log_Normal_standard(z, dim=1)
elif self.args.prior == 'vampprior':
# z - MB x M
C = self.args.number_components
# calculate params
X = self.means(self.idle_input)
# calculate params for given data
z_p_mean, z_p_logvar = self.q_z(X) # C x M
# expand z
z_expand = z.unsqueeze(1)
means = z_p_mean.unsqueeze(0)
logvars = z_p_logvar.unsqueeze(0)
a = log_Normal_diag(z_expand, means, logvars, dim=2) - math.log(C) # MB x C
a_max, _ = torch.max(a, 1) # MB x 1
            # calculate log-sum-exp
log_prior = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), 1)) # MB x 1
else:
raise Exception('Wrong name of the prior!')
return log_prior
# THE MODEL: FORWARD PASS
def forward(self, x):
# z ~ q(z | x)
z_q_mean, z_q_logvar = self.q_z(x)
z_q = self.reparameterize(z_q_mean, z_q_logvar)
# x_mean = p(x|z)
x_mean, x_logvar = self.p_x(z_q)
return x_mean, x_logvar, z_q, z_q_mean, z_q_logvar
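# --- Added note: the vampprior branch of log_p_z above is a max-shifted log-sum-exp.
# A minimal numpy sketch of the same trick (illustration only, no torch needed):
# import numpy as np
# a = np.array([-1000.0, -1001.0])               # naive np.log(np.exp(a).sum()) underflows to -inf
# a_max = a.max()
# lse = a_max + np.log(np.exp(a - a_max).sum())  # == a_max + log(1 + e**-1) ~ -999.687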
|
196860
|
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import ElectraModel, ElectraPreTrainedModel
import torch.nn.functional as F
from enum import Enum
from bert_insert_model import BertInsertion
from bert_delete_model_3 import BertDeletion
from bert_replace_model_2 import BertReplace
from torch.utils.data import Dataset
# class Flag(Enum):
# insert = "insert"
# delete = "delete"
#     replace = "replace"
class ElectraWIKIpretrainmodel(ElectraPreTrainedModel):
def __init__(self, config):
super(ElectraWIKIpretrainmodel, self).__init__(config)
self.config = config
# self.num_labels = config.num_labels
self.electra = ElectraModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.bertinsert = BertInsertion(self.config)
self.bertdelete = BertDeletion(self.config)
self.bertreplace = BertReplace(self.config)
self.init_weights()
def forward(
self,
task,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
sep_positions=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.electra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0] # batch x seglen x dim
if task == "insert":
logits, loss = self.bertinsert(sequence_output, sep_positions, labels)
elif task == "delete_v3":
logits, loss = self.bertdelete(sequence_output, sep_positions, labels)
else:
logits, loss = self.bertreplace(sequence_output, sep_positions, labels)
return logits, loss
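# --- Added usage sketch; the checkpoint name and the batch tensors are assumptions:
# model = ElectraWIKIpretrainmodel.from_pretrained("google/electra-base-discriminator")
# logits, loss = model(
#     task="insert",              # routes to BertInsertion; "delete_v3" -> delete head, else replace head
#     input_ids=input_ids,
#     attention_mask=attention_mask,
#     sep_positions=sep_positions,
#     labels=labels,
# )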
|
196871
|
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
sanitized_Request,
)
class CollegeRamaIE(InfoExtractor):
_VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
_TESTS = [
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
'md5': '481fda1c11f67588c0d9d8fbdced4e39',
'info_dict': {
'id': '585a43626e544bdd97aeb71a0ec907a01d',
'ext': 'mp4',
'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
'description': '',
'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$',
'duration': 7713.088,
'timestamp': 1413309600,
'upload_date': '20141014',
},
},
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
'md5': 'ef1fdded95bdf19b12c5999949419c92',
'info_dict': {
'id': '86a9ea9f53e149079fbdb4202b521ed21d',
'ext': 'wmv',
'title': '64ste Vakantiecursus: Afvalwater',
'description': 'md5:7fd774865cc69d972f542b157c328305',
'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$',
'duration': 10853,
'timestamp': 1326446400,
'upload_date': '20120113',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
player_options_request = {
'getPlayerOptionsRequest': {
'ResourceId': video_id,
'QueryString': '',
}
}
request = sanitized_Request(
'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
json.dumps(player_options_request))
request.add_header('Content-Type', 'application/json')
player_options = self._download_json(request, video_id)
presentation = player_options['d']['Presentation']
title = presentation['Title']
description = presentation.get('Description')
thumbnail = None
duration = float_or_none(presentation.get('Duration'), 1000)
timestamp = int_or_none(presentation.get('UnixTime'), 1000)
formats = []
for stream in presentation['Streams']:
for video in stream['VideoUrls']:
thumbnail_url = stream.get('ThumbnailUrl')
if thumbnail_url:
thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
format_id = video['MediaType']
if format_id == 'SS':
continue
formats.append({
'url': video['Location'],
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
}
|
196882
|
import numpy as np
import torch
from bisect import bisect_left
class TinyImages(torch.utils.data.Dataset):
def __init__(self, transform=None, exclude_cifar=True):
data_file = open('datasets/unlabeled_datasets/80M_Tiny_Images/tiny_images.bin', "rb")
def load_image(idx):
data_file.seek(idx * 3072)
data = data_file.read(3072)
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes
            return np.frombuffer(data, dtype='uint8').reshape(32, 32, 3, order="F")
self.load_image = load_image
self.offset = 0 # offset index
self.transform = transform
self.exclude_cifar = exclude_cifar
if exclude_cifar:
self.cifar_idxs = []
with open('datasets/unlabeled_datasets/80M_Tiny_Images/80mn_cifar_idxs.txt', 'r') as idxs:
for idx in idxs:
# indices in file take the 80mn database to start at 1, hence "- 1"
self.cifar_idxs.append(int(idx) - 1)
# hash table option
self.cifar_idxs = set(self.cifar_idxs)
self.in_cifar = lambda x: x in self.cifar_idxs
# bisection search option
# self.cifar_idxs = tuple(sorted(self.cifar_idxs))
#
# def binary_search(x, hi=len(self.cifar_idxs)):
# pos = bisect_left(self.cifar_idxs, x, 0, hi) # find insertion position
# return True if pos != hi and self.cifar_idxs[pos] == x else False
#
# self.in_cifar = binary_search
def __getitem__(self, index):
        index = (index + self.offset) % 79302017  # match the dataset size used by __len__ and randint below
if self.exclude_cifar:
while self.in_cifar(index):
index = np.random.randint(79302017)
img = self.load_image(index)
if self.transform is not None:
img = self.transform(img)
return img, 0 # 0 is the class
def __len__(self):
return 79302017
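# --- Added usage sketch (the 80M Tiny Images binary above is assumed to exist on disk):
# ds = TinyImages(transform=None, exclude_cifar=False)
# img, label = ds[0]      # img: 32x32x3 uint8 array, label is always 0 (unlabeled data)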
|
196944
|
from flask import Blueprint, current_app, json, jsonify, request
from app.celery.process_sms_client_response_tasks import (
process_sms_client_response,
)
from app.config import QueueNames
from app.errors import InvalidRequest, register_errors
sms_callback_blueprint = Blueprint("sms_callback", __name__, url_prefix="/notifications/sms")
register_errors(sms_callback_blueprint)
@sms_callback_blueprint.route('/mmg', methods=['POST'])
def process_mmg_response():
client_name = 'MMG'
data = json.loads(request.data)
errors = validate_callback_data(data=data,
fields=['status', 'CID'],
client_name=client_name)
if errors:
raise InvalidRequest(errors, status_code=400)
status = str(data.get('status'))
detailed_status_code = str(data.get('substatus'))
provider_reference = data.get('CID')
process_sms_client_response.apply_async(
[status, provider_reference, client_name, detailed_status_code],
queue=QueueNames.SMS_CALLBACKS,
)
safe_to_log = data.copy()
safe_to_log.pop("MSISDN")
current_app.logger.debug(
f"Full delivery response from {client_name} for notification: {provider_reference}\n{safe_to_log}"
)
return jsonify(result='success'), 200
@sms_callback_blueprint.route('/firetext', methods=['POST'])
def process_firetext_response():
client_name = 'Firetext'
errors = validate_callback_data(data=request.form,
fields=['status', 'reference'],
client_name=client_name)
if errors:
raise InvalidRequest(errors, status_code=400)
status = request.form.get('status')
detailed_status_code = request.form.get('code')
provider_reference = request.form.get('reference')
    safe_to_log = dict(request.form)
safe_to_log.pop('mobile')
current_app.logger.debug(
f"Full delivery response from {client_name} for notification: {provider_reference}\n{safe_to_log}"
)
process_sms_client_response.apply_async(
[status, provider_reference, client_name, detailed_status_code],
queue=QueueNames.SMS_CALLBACKS,
)
return jsonify(result='success'), 200
def validate_callback_data(data, fields, client_name):
errors = []
for f in fields:
if not str(data.get(f, '')):
error = "{} callback failed: {} missing".format(client_name, f)
errors.append(error)
return errors if len(errors) > 0 else None
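# --- Added example of the validator's contract (values here are illustrative):
# validate_callback_data({"status": "3"}, ["status", "CID"], "MMG")
#   -> ["MMG callback failed: CID missing"]
# validate_callback_data({"status": "3", "CID": "abc"}, ["status", "CID"], "MMG")
#   -> None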
|
197020
|
from .optimizer import BaseOptimizer
class RandomOptimizer(BaseOptimizer):
def __init__(self, optimization_problem):
super(RandomOptimizer, self).__init__(optimization_problem)
    def _generate_samples(self, size):
        parameters = self.optimization_problem.parameters
        # draw() already handles any size, so no special case for size == 1 is needed
        draws = [parameter.draw(size) for parameter in parameters]
        names = [parameter.name for parameter in parameters]
        return [{names[i]: value for i, value in enumerate(draw)} for draw in zip(*draws)]
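# --- Added sketch of what _generate_samples returns, using stub classes.
# The Parameter/OptimizationProblem shapes here are assumptions for illustration:
# import numpy as np
# class _P:
#     def __init__(self, name): self.name = name
#     def draw(self, size): return np.random.rand(size)
# class _Prob:
#     parameters = [_P("x"), _P("y")]
# RandomOptimizer(_Prob())._generate_samples(3)
#   -> [{"x": ..., "y": ...}, {"x": ..., "y": ...}, {"x": ..., "y": ...}]  # one dict per sample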
|
197065
|
from kartothek.core.cube.constants import KTK_CUBE_UUID_SEPERATOR
from kartothek.core.dataset import _validate_uuid
def test_uuid_seperator_valid():
assert _validate_uuid(KTK_CUBE_UUID_SEPERATOR)
|
197076
|
from setuptools import setup, find_packages
setup(
name="pgh-bustime",
version='0.8.5',
author='<NAME>',
author_email='<EMAIL>',
packages=['pghbustime'],
url='http://github.com/nhfruchter/pgh-bustime',
license='LICENSE',
description='Python wrapper for the Port Authority of Allegheny County realtime bus information API.',
install_requires=['xmltodict', 'requests', 'pytz']
)
|
197077
|
from __future__ import absolute_import, print_function, division
import numpy
from .type import TypedListType
import theano
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor.type_other import SliceType
from theano import tensor as T
from theano.compile.debugmode import _lessbroken_deepcopy
class _typed_list_py_operators:
def __getitem__(self, index):
return getitem(self, index)
def __len__(self):
return length(self)
def append(self, toAppend):
return append(self, toAppend)
def extend(self, toAppend):
return extend(self, toAppend)
def insert(self, index, toInsert):
return insert(self, index, toInsert)
def remove(self, toRemove):
return remove(self, toRemove)
def reverse(self):
return reverse(self)
def count(self, elem):
return count(self, elem)
# name "index" is already used by an attribute
def ind(self, elem):
return index_(self, elem)
ttype = property(lambda self: self.type.ttype)
dtype = property(lambda self: self.type.ttype.dtype)
ndim = property(lambda self: self.type.ttype.ndim + 1)
class TypedListVariable(_typed_list_py_operators, Variable):
"""
Subclass to add the typed list operators to the basic `Variable` class.
"""
TypedListType.Variable = TypedListVariable
class TypedListConstant(_typed_list_py_operators, Constant):
"""
Subclass to add the typed list operators to the basic `Variable` class.
"""
TypedListType.Constant = TypedListConstant
class GetItem(Op):
# See doc in instance of this Op or function after this class definition.
view_map = {0: [0]}
__props__ = ()
def make_node(self, x, index):
assert isinstance(x.type, TypedListType)
if not isinstance(index, Variable):
if isinstance(index, slice):
index = Constant(SliceType(), index)
return Apply(self, [x, index], [x.type()])
else:
index = T.constant(index, ndim=0, dtype='int64')
return Apply(self, [x, index], [x.ttype()])
if isinstance(index.type, SliceType):
return Apply(self, [x, index], [x.type()])
elif isinstance(index, T.TensorVariable) and index.ndim == 0:
assert index.dtype == 'int64'
return Apply(self, [x, index], [x.ttype()])
else:
raise TypeError('Expected scalar or slice as index.')
def perform(self, node, inputs, outputs):
(x, index) = inputs
(out,) = outputs
if not isinstance(index, slice):
index = int(index)
out[0] = x[index]
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name, index = inp[0], inp[1]
output_name = out[0]
fail = sub['fail']
return """
%(output_name)s = (typeof %(output_name)s) PyList_GetItem( (PyObject*) %(x_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)));
if(%(output_name)s == NULL){
%(fail)s
}
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version(self):
return (1,)
getitem = GetItem()
"""
Get specified slice of a typed list.
Parameters
----------
x
Typed list.
index
The index of the value to return from `x`.
"""
class Append(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [1]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 1]}
self.view_map = {0: [0]}
def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType)
assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
toAppend = _lessbroken_deepcopy(toAppend)
out[0].append(toAppend)
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, toAppend = inp[0], inp[1]
output_name = out[0]
fail = sub['fail']
if not self.inplace:
init = """
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
""" % locals()
else:
init = """
%(output_name)s = %(x_name)s;
""" % locals()
return init + """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) %(toAppend)s)){
%(fail)s
};
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version(self):
return (1,)
append = Append()
"""
Append an element at the end of another list.
Parameters
----------
x
The base typed list.
y
The element to append to `x`.
"""
class Extend(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [1]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 1]}
self.view_map = {0: [0]}
def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType)
assert x.type == toAppend.type
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
if toAppend:
o = out[0]
for i in toAppend:
o.append(_lessbroken_deepcopy(i))
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, toAppend = inp[0], inp[1]
output_name = out[0]
fail = sub['fail']
if not self.inplace:
init = """
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
""" % locals()
else:
init = """
%(output_name)s = %(x_name)s;
""" % locals()
return init + """
int i =0;
int length = PyList_GET_SIZE((PyObject*) %(toAppend)s);
if(%(output_name)s==NULL){
%(fail)s
};
for(i; i < length; i++){
if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) PyList_GetItem((PyObject*) %(toAppend)s,i))==-1){
%(fail)s
};
}
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version_(self):
return (1,)
extend = Extend()
"""
Append all elements of a list at the end of another list.
Parameters
----------
x
The typed list to extend.
toAppend
The typed list that will be added at the end of `x`.
"""
class Insert(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [2]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 2]}
self.view_map = {0: [0]}
def make_node(self, x, index, toInsert):
assert isinstance(x.type, TypedListType)
assert x.ttype == toInsert.type
if not isinstance(index, Variable):
index = T.constant(index, ndim=0, dtype='int64')
else:
assert index.dtype == 'int64'
assert isinstance(index, T.TensorVariable) and index.ndim == 0
return Apply(self, [x, index, toInsert], [x.type()])
def perform(self, node, inputs, outputs):
(x, index, toInsert) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
toInsert = _lessbroken_deepcopy(toInsert)
out[0].insert(index, toInsert)
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, index, toInsert = inp[0], inp[1], inp[2]
output_name = out[0]
fail = sub['fail']
if not self.inplace:
init = """
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
""" % locals()
else:
init = """
%(output_name)s = %(x_name)s;
""" % locals()
return init + """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Insert((PyObject*) %(output_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)), (PyObject*) %(toInsert)s)==-1){
%(fail)s
};
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version(self):
return (1,)
insert = Insert()
"""
Insert an element at an index in a typed list.
Parameters
----------
x
The typed list to modify.
index
The index where to put the new element in `x`.
toInsert
The new element to insert.
"""
class Remove(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x, toRemove):
assert isinstance(x.type, TypedListType)
assert x.ttype == toRemove.type
return Apply(self, [x, toRemove], [x.type()])
def perform(self, node, inputs, outputs):
(x, toRemove) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list.
"""
for y in range(out[0].__len__()):
if node.inputs[0].ttype.values_eq(out[0][y], toRemove):
del out[0][y]
break
def __str__(self):
return self.__class__.__name__
remove = Remove()
"""Remove an element from a typed list.
Parameters
----------
x
The typed list to be changed.
toRemove
An element to be removed from the typed list.
We only remove the first instance.
Notes
-----
Python implementation of remove doesn't work when we want to remove an ndarray
from a list. This implementation works in that case.
"""
class Reverse(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [x.type()])
def perform(self, node, inp, outputs):
(out,) = outputs
if not self.inplace:
out[0] = list(inp[0])
else:
out[0] = inp[0]
out[0].reverse()
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name = inp[0]
output_name = out[0]
fail = sub['fail']
if not self.inplace:
init = """
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
""" % locals()
else:
init = """
%(output_name)s = %(x_name)s;
""" % locals()
return init + """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Reverse((PyObject*) %(output_name)s)==-1){
%(fail)s
};
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version(self):
return (1,)
reverse = Reverse()
"""
Reverse the order of a typed list.
Parameters
----------
x
The typed list to be reversed.
"""
class Index(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x, elem):
assert isinstance(x.type, TypedListType)
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, inputs, outputs):
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = numpy.asarray(y, dtype=theano.config.floatX)
break
def __str__(self):
return self.__class__.__name__
index_ = Index()
class Count(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x, elem):
assert isinstance(x.type, TypedListType)
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, inputs, outputs):
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
out[0] = 0
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] += 1
out[0] = numpy.asarray(out[0], dtype=theano.config.floatX)
def __str__(self):
return self.__class__.__name__
count = Count()
"""
Count the number of times an element is in the typed list.
Parameters
----------
x
The typed list to look into.
elem
The element we want to count in list.
The elements are compared with equals.
Notes
-----
Python implementation of count doesn't work when we want to count an ndarray
in a list. This implementation works in that case.
"""
class Length(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [T.scalar(dtype='int64')])
def perform(self, node, x, outputs):
(out,) = outputs
out[0] = numpy.asarray(len(x[0]), 'int64')
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name = inp[0]
output_name = out[0]
fail = sub['fail']
return """
if(!%(output_name)s)
%(output_name)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(output_name)s))[0]=PyList_Size((PyObject*)%(x_name)s);
Py_INCREF(%(output_name)s);
""" % locals()
def c_code_cache_version(self):
return (1,)
length = Length()
"""
Returns the size of a list.
Parameters
----------
x
Typed list.
"""
class MakeList(Op):
__props__ = ()
def make_node(self, a):
assert isinstance(a, (tuple, list))
a2 = []
for elem in a:
if not isinstance(elem, theano.gof.Variable):
elem = theano.tensor.as_tensor_variable(elem)
a2.append(elem)
if not all(a2[0].type == elem.type for elem in a2):
            raise TypeError(
                "MakeList needs all input variables to be of the same type.")
tl = theano.typed_list.TypedListType(a2[0].type)()
return Apply(self, a2, [tl])
def perform(self, node, inputs, outputs):
(out,) = outputs
# We need to make sure that we don't get a view on our inputs
out[0] = [_lessbroken_deepcopy(inp) for inp in inputs]
make_list = MakeList()
"""
Build a Python list from those Theano variables.
Parameters
----------
a : tuple/list of Theano variable
Notes
-----
All Theano variables must have the same type.
"""
|
197137
|
import pytest
import logging
from ophyd import Device
logger = logging.getLogger(__name__)
def test_specify_version():
# Define a versioned Device:
class MyDevice(Device, version=1, version_type='ioc'):
...
info = MyDevice._class_info_
assert info == {'version': 1,
'versions': {1: MyDevice},
'version_type': 'ioc',
'version_of': MyDevice,
}
# Define a new version of that Device:
class MyDevice_V2(MyDevice, version=2, version_of=MyDevice):
...
info = MyDevice_V2._class_info_
assert info == {'version': 2,
'versions': {1: MyDevice,
2: MyDevice_V2},
'version_type': 'ioc',
'version_of': MyDevice,
}
# Ensure that the original Device has also been updated:
assert MyDevice._class_info_['versions'] == {1: MyDevice, 2: MyDevice_V2}
# Define a user device that inherits - but does not define a new version
class UserDevice(MyDevice_V2):
...
assert UserDevice._class_info_ == {'versions': {1: MyDevice, 2: MyDevice_V2},
'version': 2,
'version_type': 'ioc',
'version_of': MyDevice,
}
def test_version_requires_subclass():
class MyDevice(Device, version=1):
...
with pytest.raises(RuntimeError):
class UnrelatedDevice(Device, version=2, version_of=MyDevice):
...
|
197177
|
import logging
import functools
from ptsemseg.loss.loss import (
cross_entropy2d,
bootstrapped_cross_entropy2d,
multi_scale_cross_entropy2d,
)
logger = logging.getLogger("ptsemseg")
key2loss = {
"cross_entropy": cross_entropy2d,
"bootstrapped_cross_entropy": bootstrapped_cross_entropy2d,
"multi_scale_cross_entropy": multi_scale_cross_entropy2d,
}
def get_loss_function(cfg):
if cfg["training"]["loss"] is None:
logger.info("Using default cross entropy loss")
return cross_entropy2d
else:
loss_dict = cfg["training"]["loss"]
loss_name = loss_dict["name"]
loss_params = {k: v for k, v in loss_dict.items() if k != "name"}
if loss_name not in key2loss:
raise NotImplementedError("Loss {} not implemented".format(loss_name))
logger.info("Using {} with {} params".format(loss_name, loss_params))
return functools.partial(key2loss[loss_name], **loss_params)
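# --- Added example of the partial binding (the cfg values here are illustrative,
# and the extra kwarg name must match the chosen loss function's signature):
# cfg = {"training": {"loss": {"name": "bootstrapped_cross_entropy", "K": 4096}}}
# loss_fn = get_loss_function(cfg)
# # equivalent to functools.partial(bootstrapped_cross_entropy2d, K=4096)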
|
197200
|
from .pipeline import UnitTestPipeline
from .sink import UnitTestSink
from .source import UnitTestSource
from .unit_test_case import ProcessorTestCase
from .unit_test_case import TestCase
__all__ = (
'UnitTestPipeline',
'UnitTestSink',
'UnitTestSource',
'ProcessorTestCase',
'TestCase',
)
|
197222
|
from django.core.management.base import BaseCommand
from api.models import ExternalPartner, SupportedActivity
from django.db import transaction
from api.logger import logger
class Command(BaseCommand):
    help = 'Adds predefined ExternalPartners and SupportedActivities (most probably one-time run only)'
@transaction.atomic
def handle(self, *args, **options):
partners = [
'MOH', 'WHO', 'UNICEF', 'WFP', 'Other UN agencies', 'Civil Society and NGO partners'
]
activities = [
'COVID-19 testing',
'Point of entry/ point of control screening',
'Contact tracing',
'Support for people in quarantine',
'Support for mild/ moderate cases in isolation',
'Risk communication and community engagement',
'Health and hygiene promotion',
'Surveillance, including active case finding',
'Community-based surveillance',
'Infection prevention and control (including WASH) in health facilities',
'Community WASH activities to reduce the risk of COVID-19 transmission',
'MHPSS support related to COVID-19',
'Isolation and clinical case management of COVID-19 cases',
'Support for COVID-19 vaccination',
'Ambulance services for COVID-19 cases',
'Maintaining non-COVID-19 ambulatory services',
'Blood services',
'Maintaining access to essential health services (e.g. routine immunization, malaria, elder care)',
'Management of the dead',
'Livelihoods activities',
'Food and in-kind assistance',
'Cash and voucher assistance',
'Skills development for livelihoods / economic activities',
'Shelter',
'Provision of safe and adequate shelter and settlements',
'Other',
]
error = False
p_to_add = []
a_to_add = []
partners_empty = not ExternalPartner.objects.exists()
activities_empty = not SupportedActivity.objects.exists()
if partners_empty:
for par in partners:
extpar = ExternalPartner(
name=par,
name_en=par
)
p_to_add.append(extpar)
try:
ExternalPartner.objects.bulk_create(p_to_add)
except Exception as ex:
logger.error(f'Could not create ExternalPartners. Error: {str(ex)}')
error = True
if activities_empty:
for act in activities:
supact = SupportedActivity(
name=act,
name_en=act
)
a_to_add.append(supact)
try:
SupportedActivity.objects.bulk_create(a_to_add)
except Exception as ex:
logger.error(f'Could not create SupportedActivities. Error: {str(ex)}')
error = True
if not error:
logger.info('Successfully added ExternalPartners and SupportedActivities.')
|
197233
|
import asyncio
from collections import deque
from telethon import errors
from firebot import ALIVE_NAME, CMD_HELP
from ..utils import admin_cmd, edit_or_reply, sudo_cmd
# `bot` and `USERNAME` are globals injected by the userbot runtime at load time.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Fire-X"
@bot.on(admin_cmd(pattern=r"star$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"star$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(event, "`stars.....`")
deq = deque(list("🦋✨🦋✨🦋✨🦋✨"))
for _ in range(48):
await asyncio.sleep(0.3)
await event.edit("".join(deq))
deq.rotate(1)
@bot.on(admin_cmd(pattern=r"boxs$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"boxs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(event, "`boxs...`")
deq = deque(list("🟥🟧🟨🟩🟦🟪🟫⬛⬜"))
for _ in range(999):
await asyncio.sleep(0.3)
await event.edit("".join(deq))
deq.rotate(1)
@bot.on(admin_cmd(pattern=r"rains$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"rains$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(event, "`Raining.......`")
deq = deque(list("🌬☁️🌩🌨🌧🌦🌥⛅🌤"))
for _ in range(48):
await asyncio.sleep(0.3)
await event.edit("".join(deq))
deq.rotate(1)
@bot.on(admin_cmd(pattern=r"deploy$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"deploy$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(12)
event = await edit_or_reply(event, "`Deploying...`")
animation_chars = [
"**Heroku Connecting To Latest Github Build **",
f"**Build started by user** {DEFAULTUSER}",
f"**Deploy** `535a74f0` **by user** {DEFAULTUSER}",
"**Restarting Heroku Server...**",
"**State changed from up to starting**",
"**Stopping all processes with SIGTERM**",
"**Process exited with** `status 143`",
"**Starting process with command** `python3 -m userbot`",
"**State changed from starting to up**",
"__INFO:Userbot:Logged in as 557667062__",
"__INFO:Userbot:Successfully loaded all plugins__",
"**Build Succeeded**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@bot.on(admin_cmd(pattern=r"dumps$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"dumps$", allow_sudo=True))
async def _(message):
    if message.fwd_from:
        return
try:
obj = message.pattern_match.group(1)
if len(obj) != 3:
raise IndexError
inp = " ".join(obj)
except IndexError:
inp = "🥞 🎂 🍫"
event = await edit_or_reply(message, "`droping....`")
u, t, g, o, s, n = inp.split(), "🗑", "<(^_^ <)", "(> ^_^)>", "⠀ ", "\n"
h = [(u[0], u[1], u[2]), (u[0], u[1], ""), (u[0], "", "")]
for something in reversed(
[
y
for y in (
[
"".join(x)
for x in (
f + (s, g, s + s * f.count(""), t),
f + (g, s * 2 + s * f.count(""), t),
f[:i] + (o, f[i], s * 2 + s * f.count(""), t),
f[:i] + (s + s * f.count(""), o, f[i], s, t),
f[:i] + (s * 2 + s * f.count(""), o, f[i], t),
f[:i] + (s * 3 + s * f.count(""), o, t),
f[:i] + (s * 3 + s * f.count(""), g, t),
)
]
for i, f in enumerate(reversed(h))
)
]
):
for something_else in something:
await asyncio.sleep(0.3)
try:
await event.edit(something_else)
except errors.MessageIdInvalidError:
return
@bot.on(admin_cmd(pattern=r"fleaveme$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"fleaveme$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(10)
animation_chars = [
"⬛⬛⬛\n⬛⬛⬛\n⬛⬛⬛",
"⬛⬛⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛↘️",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬇️↘️",
"⬛⬆️↗️\n⬛🔄➡️\n↙️⬇️↘️",
"⬛⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
"↖️⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
]
event = await edit_or_reply(event, "fleaveme....")
await asyncio.sleep(2)
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@bot.on(admin_cmd(pattern=r"loveu$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"loveu$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(70)
event = await edit_or_reply(event, "loveu")
animation_chars = [
"😀",
"👩🎨",
"😁",
"😂",
"🤣",
"😃",
"😄",
"😅",
"😊",
"☺",
"🙂",
"🤔",
"🤨",
"😐",
"😑",
"😶",
"😣",
"😥",
"😮",
"🤐",
"😯",
"😴",
"😔",
"😕",
"☹",
"🙁",
"😖",
"😞",
"😟",
"😢",
"😭",
"🤯",
"💔",
"❤",
"I Love You❤",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 35])
@bot.on(admin_cmd(pattern=r"planes$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"planes$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(event, "Wait for plane...")
await event.edit("✈-------------")
await event.edit("-✈------------")
await event.edit("--✈-----------")
await event.edit("---✈----------")
await event.edit("----✈---------")
await event.edit("-----✈--------")
await event.edit("------✈-------")
await event.edit("-------✈------")
await event.edit("--------✈-----")
await event.edit("---------✈----")
await event.edit("----------✈---")
await event.edit("-----------✈--")
await event.edit("------------✈-")
await event.edit("-------------✈")
await asyncio.sleep(3)
@bot.on(admin_cmd(pattern=r"polices$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"polices$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(12)
event = await edit_or_reply(event, "Police")
animation_chars = [
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
f"[{DEFAULTUSER}]({USERNAME}) **Police iz Here**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@bot.on(admin_cmd(pattern=r"jios$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"jios$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(19)
event = await edit_or_reply(event, "jio network boosting...")
animation_chars = [
"`Connecting To JIO NETWORK ....`",
"`█ ▇ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▇ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▒ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▒ ▒`",
"*Optimising JIO NETWORK...*",
"`▒ ▒ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▒ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▇ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▇ █`",
"**JIO NETWORK Boosted....**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 19])
@bot.on(admin_cmd(pattern=r"solarsystems$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"solarsystems$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(80)
event = await edit_or_reply(event, "solarsystem")
animation_chars = [
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 8])
CMD_HELP.update(
{
"animation2": """**Plugin : **`animation3`
**Commands in animation2 are **
• `.star`
• `.boxs`
• `.deploy`
• `.dumps`
• `.fleaveme`
• `.loveu`
• `.planes`
• `.polices`
• `.jios`
• `.solarsystems`
**Function : **__Different kinds of animation commands check yourself for their animation .__"""
}
)
|
197246
|
import numpy as np
import minibatch
import sys
import cv2
sys.path.append("../")
from config import config
class TestLoader:
def __init__(self, imdb, batch_size=1, shuffle=False):
self.imdb = imdb
self.batch_size = batch_size
self.shuffle = shuffle
        self.size = len(imdb)  # number of images in the set
#self.index = np.arange(self.size)
self.cur = 0
self.data = None
self.label = None
self.reset()
self.get_batch()
def reset(self):
self.cur = 0
if self.shuffle:
#shuffle test image
np.random.shuffle(self.imdb)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.data
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
imdb = self.imdb[self.cur]
'''
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
#picked image
imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)]
# print(imdb)
'''
#print type(imdb)
#print len(imdb)
#assert len(imdb) == 1, "Single batch only"
im = cv2.imread(imdb)
self.data = im
class ImageLoader:
def __init__(self, imdb, im_size, batch_size=config.BATCH_SIZE, shuffle=False):
self.imdb = imdb
self.batch_size = batch_size
self.im_size = im_size
self.shuffle = shuffle
self.cur = 0
self.size = len(imdb)
self.index = np.arange(self.size)
self.num_classes = 2
self.batch = None
self.data = None
self.label = None
self.label_names = ['label', 'bbox_target']
self.reset()
self.get_batch()
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.data, self.label
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)]
data, label = minibatch.get_minibatch(imdb, self.num_classes, self.im_size)
self.data = data['data']
self.label = [label[name] for name in self.label_names]
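# --- Added usage sketch (the imdb entries and the minibatch module are assumed available):
# loader = ImageLoader(imdb, im_size=12, batch_size=config.BATCH_SIZE, shuffle=True)
# for data, label in loader:   # stops once a full batch no longer fits (see iter_next)
#     pass                     # label == [label_batch, bbox_target_batch]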
|
197250
|
import asyncio
import errno
import logging
import os
import sys
from concurrent import futures
import grpc
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import autoreload
from django_socio_grpc.settings import grpc_settings
logger = logging.getLogger("django_socio_grpc")
class Command(BaseCommand):
help = "Starts an async gRPC server"
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
"address",
nargs="?",
default=f"[::]:{grpc_settings.GRPC_CHANNEL_PORT}",
help="Optional address for which to open a port.",
)
parser.add_argument(
"--max-workers",
type=int,
default=10,
dest="max_workers",
help="Number of maximum worker threads.",
)
parser.add_argument(
"--dev",
action="store_true",
dest="development_mode",
help=(
"Run the server in development mode. This tells Django to use "
"the auto-reloader and run checks."
),
)
def handle(self, *args, **options):
self.address = options["address"]
self.development_mode = options["development_mode"]
self.max_workers = options["max_workers"]
# set GRPC_ASYNC to "true" in order to start server asynchronously
os.environ.setdefault("GRPC_ASYNC", "True")
asyncio.run(self.run(**options))
async def run(self, **options):
"""Run the server, using the autoreloader if needed."""
if self.development_mode:
if hasattr(autoreload, "run_with_reloader"):
autoreload.run_with_reloader(self.inner_run, **options)
else:
autoreload.main(self.inner_run, None, options)
else:
logger.info(
("Starting async gRPC server at %(address)s\n")
% {
"address": self.address,
},
extra={"emit_to_server": False},
)
await self._serve()
async def _serve(self):
try:
server = grpc.aio.server(
futures.ThreadPoolExecutor(max_workers=self.max_workers),
interceptors=grpc_settings.SERVER_INTERCEPTORS,
)
grpc_settings.ROOT_HANDLERS_HOOK(server)
server.add_insecure_port(self.address)
await server.start()
await server.wait_for_termination()
except OSError as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = e
errorData = f"Error: {error_text}"
logger.error(errorData)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
# ---------------------------------------
# ---- EXIT OF GRPC SERVER ---
except KeyboardInterrupt:
# Shuts down the server with 0 seconds of grace period. During the
# grace period, the server won't accept new connections and allow
# existing RPCs to continue within the grace period.
await server.stop(0)
logger.warning("Exit gRPC Server")
def inner_run(self, *args, **options):
# ------------------------------------------------------------------------
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
# ------------------------------------------------------------------------
autoreload.raise_last_exception()
        logger.info("Performing system checks...\n\n", extra={"emit_to_server": False})
self.check(display_num_errors=True)
# -----------------------------------------------------------
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
# -----------------------------------------------------------
self.check_migrations()
        quit_command = "CTRL-BREAK" if sys.platform == "win32" else "CONTROL-C"
        server_start_message = (
            f"Django version {self.get_version()}, using settings {settings.SETTINGS_MODULE}\n"
            f"Starting development async gRPC server at {self.address}\n"
            f"Quit the server with {quit_command}.\n"
        )
# --------------------------------------------
# --- START ASYNC GRPC SERVER ---
# --------------------------------------------
        logger.info(server_start_message, extra={"emit_to_server": False})
asyncio.run(self._serve())
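# --- Added usage note; the management-command name comes from this module's file name
# (django_socio_grpc ships it as `grpcrunaioserver`):
#   python manage.py grpcrunaioserver [::]:50051 --dev --max-workers 10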
|
197261
|
from os import getcwd, stat, remove
import json
from pyinfraboxutils.token import encode_job_token
from pyinfraboxutils.storage import storage
from temp_tools import TestClient, TestUtils
from test_template import ApiTestTemplate
class JobApiTest(ApiTestTemplate):
url_ns = 'api/job'
def test_job(self):
filename = 'temp_file.json'
filesize = 100
TestClient.execute("""INSERT INTO source_upload (id, project_id, filename, filesize)
VALUES (%s, %s, %s, %s)
""", [self.source_upload_id, self.project_id, filename, filesize])
r = TestClient.get(self.url_ns + '/job', self.job_headers)
self.assertEqual(r['project']['id'], self.project_id)
self.assertEqual(r['job']['id'], self.job_id)
def test_source(self):
data = { "data": "dummy_data" }
file_name = "test_source.tmp_test_file"
with open(file_name, "w") as source_data_file:
json.dump(data, source_data_file)
file_size = stat(file_name).st_size
TestClient.execute("""INSERT INTO source_upload (id, project_id, filename, filesize)
VALUES (%s, %s, %s, %s)
""", [self.source_upload_id, self.project_id, file_name, file_size])
TestClient.execute("""UPDATE build SET source_upload_id = %s
WHERE id = %s""", [self.source_upload_id, self.build_id])
with open(file_name, 'r') as source_data:
storage.upload_project(source_data, file_name)
remove(file_name)
response = TestClient.get(self.url_ns + '/source', self.job_headers)
response_size = TestUtils.get_stream_file_size(response.data)
self.assertEqual(response_size, file_size)
def test_cache(self):
filename = 'cache.tar.gz'
file_path = getcwd() + '/' + filename
test_data = open(file_path, 'rb')
files = { 'cache.tar.gz': test_data }
r = TestClient.post(self.url_ns + '/cache', data=files, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
r = TestClient.get(self.url_ns + '/cache', self.job_headers)
actual_cache_size = stat(file_path).st_size
received_cache_size = TestUtils.get_stream_file_size(r.data)
# Ensure downloaded and uploaded file sizes are equal
self.assertEqual(received_cache_size, actual_cache_size)
def test_output(self):
filename = 'output.tar.gz'
file_path = getcwd() + '/' + filename
test_data = open(file_path, 'rb')
files = { 'output.tar.gz': test_data }
r = TestClient.post(self.url_ns + '/output', data=files, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
def test_setrunning(self):
r = TestClient.post(self.url_ns + '/setrunning', {}, self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT state FROM job
WHERE id = %s""", [self.job_id])
job_state = r["state"]
self.assertEqual(job_state, 'running')
def test_create_jobs(self):
job_id = "6544af82-1c4f-5bb5-b1da-a54a0ced5e6f"
data = { "jobs": [{
"id": job_id,
"type": "docker",
"name": "test_job1",
"docker_file": "",
"build_only": False,
"resources": { "limits": { "cpu": 1, "memory": 512 }}
}]}
r = TestClient.post(self.url_ns + '/create_jobs', data, self.job_headers)
self.assertEqual(r, 'Successfully create jobs')
jobs = TestClient.execute_many("""SELECT id, name, type FROM job
WHERE id = %s""", [job_id])
self.assertEqual(jobs[0][0], data["jobs"][0]["id"])
self.assertEqual(jobs[0][1], self.job_name + "/" + data["jobs"][0]["name"])
# If type was equal to "docker" then it should replace it with "run_project_container" type
self.assertEqual(jobs[0][2], "run_project_container")
num_jobs = len(jobs)
self.assertEqual(num_jobs, 1)
def test_consoleupdate(self):
data = { "output": "some test output" }
r = TestClient.post(self.url_ns + '/consoleupdate', data=data, headers=self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT output FROM console
WHERE job_id = %s""", [self.job_id])
self.assertEqual(r["output"], data["output"])
def test_stats(self):
data = { "stats": "finished" }
r = TestClient.post(self.url_ns + '/stats', data=data, headers=self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT stats FROM job
WHERE id = %s""", [self.job_id])
self.assertEqual(r["stats"], "\"%s\"" % data["stats"])
def test_markup(self):
markup_data = {
"version": 1,
"title": "dummy_title",
"elements": [ {
"type": "text",
"text": "dummy_text"
}
]
}
file_name = "test_markup.tmp_test_file.json"
with open(file_name, 'w') as markup_data_file:
json.dump(markup_data, markup_data_file)
with open(file_name, 'r') as markup_data_file:
markup_data_file.seek(0)
data = { "file1": markup_data_file }
r = TestClient.post(self.url_ns + '/markup', data=data, headers=self.job_headers,
content_type='multipart/form-data')
remove(file_name)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT job_id, project_id, name, data FROM job_markup
WHERE job_id = %s""", [self.job_id])
print(r)
# check job_id
self.assertEqual(r[0], self.job_id)
# check project_id
self.assertEqual(r[1], self.project_id)
# check name
self.assertEqual(r[2], "file1")
received_data = json.loads(r[3])
# check data (title)
self.assertEqual(received_data["title"], markup_data["title"])
# check data (elements)
self.assertEqual(received_data["elements"], markup_data["elements"])
def test_badge(self):
job_data = {
"version": 1,
"subject": "subject_val",
"status": "status_val1",
"color": "green"
}
file_name = "test_badge.tmp_test_file.json"
with open(file_name, 'w') as job_data_file:
# Write data into json file
json.dump(job_data, job_data_file)
with open(file_name, 'r') as job_data_file:
data = { "file1": job_data_file }
result = TestClient.post(self.url_ns + '/badge', data=data, headers=self.job_headers,
content_type='multipart/form-data')
remove(file_name)
self.assertEqual(result, {})
r = TestClient.execute_one("""SELECT * from job_badge
WHERE job_id = %s""", [self.job_id])
self.assertEqual(r["job_id"], self.job_id)
self.assertEqual(r["project_id"], self.project_id)
self.assertEqual(r["subject"], job_data["subject"])
self.assertEqual(r["status"], job_data["status"])
self.assertEqual(r["color"], job_data["color"])
def test_testresult(self):
        # test empty data
data = {"data": {}}
result = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers)
self.assertEqual(result['message'], 'data not set')
# test wrong file format
test_filename = 'dummy_results.xml'
with open(test_filename, 'w') as test_file:
# just create file, there's no need to write anything into file
pass
with open(test_filename, 'r') as test_file:
data = { "data": test_file }
r = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r['message'], 'file ending not allowed')
remove(test_filename)
# test data
testresult_data = {
"version": 1,
"tests": [
{
"suite":"api_test_suite",
"name": "test_name1",
"status": "ok",
"duration": 5,
"message": "test_message1",
"stack":"stack1",
"measurements":[]
}, {
"suite":"api_test_suite",
"name": "test_name2",
"status": "failure",
"duration": 21,
"message": "test_message2",
"stack":"stack2",
"measurements":[]
}]
}
test_filename = 'dummy_test_result.json'
with open(test_filename, 'w') as test_file:
json.dump(testresult_data, test_file)
TestClient.execute("""TRUNCATE test_run""")
with open(test_filename, 'r') as test_file:
data = { "data": test_file }
r = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
remove(test_filename)
r = TestClient.execute_many("""SELECT state, duration, message, stack FROM test_run
WHERE job_id = %s""", [self.job_id])
        # The SQL query returns durations as doubles, so convert the expected values to float before comparing
for test in testresult_data["tests"]:
test["duration"] = float(test["duration"])
keys = ['status', 'duration', 'message', 'stack']
for i, received_row in enumerate(r):
# create dictionary from the list to compare it easier
row_dictionary = dict(zip(keys, received_row))
self.assertTrue(all(item in testresult_data["tests"][i].items()
for item in row_dictionary.items()))
def test_setfinished(self):
data = {
"state": "finished",
"message": "Job successfully finished"
}
r = TestClient.post(self.url_ns + '/setfinished', data, self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT state, message, console FROM job
WHERE id = %s""", [self.job_id])
self.assertEqual(r["state"], data["state"])
self.assertEqual(r["message"], data["message"])
self.assertEqual(r["console"], "")
|
197269
|
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
import psutil
import threading
from pynvml import (nvmlInit,
nvmlDeviceGetCount,
nvmlDeviceGetHandleByIndex,
nvmlDeviceGetUtilizationRates,
nvmlDeviceGetName)
def gpu_info():
    """Return a list of (GPU ID, GPU description, GPU % utilization) tuples."""
nvmlInit()
deviceCount = nvmlDeviceGetCount()
info = []
for i in range(0, deviceCount):
handle = nvmlDeviceGetHandleByIndex(i)
util = nvmlDeviceGetUtilizationRates(handle)
desc = nvmlDeviceGetName(handle)
        info.append((i, desc, util.gpu))
return info
class SysMonitor(threading.Thread):
shutdown = False
def __init__(self):
self.utils = []
self.start_time = time.time()
self.duration = 0
threading.Thread.__init__(self)
def run(self):
while not self.shutdown:
dt = datetime.datetime.now()
util = gpu_info()
cpu_percent = psutil.cpu_percent()
self.utils.append([dt] + [x[2] for x in util] + [cpu_percent])
time.sleep(.1)
def stop(self):
self.shutdown = True
self.duration = time.time() - self.start_time
def plot(self, title, vert=False):
if vert:
fig, ax = plt.subplots(2, 1, figsize=(15, 6))
else:
fig, ax = plt.subplots(1, 2, figsize=(15, 6))
fig.suptitle(title, size=24)
        # Each row of self.utils is [timestamp, gpu_util..., cpu_percent];
        # the indices below assume a single GPU.
        ax[0].title.set_text('GPU Utilization')
        ax[0].plot([u[1] for u in self.utils])
        ax[0].set_ylim([0, 100])
        ax[1].title.set_text('CPU Utilization')
        ax[1].plot([u[2] for u in self.utils])
        ax[1].set_ylim([0, 100])
plt.tight_layout(rect=[0, 0.03, 1, 0.9])
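# Usage sketch (not part of the original file; assumes an NVIDIA GPU with
# NVML available so gpu_info() can be called):
#     mon = SysMonitor()
#     mon.start()
#     ...  # run the workload being profiled
#     mon.stop()
#     mon.plot("My workload")
#     plt.show()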
|
197323
|
import numpy as np
import tensorflow as tf
from agents import TabularBasicAgent, capacities
class TabularMCAgent(TabularBasicAgent):
"""
    Agent implementing tabular Monte Carlo control.
"""
def set_agent_props(self):
self.discount = self.config['discount']
self.N0 = self.config['N0']
self.min_eps = self.config['min_eps']
self.initial_q_value = self.config['initial_q_value']
def get_best_config(self, env_name=""):
cartpolev0 = {
'discount': .99
, 'N0': 10
, 'min_eps': 0.001
, 'initial_q_value': 0
}
mountaincarv0 = {
'discount': 0.99
, 'N0': 10
, 'min_eps': 0.001
, 'initial_q_value': 0 # This is an optimistic initialization
}
acrobotv1 = {
"discount": 0.999
, "initial_q_value": 0 # This is an optimistic initialization
, "N0": 100
, "min_eps": 0.11409578938939571
}
return {
'CartPole-v0': cartpolev0
, 'MountainCar-v0': mountaincarv0
, 'Acrobot-v1': acrobotv1
}.get(env_name, cartpolev0)
@staticmethod
def get_random_config(fixed_params={}):
get_discount = lambda: 0.98 + (1 - 0.98) * np.random.random(1)[0]
get_N0 = lambda: np.random.randint(1, 1e3)
get_min_eps = lambda: 1e-4 + (2e-1 - 1e-4) * np.random.random(1)[0]
get_initial_q_value = lambda: 0
random_config = {
'discount': get_discount()
, 'N0': get_N0()
, 'min_eps': get_min_eps()
, 'initial_q_value': get_initial_q_value()
}
random_config.update(fixed_params)
return random_config
def build_graph(self, graph):
with graph.as_default():
tf.set_random_seed(self.random_seed)
self.inputs_plh = tf.placeholder(tf.int32, shape=[None], name="inputs_plh")
q_scope = tf.VariableScope(reuse=False, name='QValues')
with tf.variable_scope(q_scope):
self.Qs = tf.get_variable('Qs'
, shape=[self.nb_state, self.action_space.n]
, initializer=tf.constant_initializer(self.initial_q_value)
, dtype=tf.float32
)
tf.summary.histogram('Qarray', self.Qs)
self.q_preds_t = tf.gather(self.Qs, self.inputs_plh)
policy_scope = tf.VariableScope(reuse=False, name='Policy')
with tf.variable_scope(policy_scope):
if 'UCB' in self.config and self.config['UCB']:
self.actions_t, self.probs_t = capacities.tabular_UCB(
self.Qs, self.inputs_plh
)
else:
self.actions_t, self.probs_t = capacities.tabular_eps_greedy(
self.inputs_plh, self.q_preds_t, self.nb_state, self.env.action_space.n, self.N0, self.min_eps
)
self.action_t = self.actions_t[0]
self.q_value_t = self.q_preds_t[0][self.action_t]
learning_scope = tf.VariableScope(reuse=False, name='Learning')
with tf.variable_scope(learning_scope):
self.rewards_plh = tf.placeholder(tf.float32, shape=[None], name="rewards_plh")
self.targets_t = capacities.get_mc_target(self.rewards_plh, self.discount)
self.loss, self.train_op = capacities.tabular_learning(
self.Qs, self.inputs_plh, self.actions_t, self.targets_t
)
self.score_plh = tf.placeholder(tf.float32, shape=[])
self.score_sum_t = tf.summary.scalar('score', self.score_plh)
self.loss_plh = tf.placeholder(tf.float32, shape=[])
self.loss_sum_t = tf.summary.scalar('loss', self.loss_plh)
self.all_summary_t = tf.summary.merge_all()
self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")
# Playing part
self.pscore_plh = tf.placeholder(tf.float32, shape=[])
self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)
return graph
def act(self, obs, done=False):
state_id = self.phi(obs, done)
act = self.sess.run(self.action_t, feed_dict={
self.inputs_plh: [ state_id ]
})
return act, state_id
def learn_from_episode(self, env, render=False):
score = 0
episodeType = np.dtype([('states', 'int32'), ('actions', 'int32'), ('rewards', 'float32')])
episode = np.array([], dtype=episodeType)
done = False
obs = env.reset()
while not done:
if render:
env.render()
            act, state_id = self.act(obs)
obs, reward, done, info = env.step(act)
memory = np.array([(state_id, act, reward)], dtype=episodeType)
episode = np.append(episode, memory)
score += reward
_, loss = self.sess.run([self.train_op, self.loss], feed_dict={
self.inputs_plh: episode['states'],
self.actions_t: episode['actions'],
self.rewards_plh: episode['rewards'],
})
summary, _, episode_id = self.sess.run([self.all_summary_t, self.inc_ep_id_op, self.episode_id], feed_dict={
self.score_plh: score,
self.loss_plh: loss
})
self.sw.add_summary(summary, episode_id)
|
197386
|
import pytest
@pytest.mark.django_db
def test_welcome_django(client):
    """
    Asserts that the base URL returns HTTP 200, meaning the request
    succeeded and the front page exists.
    """
response = client.get("http://localhost:8000/")
assert response.status_code == 200
|
197415
|
from opentuner.resultsdb.models import *
class DriverBase(object):
    """
    Shared base class between MeasurementDriver and SearchDriver.
    """
def __init__(self,
session,
tuning_run,
objective,
tuning_run_main,
args,
**kwargs):
self.args = args
self.objective = objective
self.session = session
self.tuning_run_main = tuning_run_main
self.tuning_run = tuning_run
self.program = tuning_run.program
def results_query(self,
generation=None,
objective_ordered=False,
config=None):
self.session.flush()
q = self.session.query(Result)
q = q.filter_by(tuning_run=self.tuning_run)
if config:
q = q.filter_by(configuration=config)
if generation is not None:
subq = (self.session.query(DesiredResult.result_id)
.filter_by(tuning_run=self.tuning_run,
generation=generation))
q = q.filter(Result.id.in_(subq.subquery()))
if objective_ordered:
q = self.objective.result_order_by(q)
return q
def requests_query(self):
q = self.session.query(DesiredResult).filter_by(tuning_run=self.tuning_run)
return q
|
197435
|
from setup import *
from pyglet_gui.manager import Manager
from pyglet_gui.buttons import Button
from pyglet_gui.containers import VerticalContainer, HorizontalContainer, GridContainer
from pyglet_gui.theme import Theme
theme = Theme({"font": "Lucida Grande",
"font_size": 12,
"text_color": [255, 255, 255, 255],
"gui_color": [255, 0, 0, 255],
"button": {
"down": {
"image": {
"source": "button-down.png",
"frame": [8, 6, 2, 2],
"padding": [18, 18, 8, 6]
},
"text_color": [0, 0, 0, 255]
},
"up": {
"image": {
"source": "button.png",
"frame": [6, 5, 6, 3],
"padding": [18, 18, 8, 6]
}
}
}
}, resources_path='../theme/')
hlay = HorizontalContainer(content=[VerticalContainer(content=[Button("(1,1)"), Button("(1,2)")]),
VerticalContainer(content=[Button("(2,1)"), Button("(2,2)")])])
grid = GridContainer([[Button("(1,1)"), Button("(1,2)")],
[Button("(2,1)"), Button("(2,2)")]])
vlay = VerticalContainer([hlay, grid])
Manager(vlay, window=window, batch=batch, theme=theme)
pyglet.app.run()
|
197446
|
import itertools
def raster(input_size):
    """Iterate over all integer coordinates of a grid with the given dimensions."""
    return itertools.product(*[range(dim_size) for dim_size in input_size])
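# Example (added for illustration): coordinates are yielded in row-major order.
#     list(raster((2, 2)))  # -> [(0, 0), (0, 1), (1, 0), (1, 1)]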
|
197463
|
import logging
from .protocol.response import RAPDU
from .util import format_bytes
class TransmissionProtocol(object):
"""Transport layer. Only currently supports T0 transport.
Defined in: EMV 4.3 Book 1 section 9
See also Annex A for examples.
"""
def __init__(self, connection):
"""Connection should be a pyscard connection."""
self.log = logging.getLogger(__name__)
self.connection = connection
self.connection.connect(connection.T0_protocol)
assert connection.getProtocol() == connection.T0_protocol
self.log.info("Connected to reader")
def transmit(self, tx_data):
"""Send raw data to the card, and receive the reply.
tx_data should be a list of bytes.
Returns a tuple of (data, sw1, sw2) where sw1 and sw2
are the protocol status bytes.
"""
self.log.debug("Tx: %s", format_bytes(tx_data))
data, sw1, sw2 = self.connection.transmit(tx_data)
self.log.debug("Rx: %s, SW1: %02x, SW2: %02x", format_bytes(data), sw1, sw2)
return data, sw1, sw2
def exchange(self, capdu):
"""Send a command to the card and return the response.
Accepts a CAPDU object and returns a RAPDU.
"""
send_data = capdu.marshal()
data, sw1, sw2 = self.transmit(send_data)
        if sw1 == 0x6C:
            # ICC asks the terminal to retry with Le reduced to SW2
            send_data[4] = sw2
            data, sw1, sw2 = self.transmit(send_data)
        while sw1 == 0x61:
            # ICC has continuation data: fetch it with GET RESPONSE
            # (00 C0 00 00 <SW2>) and append it. pyscard's transmit()
            # returns the status words separately from the data, so the
            # previously received data must be kept in full.
            d, sw1, sw2 = self.transmit([0x00, 0xC0, 0x00, 0x00, sw2])
            data = data + d
res = RAPDU.unmarshal(data + [sw1, sw2])
return res
|
197476
|
from torch.utils.data.dataloader import DataLoader
from typing import Dict, List, Tuple, Callable, Text, IO, Optional
import numpy as np
import onnx
__DOMAIN__ = ""
__OPSET_VERSION__ = 12
from furiosa_sdk_quantizer.frontend.onnx import spec
from furiosa_sdk_quantizer.frontend.onnx.utils.inference_shape import InferenceShape
from furiosa_sdk_quantizer.frontend.onnx.utils.version_checker import CheckVersion
from furiosa_sdk_quantizer.frontend.onnx.transformer.polish_model import PolishModel
from furiosa_sdk_quantizer.frontend.onnx.transformer.eliminate_argmax_output import (
EliminateArgmaxOutput,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_bn_into_conv import FuseBnIntoConv
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_lp_normalization import (
FuseLpNormalization,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.deprecated.fuse_scalar_mul_into_conv import (
FuseScalarMulIntoConv,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_conv import FuseConv
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_depth_to_space import FuseDepthToSpace
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_gelu import FuseGELU
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_layer_normalization import (
FuseLayerNormalization,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_redundant_reshape_pattern import (
FuseRedundantReshapePattern,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.fuse_pad import FusePad
from furiosa_sdk_quantizer.frontend.onnx.transformer.eliminate_redundant_reshape_pattern import (
EliminateRedundantReshapePattern,
)
from furiosa_sdk_quantizer.frontend.onnx.transformer.convert_conv1d_to_conv2d import (
ConvertConv1dToConv2d,
)
from furiosa_sdk_quantizer.frontend.onnx.quantizer.calibrator import ONNXCalibrator
from furiosa_sdk_quantizer.frontend.onnx.quantizer import calibrator, quantizer
def _transform(
transformers: List[Callable[[onnx.ModelProto], onnx.ModelProto]], model: onnx.ModelProto
) -> onnx.ModelProto:
for transform in transformers:
model = transform(model)
return model
def _polish_model(model: onnx.ModelProto) -> onnx.ModelProto:
return _transform(
[
PolishModel().transform,
],
model,
)
def _inference_shape(model: onnx.ModelProto) -> onnx.ModelProto:
return InferenceShape(model).inference_shape()
def _reify(model: onnx.ModelProto) -> onnx.ModelProto:
transformers = [
ConvertConv1dToConv2d().transform,
FuseConv().transform,
FusePad().transform,
FuseBnIntoConv().transform,
FuseDepthToSpace().transform,
FuseGELU().transform,
FuseLayerNormalization().transform,
FuseLpNormalization().transform,
FuseRedundantReshapePattern().transform,
EliminateRedundantReshapePattern().transform,
]
return _transform(transformers, model)
def export_spec(model: onnx.ModelProto, output: IO[Text]):
model = _transform([_inference_shape, _reify], model)
spec.export_spec.OnnxExportSpec(model).dump(output)
def optimize_model(model: onnx.ModelProto) -> onnx.ModelProto:
model = _transform([CheckVersion().transform], model)
model = _transform([_polish_model], model)
# Apply _inference_shape if there exists 1) a node output whose value
# information is not stored in model.graph.value_info or
# model.graph.output or 2) a value_info in model.graph.value_info whose
# shape information is empty.
value_names = set(value_info.name for value_info in model.graph.value_info)
value_names.update(value_info.name for value_info in model.graph.output)
if any(
value_name not in value_names for node in model.graph.node for value_name in node.output
) or any(not value_info.type.tensor_type.shape.dim for value_info in model.graph.value_info):
model = _transform([_inference_shape], model)
# TODO check if graph_transform should apply.
model = _transform([_reify], model)
return model
def build_calibration_model(model: onnx.ModelProto) -> onnx.ModelProto:
model = optimize_model(model)
return ONNXCalibrator(model).build_calibration_model()
def quantize(
model: onnx.ModelProto,
per_channel: bool,
static: bool,
mode: quantizer.QuantizationMode,
dynamic_ranges: Dict[str, Tuple[float, float]],
) -> onnx.ModelProto:
return quantizer.FuriosaONNXQuantizer(
model, per_channel, static, mode, dynamic_ranges
).quantize()
def post_training_quantize(
model: onnx.ModelProto,
dataset: List[Dict[str, np.ndarray]],
per_channel: bool = True,
) -> onnx.ModelProto:
"""Post-training-quantizes an ONNX model with a calibration dataset.
Args:
model: An ONNX model to quantize.
dataset: A calibration dataset.
per_channel: If per_channel is True, Conv's filters are
per-channel quantized. Otherwise, they are per-tensor
quantized.
Returns:
An ONNX model post-training-quantized with the calibration
dataset.
"""
model = optimize_model(model)
ranges = calibrate(model, dataset)
return quantize(model, per_channel, True, quantizer.QuantizationMode.dfg, ranges)
def post_training_quantization_with_random_calibration(
model: onnx.ModelProto,
per_channel: bool,
static: bool,
mode: quantizer.QuantizationMode,
num_data: Optional[int] = None,
) -> onnx.ModelProto:
if not static:
raise Exception("Currently only supports static quantization.")
if mode not in [quantizer.QuantizationMode.dfg, quantizer.QuantizationMode.fake]:
raise Exception("Currently only supports QuantizationMode dfg or fake.")
model = optimize_model(model)
calibration_model = build_calibration_model(model)
dynamic_ranges = ONNXCalibrator(calibration_model).calibrate_with_random(num_data)
return quantize(model, per_channel, static, mode, dynamic_ranges)
def calibrate(
model: onnx.ModelProto, dataset: List[Dict[str, np.ndarray]]
) -> Dict[str, Tuple[float, float]]:
"""Estimates the range of tensors in a model, based on a dataset.
Args:
model: An ONNX model to calibrate.
dataset: A calibration dataset.
Returns:
A dict mapping tensors in the model to their minimum and maximum
values.
"""
augmented_model = ONNXCalibrator(model).build_calibration_model()
return calibrator.calibrate(augmented_model, dataset)
def calibrate_with_random(
model: onnx.ModelProto, num_data: Optional[int] = None
) -> Dict[str, Tuple[float, float]]:
model = optimize_model(model)
calibration_model = ONNXCalibrator(model).build_calibration_model()
return ONNXCalibrator(calibration_model).calibrate_with_random(num_data)
def calibrate_with_data_loader(
model: onnx.ModelProto, loader: DataLoader
) -> Dict[str, Tuple[float, float]]:
model = optimize_model(model)
calibration_model = ONNXCalibrator(model).build_calibration_model()
return ONNXCalibrator(calibration_model).calibrate_with_data_loader(loader)
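# Usage sketch for post-training quantization (added for illustration; the
# model path and the input tensor name "input" are hypothetical and must
# match the actual model):
#     model = onnx.load("model.onnx")
#     dataset = [{"input": np.random.rand(1, 3, 224, 224).astype(np.float32)}]
#     quantized = post_training_quantize(model, dataset)
#     onnx.save(quantized, "model_quantized.onnx")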
|
197482
|
from dataclasses import dataclass, fields
from typing import List, ClassVar, Any, Tuple, Union
import re
from json_debug import to_json, StrJsonMixin, snake2camel, snake2pascal
__all__ = [
'Term', 'Var', 'Atom', 'Struct', 'Functor', 'Clause',
'Addr', 'Register', 'StackAddr', 'AtomAddr',
'Instruction', 'Builtin',
'GetInstr', 'GetValue', 'GetVariable', 'GetAtom', 'GetStruct',
'PutInstr', 'PutValue', 'PutVariable', 'PutAtom', 'PutStruct',
'UnifyInstr', 'UnifyValue', 'UnifyVariable', 'UnifyAtom',
'Call', 'Execute', 'Proceed', 'Halt', 'Allocate', 'Deallocate',
'to_list', 'from_list',
]
def is_var_name(name: str) -> bool:
return bool(name) and (name[0].isupper() or name[0] == '_')
def atom_needs_escape(name: str) -> bool:
LEXICAL = "()',"
SPACE = r" \n\t"
SYMBOLS = r"\\=[\].:!@#$%&*+{}^~?/<>-"
if not name:
return True
if is_var_name(name):
return True
if re.search(f'[{LEXICAL}{SPACE}]', name):
return True
if re.match(r'\d.*\D', name):
return True
if re.match(f'[{SYMBOLS}].*[^{SYMBOLS}]', name):
return True
return False
class Term:
pass
class Var(Term):
def __init__(self, name: str):
if not is_var_name(name):
raise ValueError(f"Invalid var name: {name}")
self.name = name
def __str__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, Var) and self.name == other.name
def __repr__(self):
return f"Var({self.name!r})"
class Atom(Term):
def __init__(self, name: str):
self.name = name
def __str__(self):
if atom_needs_escape(self.name):
return "'" + self.name.replace("'", "''") + "'"
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, Atom) and self.name == other.name
def __repr__(self):
return f"Atom({self.name!r})"
@dataclass(frozen=True)
class Functor(StrJsonMixin):
name: str
arity: int
def __str__(self):
return f"{self.name}/{self.arity}"
class Struct(Term):
def __init__(self, name: str, *args: "Term"):
if is_var_name(name):
raise ValueError(f"Invalid struct name: {name}")
self.name = name
self.args = args
self._hash = hash((name, args))
@property
def arity(self) -> int:
return len(self.args)
def functor(self) -> Functor:
return Functor(self.name, self.arity)
def __str__(self):
return str_term(self)
def __hash__(self):
return self._hash
def __eq__(self, other):
return isinstance(other, Struct) and self.name == other.name and self.args == other.args
def __repr__(self):
if not self.args:
return f"Struct({self.name!r})"
args = ", ".join(repr(arg) for arg in self.args)
return f"Struct({self.name!r}, {args})"
def str_term(term: Term):
stack: List[Union[Term, str]] = [term]
s = ""
while stack:
elem = stack.pop()
if isinstance(elem, (Atom, Var, str)):
s += str(elem)
elif isinstance(elem, Struct):
s += f"{elem.name}("
stack.append(")")
for i, arg in enumerate(elem.args[::-1]):
if i > 0:
stack.append(", ")
stack.append(arg)
else:
raise ValueError(f"unhandled Term type {type(elem)} ({elem!r})")
return s
def to_list(terms: List[Term], tail: Term = Atom("[]")) -> Term:
l = tail
for term in terms[::-1]:
l = Struct(".", term, l)
return l
def from_list(l: Term) -> Tuple[List[Term], Term]:
terms: List[Term] = []
while isinstance(l, Struct) and l.functor() == Functor(".", 2):
head, tail = l.args
terms.append(head)
l = tail
return terms, l
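# Worked example (added for illustration): to_list/from_list round-trip
# through the usual './2 cons-cell encoding:
#     to_list([Atom("a"), Atom("b")])
#         == Struct(".", Atom("a"), Struct(".", Atom("b"), Atom("[]")))
#     from_list(to_list([Atom("a"), Atom("b")]))
#         == ([Atom("a"), Atom("b")], Atom("[]"))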
class Clause:
def __init__(self, head: Struct, *body: Struct):
self.head = head
self.body = body
def __hash__(self):
return hash((self.head, self.body))
def __eq__(self, other):
return isinstance(other, Clause) and self.head == other.head and self.body == other.body
def __str__(self):
if not self.body:
return f"{self.head}."
body = ",\n ".join(str(s) for s in self.body)
return f"{self.head} :-\n {body}."
def __repr__(self):
return str(self)
class Addr(StrJsonMixin):
pass
@dataclass(frozen=True)
class Register(Addr):
index: int
def __str__(self):
return f"X{self.index}"
@dataclass(frozen=True)
class StackAddr(Addr):
index: int
def __str__(self):
return f"Y{self.index}"
@dataclass(frozen=True)
class AtomAddr(Addr):
atom: Atom
def __str__(self):
return f"@{self.atom}"
@dataclass(frozen=True)
class Instruction:
name: ClassVar[str]
def __str__(self):
args = ", ".join(str(getattr(self, field.name)) for field in fields(self))
return f"{self.name} {args}"
def to_json(self):
obj = {'Type': snake2camel(self.name)}
for field in fields(self):
value = getattr(self, field.name)
obj[snake2pascal(field.name)] = to_json(value)
return obj
@dataclass(frozen=True)
class Builtin(Instruction):
name = "builtin"
args: List[Any]
def __str__(self):
f, *args = self.args
args = ", ".join(str(arg) for arg in args)
return f"{f} {args}"
@dataclass(frozen=True)
class GetInstr(Instruction):
reg: Register
@dataclass(frozen=True)
class GetValue(GetInstr):
name = "get_val"
addr: Addr
@dataclass(frozen=True)
class GetVariable(GetInstr):
name = "get_var"
addr: Addr
@dataclass(frozen=True)
class GetAtom(GetInstr):
name = "get_atom"
atom: Atom
@dataclass(frozen=True)
class GetStruct(GetInstr):
name = "get_struct"
functor: Functor
@dataclass(frozen=True)
class PutInstr(Instruction):
reg: Register
@dataclass(frozen=True)
class PutValue(PutInstr):
name = "put_val"
addr: Addr
@dataclass(frozen=True)
class PutVariable(PutInstr):
name = "put_var"
addr: Addr
@dataclass(frozen=True)
class PutAtom(PutInstr):
name = "put_atom"
atom: Atom
@dataclass(frozen=True)
class PutStruct(PutInstr):
name = "put_struct"
functor: Functor
@dataclass(frozen=True)
class UnifyInstr(Instruction):
pass
@dataclass(frozen=True)
class UnifyValue(UnifyInstr):
name = "unify_val"
addr: Addr
@dataclass(frozen=True)
class UnifyVariable(UnifyInstr):
name = "unify_var"
addr: Addr
@dataclass(frozen=True)
class UnifyAtom(UnifyInstr):
name = "unify_atom"
atom: Atom
@dataclass(frozen=True)
class Call(Instruction):
name = "call"
functor: Functor
@dataclass(frozen=True)
class Execute(Instruction):
name = "execute"
functor: Functor
@dataclass(frozen=True)
class Proceed(Instruction):
name = "proceed"
@dataclass(frozen=True)
class Halt(Instruction):
name = "halt"
@dataclass(frozen=True)
class Allocate(Instruction):
name = "allocate"
num_perms: int
@dataclass(frozen=True)
class Deallocate(Instruction):
name = "deallocate"
|
197483
|
import argparse
import sys
from edge.config import EdgeConfig
from edge.exception import EdgeException
def add_config_parser(subparsers):
parser = subparsers.add_parser("config", help="Configuration related actions")
actions = parser.add_subparsers(title="action", dest="action", required=True)
actions.add_parser("get-region", help="Get configured region")
def run_config_actions(args: argparse.Namespace):
if args.action == "get-region":
with EdgeConfig.context(silent=True) as config:
print(config.google_cloud_project.region)
sys.exit(0)
else:
        raise EdgeException("Unexpected config command")
|
197485
|
from .stationarybootstrap import Bootstrap
from .crossquantilogram import CrossQuantilogram
from .qtests import BoxPierceQ,LjungBoxQ
from .utils import DescriptiveStatistics
from .api import CQBS,CQBS_alphas,CQBS_years
from .plot import bar_example,heatmap_example,rolling_example
__doc__ = """The `Cross-Quantilogram` (CQ) is a correlation statistic that measures the quantile dependence between two time series. It can be used to test the hypothesis that one time series has no directional predictability for another. The stationary bootstrap establishes the asymptotic distribution of the CQ statistic and the corresponding test statistics."""
|
197506
|
class Solution:
    def isIdealPermutation(self, A):
        """
        :type A: List[int]
        :rtype: bool

        A permutation is "ideal" when its number of global inversions equals
        its number of local (adjacent) inversions, i.e. no pair i < j - 1 has
        A[i] > A[j]. Equivalently, max(A[0..i]) <= A[i + 2] for every i.
        """
size, m = len(A), 0
for i in range(size - 2):
m = max(m, A[i])
if m > A[i + 2]:
return False
return True
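# Worked examples (added for illustration):
#     Solution().isIdealPermutation([1, 0, 2])  # True: 1 local, 1 global inversion
#     Solution().isIdealPermutation([1, 2, 0])  # False: 1 local, 2 global inversions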
|
197520
|
from django.apps import AppConfig
class OrderanddeliveryConfig(AppConfig):
name = 'OrderAndDelivery'
|
197542
|
import pytest
from yggdrasil import components
def test_import_component():
r"""Test dynamic import of component."""
# Test use of default
components.import_component('serializer')
components.import_component('serializer')
    # Test explicit type (but still new to registry)
components.import_component('serializer', 'direct')
components.import_component('serializer', 'direct')
# Using key specific to component
components.import_component('serializer', seritype='direct')
# Test using class name
components.import_component('serializer', 'PandasSerialize')
# Test access to file through comm (including error)
components.import_component('comm', 'pickle')
with pytest.raises(components.ComponentError):
components.import_component('comm', 'invalid')
# Tests with registry suspended
out = components.suspend_registry()
components.import_component('serializer')
components.import_component('serializer', 'direct')
components.import_component('serializer', 'PandasSerialize')
components.restore_registry(out)
def test_create_component():
r"""Test dynamic creation of component instance."""
x = components.create_component('serializer', seritype='direct')
assert(components.isinstance_component(x, ['serializer']))
assert(components.isinstance_component(x, ['comm', 'serializer']))
assert(not components.isinstance_component(x, ['comm']))
x = components.create_component('serializer')
assert(components.isinstance_component(x, ['serializer']))
|
197546
|
from sqlalchemy.sql import text
from sqlalchemy.schema import (
MetaData,
Table,
Column,
CheckConstraint,
ForeignKeyConstraint
)
from sqlalchemy.types import (
Integer,
UnicodeText,
BigInteger,
Boolean,
)
metadata = MetaData()
roles = Table(
'roles', metadata,
Column('role_name', UnicodeText, primary_key=True),
Column('role_password', UnicodeText, nullable=False),
Column('role_email', UnicodeText, nullable=False),
Column('role_phone', UnicodeText),
Column('is_active', Boolean, nullable=False, server_default=text('True')),
Column('is_admin', Boolean, nullable=False, server_default=text('False')),
schema="application")
groups = Table(
'groups', metadata,
Column('group_name', UnicodeText, nullable=False, primary_key=True),
Column('group_description', UnicodeText),
Column('group_kind', UnicodeText, nullable=False, primary_key=True),
CheckConstraint("group_kind IN ('instance', 'role')"),
schema="application")
instances = Table(
'instances', metadata,
Column('agent_address', UnicodeText, nullable=False, primary_key=True),
Column('agent_port', Integer, nullable=False, primary_key=True),
Column('agent_key', UnicodeText),
Column('hostname', UnicodeText, nullable=False),
Column('cpu', Integer),
Column('memory_size', BigInteger),
Column('pg_port', Integer),
Column('pg_version', UnicodeText),
Column('pg_version_summary', UnicodeText),
Column('pg_data', UnicodeText),
Column('notify', Boolean, nullable=False, server_default=text('True')),
Column('comment', UnicodeText),
schema="application")
plugins = Table(
'plugins', metadata,
Column('agent_address', UnicodeText, nullable=False, primary_key=True),
Column('agent_port', Integer, nullable=False, primary_key=True),
Column('plugin_name', UnicodeText, nullable=False, primary_key=True),
ForeignKeyConstraint(['agent_address', 'agent_port'],
['application.instances.agent_address',
'application.instances.agent_port'],
ondelete="CASCADE", onupdate="CASCADE"),
schema="application")
instance_groups = Table(
'instance_groups', metadata,
Column('agent_address', UnicodeText, nullable=False, primary_key=True),
Column('agent_port', Integer, nullable=False, primary_key=True),
Column('group_name', UnicodeText, nullable=False, primary_key=True),
    Column('group_kind', UnicodeText, nullable=False,
           server_default=text("'instance'")),
CheckConstraint("group_kind = 'instance'"),
ForeignKeyConstraint(['agent_address', 'agent_port'],
['application.instances.agent_address',
'application.instances.agent_port'],
ondelete="CASCADE", onupdate="CASCADE"),
ForeignKeyConstraint(['group_name', 'group_kind'],
['application.groups.group_name',
'application.groups.group_kind'],
ondelete="CASCADE", onupdate="CASCADE"),
schema="application")
role_groups = Table(
'role_groups', metadata,
Column('role_name', UnicodeText, nullable=False, primary_key=True),
Column('group_name', UnicodeText, nullable=False, primary_key=True),
    Column('group_kind', UnicodeText, nullable=False,
           server_default=text("'role'")),
CheckConstraint("group_kind = 'role'"),
ForeignKeyConstraint(['role_name'],
['application.roles.role_name'],
ondelete="CASCADE", onupdate="CASCADE"),
ForeignKeyConstraint(['group_name', 'group_kind'],
['application.groups.group_name',
'application.groups.group_kind'],
ondelete="CASCADE", onupdate="CASCADE"),
schema="application")
access_role_instance = Table(
'access_role_instance', metadata,
Column('role_group_name', UnicodeText, nullable=False, primary_key=True),
    Column('role_group_kind', UnicodeText, nullable=False,
           server_default=text("'role'")),
Column('instance_group_name', UnicodeText, nullable=False,
primary_key=True),
    Column('instance_group_kind', UnicodeText, nullable=False,
           server_default=text("'instance'")),
CheckConstraint("role_group_kind = 'role'"),
CheckConstraint("instance_group_kind = 'instance'"),
ForeignKeyConstraint(['role_group_name', 'role_group_kind'],
['application.groups.group_name',
'application.groups.group_kind'],
ondelete="CASCADE", onupdate="CASCADE"),
ForeignKeyConstraint(['instance_group_name', 'instance_group_kind'],
['application.groups.group_name',
'application.groups.group_kind'],
ondelete="CASCADE", onupdate="CASCADE"),
schema="application")
|
197663
|
from django.core import checks
from django.db import models
class ModelRaisingMessages(models.Model):
    @classmethod
    def check(cls, **kwargs):
        return [checks.Warning('A warning')]
|
197680
|
import numpy as np
import random
def nuclear_norm_alpha_generation(num_models, **params):
    """Return 0 followed by num_models - 1 powers of two spaced evenly
    between 2 ** options[0] and 2 ** options[1]."""
return np.array(
[0]
+ [
2 ** x
for x in np.linspace(
start=params["options"][0],
stop=params["options"][1],
num=(num_models - 1),
)
]
)
def hidden_size_generation(num_models, **params):
    """Sample num_models hidden sizes (with replacement) from the distinct
    powers of two 2 ** x for x in arange(options[0], options[1], step)."""
return random.choices(
list(
{
int(2 ** x)
for x in np.arange(
params["options"][0], params["options"][1], params["step"]
)
}
),
k=num_models,
)
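# Example (added for illustration): with options=(-2, 0), four models get 0
# plus three evenly spaced powers of two:
#     nuclear_norm_alpha_generation(4, options=(-2, 0))
#     # -> array([0.  , 0.25, 0.5 , 1.  ])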
|
197702
|
import json
from django.test import RequestFactory
from tally_ho.apps.tally.views.data import tally_list_view as views
from tally_ho.libs.permissions import groups
from tally_ho.libs.tests.test_base import create_tally, TestBase
class TestTallyListView(TestBase):
"""
Test tally list class base views.
"""
def setUp(self):
self.factory = RequestFactory()
self._create_permission_groups()
self._create_and_login_user()
self._add_user_to_group(self.user, groups.SUPER_ADMINISTRATOR)
def test_tally_list_view(self):
"""
Test that tally list view template is rendered correctly
"""
tally = create_tally()
tally.users.add(self.user)
view = views.TallyListView.as_view()
request = self.factory.get('/')
request.user = self.user
response = view(request)
self.assertContains(response, "Tally List")
self.assertContains(response, "Id")
self.assertContains(response, "Name")
self.assertContains(response, "Creation")
self.assertContains(response, "Last Modification")
self.assertContains(response, "Administration")
self.assertContains(response, "Actions")
def test_tally_list_data_view(self):
"""
Test that tally list data view returns correct data
"""
tally = create_tally()
tally.users.add(self.user)
view = views.TallyListDataView.as_view()
request = self.factory.get('/')
request.user = self.user
response = view(request)
tally_id, tally_name, created_date, modified_formatted_date,\
admin_view_link, edit_link = json.loads(
response.content.decode())['data'][0]
self.assertEquals(
admin_view_link,
f'<a href="/super-administrator/{tally.id}/"'
' class ="btn btn-default btn-small">Admin View</a>')
self.assertEquals(
edit_link,
f'<a href="/tally-manager/update-tally/{tally.id}/"'
' class ="btn btn-default btn-small">Edit</a>')
self.assertEquals(tally_id, str(tally.id))
self.assertEquals(tally_name, tally.name)
self.assertEquals(created_date, str(tally.created_date))
self.assertEquals(
modified_formatted_date,
tally.modified_date.strftime('%a, %d %b %Y %H:%M:%S %Z'))
def test_tally_list_data_view_valid_search_filter(self):
"""
Test that tally list data view returns the correct data
when a valid search filter is applied.
"""
tally_1 = create_tally(name='example_1_tally')
create_tally(name='example_2_tally')
view = views.TallyListDataView.as_view()
request = self.factory.get('/')
request.user = self.user
request.GET = request.GET.copy()
request.GET['search[value]'] = tally_1.name
response = view(request)
data = json.loads(response.content.decode())['data']
tally_id, tally_name, created_date, modified_formatted_date,\
admin_view_link, edit_link = data[0]
self.assertEquals(1, len(data))
self.assertEquals(
admin_view_link,
f'<a href="/super-administrator/{tally_1.id}/"'
' class ="btn btn-default btn-small">Admin View</a>')
self.assertEquals(
edit_link,
f'<a href="/tally-manager/update-tally/{tally_1.id}/"'
' class ="btn btn-default btn-small">Edit</a>')
self.assertEquals(tally_id, str(tally_1.id))
self.assertEquals(tally_name, tally_1.name)
self.assertEquals(created_date, str(tally_1.created_date))
self.assertEquals(
modified_formatted_date,
tally_1.modified_date.strftime('%a, %d %b %Y %H:%M:%S %Z'))
def test_tally_list_data_view_invalid_search_filter(self):
"""
Test that tally list data view returns no data when an invalid
search filter is applied.
"""
create_tally(name='example_1_tally')
create_tally(name='example_2_tally')
view = views.TallyListDataView.as_view()
request = self.factory.get('/')
request.user = self.user
request.GET = request.GET.copy()
request.GET['search[value]'] = 'Invalid search text'
response = view(request)
json_data = json.loads(response.content.decode())
self.assertListEqual([], json_data['data'])
self.assertEquals(2, json_data['recordsTotal'])
|
197712
|
import argparse
import json
import pandas as pd
from scipy.stats import pearsonr
def main(human_ann_file, metrics_file):
metrics = json.load(open(metrics_file))
metrics['gold_1'] = metrics['g1']
metrics['gold_2'] = metrics['g2']
metrics['gold_3'] = metrics['g3']
human_ann_df = pd.read_csv(human_ann_file)
unique_models = list(human_ann_df["Input.MODEL"].unique())
eval_categories = [
'Answer.counterfactual',
'Answer.ending',
'Answer.plot',
'Answer.premise',
'Answer.second'
]
print("{}\t{}\t{}\t{}".format("Model Name", "Human Acc", "Drift Similarity", "CFR"))
for ec in eval_categories:
        print("======= {}".format(ec))
human_eval_numbers = []
drift_similarities = []
cfr_metrics = []
for um in unique_models:
if um == "gold_1" or um == "gold_2":
continue
model_df = human_ann_df[human_ann_df["Input.MODEL"] == um]
model_story_ann = model_df.groupby("Input.STORY").aggregate("mean")
ec_accuracy = (model_story_ann[ec] >= 2).mean()
original_name = um.split(".")[0]
if original_name not in metrics:
print("SKIPPING {}".format(original_name))
continue
print("{}\t{}\t{}\t{}".format(um, ec_accuracy, metrics[original_name]["drift_similarity"], metrics[original_name]["CFR_METRIC"]))
human_eval_numbers.append(ec_accuracy)
drift_similarities.append(metrics[original_name]["drift_similarity"])
cfr_metrics.append(metrics[original_name]["CFR_METRIC"])
drift_correl = pearsonr(human_eval_numbers, drift_similarities)
cfr_correl = pearsonr(human_eval_numbers, cfr_metrics)
print("DRIFT:\tCorrelation:\t{}\tP-value\t{}".format(drift_correl[0], drift_correl[1]))
print("CFR:\tCorrelation:\t{}\tP-value\t{}".format(cfr_correl[0], cfr_correl[1]))
print("\n\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='evaluate.py',
usage='%(prog)s gold_annotations predictions',
description='Evaluate story rewrite'
)
parser.add_argument('--human-ann-file', type=str,
dest="human_ann_file",
help='Location of human annotation file. Usually obtained from mturk download and named *.csv',
default=None)
parser.add_argument('--metrics-file', type=str,
dest="metrics_file",
help='Location of metrics file. Usually named metrics.json',
default=None)
args = parser.parse_args()
    # Print the parsed arguments, then run the evaluation
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args.human_ann_file, args.metrics_file)
|
197715
|
import logging
from aws_gate.constants import AWS_DEFAULT_PROFILE, AWS_DEFAULT_REGION
from aws_gate.decorators import (
plugin_version,
plugin_required,
valid_aws_profile,
valid_aws_region,
)
from aws_gate.query import query_instance
from aws_gate.session_common import BaseSession
from aws_gate.utils import (
get_aws_client,
get_aws_resource,
fetch_instance_details_from_config,
)
logger = logging.getLogger(__name__)
class SSMSession(BaseSession):
def __init__(
self,
instance_id,
region_name=AWS_DEFAULT_REGION,
        profile_name=AWS_DEFAULT_PROFILE,
ssm=None,
):
self._instance_id = instance_id
self._region_name = region_name
self._profile_name = profile_name if profile_name is not None else ""
self._ssm = ssm
self._session_parameters = {"Target": self._instance_id}
@plugin_required
@plugin_version("1.1.23.0")
@valid_aws_profile
@valid_aws_region
def session(
config,
instance_name,
profile_name=AWS_DEFAULT_PROFILE,
region_name=AWS_DEFAULT_REGION,
):
instance, profile, region = fetch_instance_details_from_config(
config, instance_name, profile_name, region_name
)
ssm = get_aws_client("ssm", region_name=region, profile_name=profile)
ec2 = get_aws_resource("ec2", region_name=region, profile_name=profile)
instance_id = query_instance(name=instance, ec2=ec2)
if instance_id is None:
raise ValueError("No instance could be found for name: {}".format(instance))
logger.info(
"Opening session on instance %s (%s) via profile %s",
instance_id,
region,
profile,
)
with SSMSession(instance_id, region_name=region, ssm=ssm) as sess:
sess.open()
|
197727
|
class UnexpectedMode(ValueError):
def __init__(self, mode: str) -> None:
super().__init__(
f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'"
)
|
197797
|
import torch
from torch import nn
import torch.nn.functional as F
class SimpleCNN(nn.Module):
def __init__(self, num_of_conv, in_channels, out_channels, kernel_size, in_features, out_features=None, stride=1,
dilation=1, groups=1, bias=True, active_func=F.relu, pooling=F.max_pool1d,
dropout=0.5, padding_strategy="default", padding_list=None, fc_layer=True, include_map=False):
"""
        :param num_of_conv: number of parallel convolutions (Kim-CNN style)
        :param kernel_size: an int (replicated num_of_conv times) or a list
            of length num_of_conv
        :param out_features: output feature size of the final linear layer
"""
super(SimpleCNN, self).__init__()
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size] * num_of_conv
        if len(kernel_size) != num_of_conv:
            raise ValueError("len(kernel_size) must equal num_of_conv")
        if padding_list is None:
            if padding_strategy == "default":
                padding_list = [(k_size - 1, 0) for k_size in kernel_size]
self.include_map = include_map
self.conv = nn.ModuleList([nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(k_size, in_features),
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
for k_size, padding in zip(kernel_size, padding_list)])
self.pooling = pooling
self.active_func = active_func
self.fc_layer = fc_layer
if fc_layer:
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(num_of_conv * out_channels, out_features)
def forward(self, input):
if len(input.size()) == 3:
input = input.unsqueeze(1)
# input = (batch, in_channels, sent_len, word_dim)
x_map = [self.active_func(conv(input)).squeeze(3) for conv in self.conv]
# (batch, channel_output, ~=sent_len) * Ks
x = [self.pooling(i, i.size(2)).squeeze(2) for i in x_map] # max-over-time pooling
x = torch.cat(x, 1) # (batch, out_channels * Ks)
if self.fc_layer:
x = self.dropout(x)
x = self.fc(x)
        if self.include_map:
            return x, x_map
        return x
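# Usage sketch for SimpleCNN (added for illustration; shapes are hypothetical):
# a batch of 8 sentences of length 20 with 50-dim embeddings and three
# parallel convolutions.
#     cnn = SimpleCNN(num_of_conv=3, in_channels=1, out_channels=16,
#                     kernel_size=[3, 4, 5], in_features=50, out_features=2)
#     logits = cnn(torch.randn(8, 20, 50))  # -> shape (8, 2)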
class CharCNN(SimpleCNN):
"""
Single CNN for char
input: Tensor (batch, sent_len, word_len, char_dim)
"""
def forward(self, input):
if len(input.size()) == 4:
input = input.unsqueeze(2)
# input = (batch, sent_len, in_channels, word_len, char_dim)
x = torch.stack([super(CharCNN, self).forward(input[i, :, :, :, :])
for i in range(input.size(0))], dim=0)
# x = (batch, sent_len, output_feature)
return x
|
197816
|
from urllib.parse import quote, urlparse
def _url_valid(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
    except ValueError:
return False
def http_build_query(params, convention="%s"):
if len(params) == 0:
return ""
output = ""
for key in params.keys():
if type(params[key]) is dict:
output += http_build_query(params[key], convention % key + "[%s]")
elif type(params[key]) is list:
new_params = {str(i): element for i, element
in enumerate(params[key])}
output += http_build_query(
new_params, convention % key + "[%s]")
else:
val = quote(str(params[key]))
key = quote(key)
output = output + convention % key + "=" + val + "&"
return output
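# Example (added for illustration): PHP-style nested encoding; note that the
# function leaves a trailing '&' on its output.
#     http_build_query({"a": 1, "b": {"c": 2}})  # -> "a=1&b[c]=2&"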
|
197828
|
import pygame
from character import Character
from copy import copy
block_00 = Character(0, 0, pygame.image.load('Map/Level 3/00.jpg'), {"x" : 0, "y" : 0})
block_01 = Character(1, 0, pygame.image.load('Map/Level 3/01.jpg'), {"x" : 1, "y" : 0})
block_02 = Character(2, 0, pygame.image.load('Map/Level 3/02.jpg'), {"x" : 2, "y" : 0})
block_03 = Character(3, 0, pygame.image.load('Map/Level 3/03.jpg'), {"x" : 3, "y" : 0})
block_10 = Character(0, 1, pygame.image.load('Map/Level 3/10.jpg'), {"x" : 0, "y" : 1})
block_11 = Character(1, 1, pygame.image.load('Map/Level 3/11.jpg'), {"x" : 1, "y" : 1})
block_12 = Character(2, 1, pygame.image.load('Map/Level 3/12.jpg'), {"x" : 2, "y" : 1})
block_13 = Character(3, 1, pygame.image.load('Map/Level 3/13.jpg'), {"x" : 3, "y" : 1})
block_20 = Character(0, 2, pygame.image.load('Map/Level 3/20.jpg'), {"x" : 0, "y" : 2})
block_21 = Character(1, 2, pygame.image.load('Map/Level 3/21.jpg'), {"x" : 1, "y" : 2})
block_22 = Character(2, 2, pygame.image.load('Map/Level 3/22.jpg'), {"x" : 2, "y" : 2})
block_23 = Character(3, 2, pygame.image.load('Map/Level 3/23.jpg'), {"x" : 3, "y" : 2})
block_31 = Character(1, 3, pygame.image.load('Map/Level 3/31.jpg'), {"x" : 1, "y" : 3})
block_32 = Character(2, 3, pygame.image.load('Map/Level 3/32.jpg'), {"x" : 2, "y" : 3})
block_33 = Character(3, 3, pygame.image.load('Map/Level 3/33.jpg'), {"x" : 3, "y" : 3})
level_3 = [
[block_00, block_01, block_02, block_03],
[block_10, block_11, block_12, block_13],
[block_20, block_21, block_22, block_23],
[None, block_31, block_32, block_33]
]
win = pygame.image.load('Map/Level 3/win.jpg')
win_click = pygame.image.load('Map/Level 3/win_click.jpg')
hint = pygame.image.load('Map/Level 3/hint.jpg')
sound_path = 'Sound/Level/Level 3.mp3'
class Level_3:
def __init__(self):
self.blocks = self.create_new_blocks()
self.win_images = [win, win_click, hint]
self.sound_path = sound_path
self.item_x = 720
self.item_y = 394
self.item_width = 160
self.item_height = 109
def create_new_blocks(self):
blocks = []
for y in range(4):
blocks.append([])
for x in range(4):
                if level_3[y][x] is not None:
                    blocks[y].append(copy(level_3[y][x]))
                else:
                    blocks[y].append(None)
return blocks
|
197831
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import sys
kratos_benchmarking_path = '../../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
import benchmarking
print("Building reference data for rotatingcone_PureConvection.py...")
benchmarking.BuildReferenceData("rotatingcone_PureConvectionBenchmarking.py", "rotatingcone_PureConvection_ref.txt")
|
197833
|
import json
import pytest
import sys
sys.path.insert(0, './dinghy_ping/')
import services.api as service
import models.data as data
with open('tests/multiple_domains.json') as f:
multiple_domains = json.load(f)
@pytest.fixture
def api():
return service.api
@pytest.fixture
def session(api):
return api.requests
def test_dinghy_ping_google_http(api):
r = api.requests.get("/ping/http/google.com")
assert r.status_code == 200
def test_dinghy_ping_google_tcp_connection(api):
r = api.requests.get("/form-input-tcp-connection-test?tcp-endpoint=google.com&tcp-port=443")
assert r.status_code == 200
def test_dinghy_ping_google_https_and_query_params(api):
r = api.requests.get("/ping/https/www.google.com/search?source=hp&ei=aIHTW9mLNuOJ0gK8g624Ag&q=dinghy&btnK=Google+Search&oq=dinghy&gs_l=psy-ab.3..35i39l2j0i131j0i20i264j0j0i20i264j0l4.4754.5606..6143...1.0..0.585.957.6j5-1......0....1..gws-wiz.....6..0i67j0i131i20i264.oe0qJ9brs-8")
assert r.status_code == 200
def test_dinghy_ping_google_no_proto_set_and_query_params(api):
r = api.requests.get("/ping//www.google.com/search?source=hp&ei=aIHTW9mLNuOJ0gK8g624Ag&q=dinghy&btnK=Google+Search&oq=dinghy&gs_l=psy-ab.3..35i39l2j0i131j0i20i264j0j0i20i264j0l4.4754.5606..6143...1.0..0.585.957.6j5-1......0....1..gws-wiz.....6..0i67j0i131i20i264.oe0qJ9brs-8")
assert r.status_code == 200
"""
def test_multiple_domains_request_for_google(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][0]['domain_response_code'] == 200
def test_multiple_domains_request_for_google_with_params(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][1]['domain_response_code'] == 200
def test_multiple_domains_request_for_microsoft(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][2]['domain_response_code'] == 200
"""
def test_ping_saved_results(api):
api.requests.get("/ping/http/www.google.com")
p = service._get_all_pinged_urls()
assert "http://www.google.com/" in p
|
197872
|
import sys
import os
from discretizer import Discretizer
def main():
program_name = os.path.basename(sys.argv[0])
#Database name
db_files = {'yahoo': 'no_date_database.pdl'}
try:
db_names = sys.argv[1]
except IndexError:
raise Exception('No db name. Please, re-run as {0} dbname.pdl'.format(program_name))
if db_names == 'all':
discretizer = Discretizer(db_names, db_files)
else:
try:
            discretizer = Discretizer(db_names, {db_names: db_files[db_names]})
except KeyError:
raise Exception('Invalid db name {0}. Please, check the name and re-run.'.format(db_names))
discretizer.load_db(check=False, fix=False, save_to_file=False)
corpus = discretizer.build_corpus()
stems = discretizer.build_stems(corpus)
stemmed_vocabulary = discretizer.build_vocabulary(stems)
distib_matrix = discretizer.build_distribution_matrix(stems)
# grouping
threads = discretizer.load_threads()
# discretization and sorting
threads = discretizer.compute_features(threads, stemmed_vocabulary, distib_matrix)
discretizer.save_csv(threads)
if __name__ == "__main__":
sys.exit(main())
"""db = Base('dotnet-v1.pydb', save_to_file=False)
db.open()
#recs = [r for r in db if r('type') == 'question' and r('answers') > 0]
rec = (db("type") == 'question') & (db("answers") > 0)
print len(rec)"""
|
197930
|
from dataclasses import make_dataclass
from typing import Dict, List
NAMES: List[str] = [
"uptake",
"TNF",
"trigger_iIkk",
"deact_TNFR",
"deact_ppIkk",
"deact_pnNfk",
"act_Ikk_by_TNF",
"act_pIkk",
"act_Ikb_by_Ikk",
"act_Nfk_by_Ikk",
"act_Nfk_by_Ikk_complex",
"act_Ikb_complex",
"form_complex",
"form_complex_nuc",
"ext_nNfkIkb",
"Vnuc",
"split_NfkpIkb",
"split_NfkIkb",
"int_Nfk",
"int_Ikb",
"eta_int_pNfk",
"degrad_Ikb",
"degrad_mIkb",
"degrad_RnaA20",
"degrad_A20",
"prod_Ikb",
"prod_mIkb_by_nNfk",
"build_RnaA20",
"build_A20",
"shuttle_RnaA20",
]
NUM: int = len(NAMES)
Parameters = make_dataclass(
cls_name="Parameters",
fields=[(name, int) for name in NAMES],
namespace={"NAMES": NAMES, "NUM": NUM},
frozen=True,
)
name2idx: Dict[str, int] = {k: v for v, k in enumerate(NAMES)}
C = Parameters(**name2idx)
del name2idx
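# Example (added for illustration): C maps each parameter name to its index,
# e.g. C.uptake == 0 and C.TNF == 1, with C.NUM == 30 parameters in total.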
|
197951
|
import os,sys,time
import numpy as np
import copy
import math
import torch
import torch.nn.functional as F
from .utils import BayesianSGD
class Appr(object):
def __init__(self,model,args,lr_min=1e-6,lr_factor=3,lr_patience=5,clipgrad=1000):
self.model=model
self.device = args.device
self.lr_min=lr_min
self.lr_factor=lr_factor
self.lr_patience=lr_patience
self.clipgrad=clipgrad
self.init_lr=args.lr
self.sbatch=args.sbatch
self.nepochs=args.nepochs
self.arch=args.arch
self.samples=args.samples
self.lambda_=1.
self.output=args.output
self.checkpoint = args.checkpoint
self.experiment=args.experiment
self.num_tasks=args.num_tasks
self.modules_names_with_cls = self.find_modules_names(with_classifier=True)
self.modules_names_without_cls = self.find_modules_names(with_classifier=False)
def train(self,t,xtrain,ytrain,xvalid,yvalid):
# Update the next learning rate for each parameter based on their uncertainty
params_dict = self.update_lr(t)
self.optimizer = BayesianSGD(params=params_dict)
best_loss=np.inf
# best_model=copy.deepcopy(self.model)
best_model = copy.deepcopy(self.model.state_dict())
lr = self.init_lr
patience = self.lr_patience
# Loop epochs
try:
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(t,xtrain,ytrain)
clock1=time.time()
train_loss,train_acc=self.eval(t,xtrain,ytrain)
clock2=time.time()
print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,
1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),
train_loss,100*train_acc),end='')
# Valid
valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss, 100 * valid_acc), end='')
if math.isnan(valid_loss) or math.isnan(train_loss):
                    print("stopping because the loss became NaN; the best model so far will be restored and saved")
break
# Adapt lr
if valid_loss<best_loss:
best_loss=valid_loss
best_model=copy.deepcopy(self.model.state_dict())
patience=self.lr_patience
print(' *',end='')
else:
patience-=1
if patience<=0:
lr/=self.lr_factor
print(' lr={:.1e}'.format(lr),end='')
if lr<self.lr_min:
print()
break
patience=self.lr_patience
params_dict = self.update_lr(t, adaptive_lr=True, lr=lr)
self.optimizer=BayesianSGD(params=params_dict)
print()
except KeyboardInterrupt:
print()
# Restore best
self.model.load_state_dict(copy.deepcopy(best_model))
self.save_model(t)
def update_lr(self,t, lr=None, adaptive_lr=False):
params_dict = []
if t==0:
params_dict.append({'params': self.model.parameters(), 'lr': self.init_lr})
else:
for name in self.modules_names_without_cls:
n = name.split('.')
if len(n) == 1:
m = self.model._modules[n[0]]
elif len(n) == 3:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]
elif len(n) == 4:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]._modules[n[3]]
                else:
                    raise ValueError("unexpected module name depth: {}".format(name))
if adaptive_lr is True:
params_dict.append({'params': m.weight_rho, 'lr': lr})
params_dict.append({'params': m.bias_rho, 'lr': lr})
else:
w_unc = torch.log1p(torch.exp(m.weight_rho.data))
b_unc = torch.log1p(torch.exp(m.bias_rho.data))
params_dict.append({'params': m.weight_mu, 'lr': torch.mul(w_unc,self.init_lr)})
params_dict.append({'params': m.bias_mu, 'lr': torch.mul(b_unc,self.init_lr)})
params_dict.append({'params': m.weight_rho, 'lr':self.init_lr})
params_dict.append({'params': m.bias_rho, 'lr':self.init_lr})
return params_dict
def find_modules_names(self, with_classifier=False):
modules_names = []
for name, p in self.model.named_parameters():
if with_classifier is False:
if not name.startswith('classifier'):
n = name.split('.')[:-1]
modules_names.append('.'.join(n))
else:
n = name.split('.')[:-1]
modules_names.append('.'.join(n))
modules_names = set(modules_names)
return modules_names
def logs(self,t):
lp, lvp = 0.0, 0.0
for name in self.modules_names_without_cls:
n = name.split('.')
if len(n) == 1:
m = self.model._modules[n[0]]
elif len(n) == 3:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]
elif len(n) == 4:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]._modules[n[3]]
lp += m.log_prior
lvp += m.log_variational_posterior
lp += self.model.classifier[t].log_prior
lvp += self.model.classifier[t].log_variational_posterior
return lp, lvp
def train_epoch(self,t,x,y):
self.model.train()
r=np.arange(x.size(0))
np.random.shuffle(r)
r=torch.LongTensor(r).to(self.device)
        num_batches = len(x)//self.sbatch
# Loop batches
for i in range(0,len(r),self.sbatch):
if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]
else: b=r[i:]
images, targets = x[b].to(self.device), y[b].to(self.device)
# Forward
loss=self.elbo_loss(images,targets,t,num_batches,sample=True).to(self.device)
            # Backward
            self.model.to(self.device)
            self.optimizer.zero_grad()
            loss.backward(retain_graph=True)
# Update parameters
self.optimizer.step()
return
def eval(self,t,x,y,debug=False):
total_loss=0
total_acc=0
total_num=0
self.model.eval()
r=np.arange(x.size(0))
r=torch.as_tensor(r, device=self.device, dtype=torch.int64)
with torch.no_grad():
num_batches = len(x)//self.sbatch
# Loop batches
for i in range(0,len(r),self.sbatch):
if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]
else: b=r[i:]
images, targets = x[b].to(self.device), y[b].to(self.device)
# Forward
outputs=self.model(images,sample=False)
output=outputs[t]
loss = self.elbo_loss(images, targets, t, num_batches,sample=False,debug=debug)
_,pred=output.max(1, keepdim=True)
total_loss += loss.detach()*len(b)
total_acc += pred.eq(targets.view_as(pred)).sum().item()
total_num += len(b)
return total_loss/total_num, total_acc/total_num
    def set_model_(self, state_dict):
        self.model.load_state_dict(copy.deepcopy(state_dict))
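    # Monte Carlo ELBO estimate: draw `self.samples` weight samples, average
    # log q(w) - log p(w) (the KL part, scaled by 1/num_batches as in Bayes by
    # Backprop minibatching), and add the weighted negative log-likelihood.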
def elbo_loss(self, input, target, t, num_batches, sample,debug=False):
if sample:
lps, lvps, predictions = [], [], []
for i in range(self.samples):
predictions.append(self.model(input,sample=sample)[t])
lp, lv = self.logs(t)
lps.append(lp)
lvps.append(lv)
# hack
w1 = 1.e-3
w2 = 1.e-3
w3 = 5.e-2
outputs = torch.stack(predictions,dim=0).to(self.device)
log_var = w1*torch.as_tensor(lvps, device=self.device).mean()
log_p = w2*torch.as_tensor(lps, device=self.device).mean()
nll = w3*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum').to(device=self.device)
return (log_var - log_p)/num_batches + nll
else:
predictions = []
for i in range(self.samples):
pred = self.model(input,sample=False)[t]
predictions.append(pred)
# hack
# w1 = 1.e-3
# w2 = 1.e-3
w3 = 5.e-6
outputs = torch.stack(predictions,dim=0).to(self.device)
nll = w3*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum').to(device=self.device)
return nll
# w1, w2, w3 = self.get_coefs(nll,log_var,log_p,num_batches)
# print ("New coefficients for task {} are w1={}, w2={}, w3={}".format(t,w1,w2,w3))
# if math.isnan(log_var) or math.isnan(log_p) or math.isnan(nll):
# nll = torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum')
# # if log_var > 1e3 or log_p > 1e3 or nll>1e3:
# print ("BEFORE: ", (log_var/num_batches).item(), (log_p / num_batches).item(), nll.item())
# # while math.isnan(nll):
# # nll = 1e-5*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum')
def save_model(self,t):
torch.save({'model_state_dict': self.model.state_dict(),
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(t)))
# def get_coefs(self,nll,log_var,log_p,num_batches):
# def take_n(num):
# return torch.log10(num).item()
#
# exponents = np.array([take_n(num) for num in [nll, log_p, log_var]])
# min_exp = exponents.min()
# min_exp_idx = np.argmin(exponents)
# if min_exp_idx == 0:
# w1 = (10**(3-(take_n(log_var)+min_exp)))*num_batches
# w2 = (10**-(3-(take_n(log_p)+min_exp)))*num_batches
# w3 = 10.**(3-min_exp_idx)
# if min_exp_idx == 1:
# w1 = (10**(3-(take_n(log_var)+min_exp)))*num_batches
# w3 = 10**(3-(take_n(nll)+min_exp))
# w2 = (10.**-(3-min_exp_idx))*num_batches
# if min_exp_idx == 2:
# w3 = 10**(3-(take_n(nll)+min_exp))
# w2 = (10**-(3-(take_n(log_p)+min_exp)))*num_batches
# w1 = (10.**(3-min_exp_idx))*num_batches
#
# return w1, w2, w3
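# Note: BayesianSGD is defined elsewhere in this repo. A minimal sketch of the
# behaviour assumed above -- an SGD variant whose per-group 'lr' may be a
# tensor, enabling the element-wise uncertainty-scaled steps built in
# update_lr() (hypothetical illustration, not the actual implementation):
#
#   class BayesianSGD(torch.optim.Optimizer):
#       def __init__(self, params):
#           super().__init__(params, defaults={})
#
#       @torch.no_grad()
#       def step(self):
#           for group in self.param_groups:
#               for p in group['params']:
#                   if p.grad is not None:
#                       p.add_(-group['lr'] * p.grad)  # 'lr' may be a tensor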
|
197997
|
import unittest
import numpy as np
from pax.datastructure import Event, Pulse
from pax import core
class TestZLE(unittest.TestCase):
def setUp(self):
self.pax = core.Processor(config_names='XENON100',
just_testing=True,
config_dict={
'pax': {
'plugin_group_names': ['test'],
'test': 'ZLE.SoftwareZLE'},
'ZLE.SoftwareZLE': {
'zle_threshold': 40,
'samples_to_store_before': 50,
'samples_to_store_after': 50,
'max_intervals': 32,
'special_thresholds': {}
}})
self.plugin = self.pax.get_plugin_by_name('SoftwareZLE')
def tearDown(self):
delattr(self, 'pax')
delattr(self, 'plugin')
def test_zle(self):
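        # Each case maps a waveform (in ADC counts above baseline) to the
        # expected [left, right] pulse bounds after zero-length encoding with
        # threshold 40 and 50 samples kept before/after each crossing.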
for w, pulse_bounds_should_be in (
([60, 60], [[0, 1]]),
([0, 60, 60, 0], [[0, 3]]),
([1] * 100 + [60] + [2] * 100, [[50, 149]]),
([1] * 100 + [30] + [2] * 100, []),
([1] * 100 + [60] + [2] * 200 + [60] + [3] * 100, [[50, 149], [252, 351]]),
([1] * 100 + [60] + [2] * 70 + [60] + [3] * 100, [[50, 100 + 1 + 70 + 1 + 50 - 1]]),
):
w = np.array(w).astype(np.int16)
# Convert from ADC above baseline (easier to specify) to raw ADC counts (what the plugin needs)
w = self.plugin.config['digitizer_reference_baseline'] - w
e = Event(n_channels=self.plugin.config['n_channels'],
start_time=0,
stop_time=int(1e6),
sample_duration=self.pax.config['DEFAULT']['sample_duration'],
pulses=[Pulse(left=0,
channel=1,
raw_data=w)])
e = self.plugin.transform_event(e)
pulse_bounds = [[pulse.left, pulse.right] for pulse in e.pulses]
# Check the pulse bounds
self.assertEqual(pulse_bounds, pulse_bounds_should_be)
# Check if the data was put in correctly
for i, (l, r) in enumerate(pulse_bounds):
self.assertEqual(e.pulses[i].raw_data.tolist(), w[l:r + 1].tolist())
if __name__ == '__main__':
unittest.main()
|
198025
|
import unittest
from tests.recipes.recipe_lib_test import BaseTestForMakeRecipe
class TestLibcurlRecipe(BaseTestForMakeRecipe, unittest.TestCase):
"""
An unittest for recipe :mod:`~pythonforandroid.recipes.libcurl`
"""
recipe_name = "libcurl"
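    # sh_command_calls presumably lists the shell invocations the mocked
    # build is expected to make; libcurl's build only runs ./configure.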
sh_command_calls = ["./configure"]
|
198046
|
import streamlit as st
def app():
st.title("Create an interactive map")
backend = st.selectbox(
"Select a plotting backend",
["ipyleaflet", "folium", "heremap", "keperl.gl"],
index=1,
)
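    # st.echo() both executes the block and renders its source in the app,
    # so the page shows which leafmap backend import is in effect.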
if backend == "ipyleaflet":
with st.echo():
import leafmap.leafmap as leafmap
elif backend == "folium":
with st.echo():
import leafmap.foliumap as leafmap
elif backend == "heremap":
with st.echo():
import leafmap.heremap as leafmap
elif backend == "keperl.gl":
with st.echo():
import leafmap.kepler as leafmap
with st.echo():
m = leafmap.Map()
m.to_streamlit()
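# Standalone usage sketch (assuming this module is saved as app.py and a
# trailing call to app() is added, since Streamlit only runs top-level code):
#   streamlit run app.py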
|