| id | content |
|---|---|
453271
|
from __future__ import print_function
from math import ceil
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
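# Each outer ring i of the number spiral has corners (2i+1)^2, (2i+1)^2 - 2i,
# (2i+1)^2 - 4i and (2i+1)^2 - 6i, which together sum to 4*(2i+1)^2 - 12i;
# accumulating this over all rings gives the diagonal total (Project Euler 28).
# As a sanity check, diagonal_sum(5) should return 101.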
def diagonal_sum(n):
total = 1
for i in xrange(1, int(ceil(n / 2.0))):
odd = 2 * i + 1
even = 2 * i
total = total + 4 * odd ** 2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(diagonal_sum(1001))
else:
try:
n = int(sys.argv[1])
print(diagonal_sum(n))
except ValueError:
print("Invalid entry - please enter a number")
|
453313
|
from pathlib import Path # pylint: disable=unused-import
from os.path import expanduser # pylint: disable=unused-import
import pytest
from .context import load_tweet_sentiment_train, CLASS_VALUES
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data/tweet_sentiment")).exists()')
def test_data_load():
data = load_tweet_sentiment_train()
for i, row in enumerate(data):
assert all(key in row.keys() for key in ['id', 'tweet', 'sentiment_class'])
assert isinstance(row['id'], str)
assert isinstance(row['tweet'], str)
assert CLASS_VALUES[row['sentiment_class']] in ['POSITIVE', 'NEGATIVE']
assert i == 11116
|
453348
|
self.description = 'download a remote package with -U'
self.require_capability("curl")
url = self.add_simple_http_server({})
self.args = '-Uw {url}/foo.pkg'.format(url=url)
self.addrule('!PACMAN_RETCODE=0')
self.addrule('!CACHE_FEXISTS=foo.pkg')
self.addrule('!CACHE_FEXISTS=foo.pkg.sig')
|
453351
|
from datetime import datetime
from couchdbkit.ext.django.schema import Document, StringProperty, \
DateTimeProperty
class Post(Document):
author = StringProperty()
title = StringProperty()
content = StringProperty()
date = DateTimeProperty(default=datetime.utcnow)
class Comment(Document):
author = StringProperty()
content = StringProperty()
date = DateTimeProperty(default=datetime.utcnow)
post = StringProperty()
|
453381
|
import doctest
import sys
if sys.version_info >= (3,):
def load_tests(loader, tests, ignore):
return doctest.DocFileSuite('../README.rst')
|
453401
|
from dolfin import *
from xii import *
from block import block_bc
def setup_problem(radius, mesh_gen):
'''
This is 3d-1d system inspired by https://arxiv.org/abs/1803.04896
There is a Robin forcing f on the entire 3d bdry and g 1d bdry.
No multiplier.
'''
# FIXME: Dirichlet bcs
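# The unknowns are the 3d potential u3 and the 1d potential u; they are coupled
# through the averaging operator Pi, which averages 3d functions over a circle of
# the given radius around the 1d curve, so the assembled problem is a 2x2 block
# system with the exchange terms in a[0][1] and a[1][0].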
mesh3d, mesh1d, bdry_vertex = mesh_gen()
# There is a 3d/1d/iface/bdry conductivity; made up
k3d, k1d, kG, kbdry = list(map(Constant, (0.1, 1, 0.5, 0.2)))
# f is made up
f = Expression('pow(x[0]-0.5, 2)+pow(x[1]-0.5, 2)+pow(x[2]-0.5, 2)', degree=2)
# g is 1 at bdry vertex and then decays with distance
g = Expression('exp(-sqrt(pow(x[0]-x0, 2)+pow(x[1]-x1, 2)+pow(x[2]-x2, 2)))',
degree=4,
x0=bdry_vertex[0], x1=bdry_vertex[1], x2=bdry_vertex[2])
dxG = Measure('dx', domain=mesh1d)
V3 = FunctionSpace(mesh3d, 'CG', 1)
V = FunctionSpace(mesh1d, 'CG', 1)
W = (V3, V)
u3, u = list(map(TrialFunction, W))
v3, v = list(map(TestFunction, W))
averaging_shape = Circle(radius, 10)
Pi_u3, Pi_v3 = (Average(x, mesh1d, averaging_shape) for x in (u3, v3))
a = [[0]*len(W) for _ in range(len(W))]
a[0][0] = inner(k3d*grad(u3), grad(v3))*dx +\
inner(kbdry*u3, v3)*ds + \
inner(Constant(2*pi*radius)*kG*Pi_u3, Pi_v3)*dxG
a[1][1] = inner(Constant(pi*radius**2)*k1d*grad(u), grad(v))*dxG +\
inner(u, v)*ds +\
inner(Constant(2*pi*radius)*kG*u, v)*dxG
a[0][1] = -inner(Constant(2*pi*radius)*kG*u, Pi_v3)*dxG
a[1][0] = -inner(Constant(2*pi*radius)*kG*v, Pi_u3)*dxG
L = [inner(kbdry*f, v3)*ds,
inner(kbdry*g, v)*dxG]
AA, bb = list(map(ii_assemble, (a, L)))
# Return the block system
return AA, bb, W
# --------------------------------------------------------------------
if __name__ == '__main__':
import meshing, argparse, sys, petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Problem params
parser.add_argument('-scale', default=0.25, type=float,
help='Scale mesh size relative to definition in geo')
parser.add_argument('-npoints', type=int, default=80,
help='Num points to draw the curve')
parser.add_argument('-radius', type=float, default=0.1,
help='Radius of the tubes')
args, petsc_args = parser.parse_known_args()
# Let's rock
mesh_gen = lambda: meshing.load(args.scale,
lambda mesh: meshing.fun(mesh, npoints=args.npoints))
timer = Timer('setup'); timer.start()
AA, bb, W = setup_problem(args.radius, mesh_gen)
print('\tProblem setup took %g s\n \tNumber of unknowns %d ' % (timer.stop(), sum(Wi.dim() for Wi in W)))
x = AA*bb
y = AA.transpmult(bb)
assert (x-y).norm() < 1E-14, 'AA is not symmetric!'
wh = ii_Function(W)
timer = Timer('solver'); timer.start()
# Convert
AAm, bbm = list(map(ii_convert, (AA, bb)))
niters = LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
print('\tSolver took %g s. Niters %d' % (timer.stop(), niters))
for i, wh_i in enumerate(wh):
# Renaming to make it easier to save state in VisIt/ParaView
wh_i.rename('u', str(i))
File('nothas_rad%g_scale%g_u%d.pvd' % (args.radius, args.scale, i)) << wh_i
|
453420
|
import ocean
for dtype in [ocean.int16, ocean.float, ocean.chalf] :
a = ocean.tensor([],dtype,ocean.gpu[0])
a.fill(24)
print(a)
a.fill(36)
print(a)
|
453424
|
import requests
import json
#Archive-It creds
aitAccount = ""
aitUser = ""
aitPassword = ""
#setup Archive-It auth session
aitSession = requests.Session()
if len(aitAccount) > 0 and len(aitUser) > 0 and len(aitPassword) > 0:
aitSession.auth = (str(aitUser), str(aitPassword))
# URL to request
requestURL = "https://partner.archive-it.org/api/collection?id=3308"
#make actual request
print ("requesting " + requestURL)
requestResult = aitSession.get(requestURL)
#checking for errors
if requestResult.status_code != requests.codes.ok:
print ("There was an error with your request, reponse --> " + str(requestResult.status_code))
requestResult.raise_for_status()
else:
# pretty print json to console
print (json.dumps(requestResult.json(), indent=2))
|
453442
|
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from federated.utils.rfa import create_rfa_averaging
class RFATest(tf.test.TestCase):
"""Class for testing RFA."""
def get_test_data(self) -> list:
"""Creates data for RFA tests.
Returns:
list: A list of single-example tf.data.Dataset objects, one per client.
"""
random = np.random.RandomState(0)
X = random.rand(10, 10).astype(np.float32)
y = random.rand(10, 1).astype(np.float32)
return [
tf.data.Dataset.from_tensor_slices(
collections.OrderedDict(x=X[i : i + 1], y=y[i : i + 1])
).batch(1)
for i in range(X.shape[0])
]
def create_model(self):
"""Creates a model-building function for RFA tests.
Returns:
Callable: Function that builds the Keras model wrapped as a tff.learning.Model.
"""
def create_model_fn():
keras_model = tf.keras.models.Sequential(
[
tf.keras.layers.Input(shape=(10,)),
tf.keras.layers.Dense(
1, kernel_initializer="zeros", use_bias=False
),
]
)
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=self.get_test_data()[0].element_spec,
loss=tf.keras.losses.MeanSquaredError(),
)
return create_model_fn
def test_rfa(self):
"""Function for testing RFA."""
create_model_fn = self.create_model()
dataset = self.get_test_data()
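# Note (assumption, not confirmed by this file): create_rfa_averaging presumably
# builds a TFF iterative process that aggregates client updates robustly; RFA
# approximates the geometric median via a smoothed Weiszfeld scheme, so
# `iterations` and `v` below are taken to be the number of Weiszfeld iterations
# and the smoothing parameter.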
iterative_process = create_rfa_averaging(
create_model_fn,
iterations=2,
v=1e-6,
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01),
)
state = iterative_process.initialize()
state, _ = iterative_process.next(state, dataset)
self.assertIsInstance(state, tff.learning.framework.optimizer_utils.ServerState)
if __name__ == "__main__":
tf.test.main()
|
453474
|
import pandas as pd
import matplotlib.pyplot as plt
from patsy import dmatrices
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# part 1
print("part 1")
fname = "https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data"
fname = "../abalone.data"
df = pd.read_csv(fname,
names=["sex", "length", "diam", "height", "wt_whole", "wt_shucked", "wt_viscera", "wt_shell", "rings"],
index_col=False)
# part 2
print("part 2")
y, X = dmatrices("rings ~ C(sex) + length + diam + height + wt_whole + wt_shucked + wt_viscera + wt_shell",
df, return_type="dataframe")
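# patsy dummy-codes sex and adds an explicit Intercept column; drop it here
# because sklearn's LinearRegression fits its own intercept.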
X.drop(columns=['Intercept'], inplace=True)
# part 3
print("part 3")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# part 4
print("part 4")
model = LinearRegression()
model.fit(X_train, y_train)
predicted = model.predict(X_test)
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.scatter(predicted, y_test)
ax.set_xlabel(r'predicted')
ax.set_ylabel(r'true')
ax.set_title("Number of Rings in Abalone")
plt.savefig('regression.png')
|
453480
|
import os
import sys
import requests
import logging
from splunklib.modularinput import *
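# do_work records the configured PCAP path in InputHistory.log and then rewrites
# the file from a set of its lines so each path is kept only once.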
def do_work(input_name, ew, path):
filepath = os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', 'SplunkForPCAP', 'bin', 'InputHistory.log')
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
t_path=os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', 'SplunkForPCAP', 'bin')
filename = 'InputHistory.log'
with open(os.path.join(t_path, filename), 'a+') as f:
f.write(path+';'+'\n')
with open(os.path.join(t_path, filename), 'r') as f:
lines_set = set(f.readlines())
with open(os.path.join(t_path, filename), 'w') as out:
for line in lines_set:
out.write(line)
class MyScript(Script):
def get_scheme(self):
scheme = Scheme("PCAP File Location")
scheme.description = "Location of PCAP files to be analyzed"
scheme.use_external_validation = True
scheme.use_single_instance = True
path_argument = Argument("path")
path_argument.data_type = Argument.data_type_string
path_argument.description = "Please specify the full path of the PCAP file location"
path_argument.required_on_create = True
scheme.add_argument(path_argument)
return scheme
def validate_input(self, validation_definition):
logging.error("PCAP4LIFE")
path = str(validation_definition.parameters["path"])
logging.error("path %s" % path)
if len(path) < 1:
raise ValueError("Incorrect Path")
def stream_events(self, inputs, ew):
for input_name, input_item in inputs.inputs.iteritems():
path = str(input_item["path"])
do_work(input_name, ew, path)
if __name__ == "__main__":
MyScript().run(sys.argv)
|
453495
|
import os
import sys
import functools
import operator
import weakref
import inspect
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = (str,)
else:
string_types = (basestring,)
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
class _ObjectProxyMethods(object):
# We use properties to override the values of __module__ and
# __doc__. If we add these in ObjectProxy, the derived class
# __dict__ will still be setup to have string variants of these
# attributes and the rules of descriptors means that they appear to
# take precedence over the properties in the base class. To avoid
# that, we copy the properties into the derived class type itself
# via a meta class. In that way the properties will always take
# precedence.
@property
def __module__(self):
return self.__wrapped__.__module__
@__module__.setter
def __module__(self, value):
self.__wrapped__.__module__ = value
@property
def __doc__(self):
return self.__wrapped__.__doc__
@__doc__.setter
def __doc__(self, value):
self.__wrapped__.__doc__ = value
# We similarly use a property for __dict__. We need __dict__ to be
# explicit to ensure that vars() works as expected.
@property
def __dict__(self):
return self.__wrapped__.__dict__
# We also need to propagate the special __weakref__ attribute for the
# case where we are decorating classes which will define this. If we do
# not define it and a function like inspect.getmembers() is used on a
# decorator class it will fail. This can't be in the derived classes.
@property
def __weakref__(self):
return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
def __new__(cls, name, bases, dictionary):
# Copy our special properties into the class so that they
# always take precedence over attributes of the same name added
# during construction of a derived class. This is to save
# duplicating the implementation for them in all derived classes.
dictionary.update(vars(_ObjectProxyMethods))
return type.__new__(cls, name, bases, dictionary)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
__slots__ = "__wrapped__"
def __init__(self, wrapped):
object.__setattr__(self, "__wrapped__", wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object.
try:
object.__setattr__(self, "__qualname__", wrapped.__qualname__)
except AttributeError:
pass
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
@property
def __annotations__(self):
return self.__wrapped__.__annotations__
@__annotations__.setter
def __annotations__(self, value):
self.__wrapped__.__annotations__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if PY3:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return "<{} at 0x{:x} for {} at 0x{:x}>".format(
type(self).__name__,
id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__),
)
def __reversed__(self):
return reversed(self.__wrapped__)
if PY3:
def __round__(self):
return round(self.__wrapped__)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith("_self_"):
object.__setattr__(self, name, value)
elif name == "__wrapped__":
object.__setattr__(self, name, value)
try:
object.__delattr__(self, "__qualname__")
except AttributeError:
pass
try:
object.__setattr__(self, "__qualname__", value.__qualname__)
except AttributeError:
pass
elif name == "__qualname__":
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
# If we are being asked to look up '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == "__wrapped__":
raise ValueError("wrapper has not been initialised")
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith("_self_"):
object.__delattr__(self, name)
elif name == "__wrapped__":
raise TypeError("__wrapped__ must be an object")
elif name == "__qualname__":
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __complex__(self):
return complex(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
def __copy__(self):
raise NotImplementedError("object proxy must define __copy__()")
def __deepcopy__(self, memo):
raise NotImplementedError("object proxy must define __deepcopy__()")
def __reduce__(self):
raise NotImplementedError("object proxy must define __reduce_ex__()")
def __reduce_ex__(self, protocol):
raise NotImplementedError("object proxy must define __reduce_ex__()")
class CallableObjectProxy(ObjectProxy):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
class PartialCallableObjectProxy(ObjectProxy):
def __init__(self, *args, **kwargs):
if len(args) < 1:
raise TypeError("partial type takes at least one argument")
wrapped, args = args[0], args[1:]
if not callable(wrapped):
raise TypeError("the first argument must be callable")
super(PartialCallableObjectProxy, self).__init__(wrapped)
self._self_args = args
self._self_kwargs = kwargs
def __call__(self, *args, **kwargs):
_args = self._self_args + args
_kwargs = dict(self._self_kwargs)
_kwargs.update(kwargs)
return self.__wrapped__(*_args, **_kwargs)
class _FunctionWrapperBase(ObjectProxy):
__slots__ = (
"_self_instance",
"_self_wrapper",
"_self_enabled",
"_self_binding",
"_self_parent",
)
def __init__(
self, wrapped, instance, wrapper, enabled=None, binding="function", parent=None
):
super(_FunctionWrapperBase, self).__init__(wrapped)
object.__setattr__(self, "_self_instance", instance)
object.__setattr__(self, "_self_wrapper", wrapper)
object.__setattr__(self, "_self_enabled", enabled)
object.__setattr__(self, "_self_binding", binding)
object.__setattr__(self, "_self_parent", parent)
def __get__(self, instance, owner):
# This method is actually doing double duty for both unbound and
# bound derived wrapper classes. It should possibly be broken up
# and the distinct functionality moved into the derived classes.
# Can't do that straight away due to some legacy code which is
# relying on it being here in this base class.
#
# The distinguishing attribute which determines whether we are
# being called in an unbound or bound wrapper is the parent
# attribute. If binding has never occurred, then the parent will
# be None.
#
# First therefore, is if we are called in an unbound wrapper. In
# this case we perform the binding.
#
# We have one special case to worry about here. This is where we
# are decorating a nested class. In this case the wrapped class
# would not have a __get__() method to call. In that case we
# simply return self.
#
# Note that we otherwise still do binding even if instance is
# None and accessing an unbound instance method from a class.
# This is because we need to be able to later detect that
# specific case as we will need to extract the instance from the
# first argument of those passed in.
if self._self_parent is None:
if not inspect.isclass(self.__wrapped__):
descriptor = self.__wrapped__.__get__(instance, owner)
return self.__bound_function_wrapper__(
descriptor,
instance,
self._self_wrapper,
self._self_enabled,
self._self_binding,
self,
)
return self
# Now we have the case of binding occurring a second time on what
# was already a bound function. In this case we would usually
# return ourselves again. This mirrors what Python does.
#
# The special case this time is where we were originally bound
# with an instance of None and we were likely an instance
# method. In that case we rebind against the original wrapped
# function from the parent again.
if self._self_instance is None and self._self_binding == "function":
descriptor = self._self_parent.__wrapped__.__get__(instance, owner)
return self._self_parent.__bound_function_wrapper__(
descriptor,
instance,
self._self_wrapper,
self._self_enabled,
self._self_binding,
self._self_parent,
)
return self
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# This can occur where initial function wrapper was applied to
# a function that was already bound to an instance. In that case
# we want to extract the instance from the function and use it.
if self._self_binding == "function":
if self._self_instance is None:
instance = getattr(self.__wrapped__, "__self__", None)
if instance is not None:
return self._self_wrapper(self.__wrapped__, instance, args, kwargs)
# This is generally invoked when the wrapped function is being
# called as a normal function and is not bound to a class as an
# instance method. This is also invoked in the case where the
# wrapped function was a method, but this wrapper was in turn
# wrapped using the staticmethod decorator.
return self._self_wrapper(self.__wrapped__, self._self_instance, args, kwargs)
class BoundFunctionWrapper(_FunctionWrapperBase):
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# We need to do things differently depending on whether we are
# likely wrapping an instance method vs a static method or class
# method.
if self._self_binding == "function":
if self._self_instance is None:
# This situation can occur where someone is calling the
# instance method via the class type and passing the instance
# as the first argument. We need to shift the args before
# making the call to the wrapper and effectively bind the
# instance to the wrapped function using a partial so the
# wrapper doesn't see anything as being different.
if not args:
raise TypeError("missing 1 required positional argument")
instance, args = args[0], args[1:]
wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(
self.__wrapped__, self._self_instance, args, kwargs
)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, then _self_instance will only tell us whether
# when calling the classmethod or staticmethod they did it via an
# instance of the class it is bound to and not the case where
# done by the class type itself. We thus ignore _self_instance
# and use the __self__ attribute of the bound function instead.
# For a classmethod, this means instance will be the class type
# and for a staticmethod it will be None. This is probably the
# more useful thing we can pass through even though we lose
# knowledge of whether they were called on the instance vs the
# class type, as it reflects what they have available in the
# decorated function.
instance = getattr(self.__wrapped__, "__self__", None)
return self._self_wrapper(self.__wrapped__, instance, args, kwargs)
class FunctionWrapper(_FunctionWrapperBase):
__bound_function_wrapper__ = BoundFunctionWrapper
def __init__(self, wrapped, wrapper, enabled=None):
# What it is we are wrapping here could be anything. We need to
# try and detect specific cases though. In particular, we need
# to detect when we are given something that is a method of a
# class. Further, we need to know when it is likely an instance
# method, as opposed to a class or static method. This can
# become problematic though as there isn't strictly a foolproof
# method of knowing.
#
# The situations we could encounter when wrapping a method are:
#
# 1. The wrapper is being applied as part of a decorator which
# is a part of the class definition. In this case what we are
# given is the raw unbound function, classmethod or staticmethod
# wrapper objects.
#
# The problem here is that we will not know we are being applied
# in the context of the class being set up. This becomes
# important later for the case of an instance method, because in
# that case we just see it as a raw function and can't
# distinguish it from wrapping a normal function outside of
# a class context.
#
# 2. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved direct from the __dict__ of the class
# type. This is effectively the same as (1) above.
#
# 3. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved from the class type. In this case
# binding will have been performed where the instance against
# which the method is bound will be None at that point.
#
# This case is a problem because we can no longer tell if the
# method was a static method, plus if using Python3, we cannot
# tell if it was an instance method as the concept of an
# unbound method no longer exists.
#
# 4. The wrapper is being applied when performing monkey
# patching of an instance of a class. In this case binding will
# have been performed where the instance was not None.
#
# This case is a problem because we can no longer tell if the
# method was a static method.
#
# Overall, the best we can do is look at the original type of the
# object which was wrapped prior to any binding being done and
# see if it is an instance of classmethod or staticmethod. In
# the case where other decorators are between us and them, if
# they do not propagate the __class__ attribute so that the
# isinstance() checks works, then likely this will do the wrong
# thing where classmethod and staticmethod are used.
#
# Since it is likely to be very rare that anyone even puts
# decorators around classmethod and staticmethod, likelihood of
# that being an issue is very small, so we accept it and suggest
# that those other decorators be fixed. It is also only an issue
# if a decorator wants to actually do things with the arguments.
#
# As to not being able to identify static methods properly, we
# just hope that that isn't something people are going to want
# to wrap, or if they do suggest they do it the correct way by
# ensuring that it is decorated in the class definition itself,
# or patch it in the __dict__ of the class type.
#
# So to get the best outcome we can, whenever we aren't sure what
# it is, we label it as a 'function'. If it was already bound and
# that is rebound later, we assume that it will be an instance
# method and try to cope with the possibility that the 'self'
# argument is being passed as an explicit argument and shuffle
# the arguments around to extract 'self' for use as the instance.
if isinstance(wrapped, classmethod):
binding = "classmethod"
elif isinstance(wrapped, staticmethod):
binding = "staticmethod"
elif hasattr(wrapped, "__self__"):
if inspect.isclass(wrapped.__self__):
binding = "classmethod"
else:
binding = "function"
else:
binding = "function"
super(FunctionWrapper, self).__init__(wrapped, None, wrapper, enabled, binding)
try:
if not os.environ.get("WRAPT_DISABLE_EXTENSIONS"):
from ._wrappers import (
ObjectProxy,
CallableObjectProxy,
PartialCallableObjectProxy,
FunctionWrapper,
BoundFunctionWrapper,
_FunctionWrapperBase,
)
except ImportError:
pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
if isinstance(module, string_types):
__import__(module)
module = sys.modules[module]
parent = module
path = name.split(".")
attribute = path[0]
original = getattr(parent, attribute)
for attribute in path[1:]:
parent = original
# We can't just always use getattr() because in doing
# that on a class it will cause binding to occur which
# will complicate things later and cause some things not
# to work. For the case of a class we therefore access
# the __dict__ directly. To cope though with the wrong
# class being given to us, or a method being moved into
# a base class, we need to walk the class hierarchy to
# work out exactly which __dict__ the method was defined
# in, as accessing it from __dict__ will fail if it was
# not actually on the class given. Fallback to using
# getattr() if we can't find it. If it truly doesn't
# exist, then that will fail.
if inspect.isclass(original):
for cls in inspect.getmro(original):
if attribute in vars(cls):
original = vars(cls)[attribute]
break
else:
original = getattr(original, attribute)
else:
original = getattr(original, attribute)
return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
(parent, attribute, original) = resolve_path(module, name)
wrapper = factory(original, *args, **kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
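# Illustrative sketch (hypothetical module/attribute names): monkey patch a
# method in place with
#   wrap_object('some.module', 'SomeClass.method', FunctionWrapper, (my_wrapper,))
# where my_wrapper has the signature wrapper(wrapped, instance, args, kwargs).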
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
def __init__(self, attribute, factory, args, kwargs):
self.attribute = attribute
self.factory = factory
self.args = args
self.kwargs = kwargs
def __get__(self, instance, owner):
value = instance.__dict__[self.attribute]
return self.factory(value, *self.args, **self.kwargs)
def __set__(self, instance, value):
instance.__dict__[self.attribute] = value
def __delete__(self, instance):
del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
path, attribute = name.rsplit(".", 1)
parent = resolve_path(module, path)[2]
wrapper = AttributeWrapper(attribute, factory, args, kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
def function_wrapper(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
return FunctionWrapper(target_wrapped, target_wrapper)
return FunctionWrapper(wrapper, _wrapper)
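# Illustrative sketch: function_wrapper() turns a plain
# wrapper(wrapped, instance, args, kwargs) function into a decorator, e.g.
#
#   @function_wrapper
#   def log_call(wrapped, instance, args, kwargs):
#       print('calling', wrapped.__name__)
#       return wrapped(*args, **kwargs)
#
#   @log_call
#   def add(a, b):
#       return a + b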
def wrap_function_wrapper(module, name, wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
def _wrapper(wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
return _wrapper
def transient_function_wrapper(module, name):
def _decorator(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
def _execute(wrapped, instance, args, kwargs):
(parent, attribute, original) = resolve_path(module, name)
replacement = FunctionWrapper(original, target_wrapper)
setattr(parent, attribute, replacement)
try:
return wrapped(*args, **kwargs)
finally:
setattr(parent, attribute, original)
return FunctionWrapper(target_wrapped, _execute)
return FunctionWrapper(wrapper, _wrapper)
return _decorator
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
class WeakFunctionProxy(ObjectProxy):
__slots__ = ("_self_expired", "_self_instance")
def __init__(self, wrapped, callback=None):
# We need to determine if the wrapped function is actually a
# bound method. In the case of a bound method, we need to keep a
# reference to the original unbound function and the instance.
# This is necessary because if we hold a reference to the bound
# function, it will be the only reference and given it is a
# temporary object, it will almost immediately expire and
# the weakref callback triggered. So what is done is that we
# hold a reference to the instance and unbound function and
# when called bind the function to the instance once again and
# then call it. Note that we avoid using a nested function for
# the callback here so as not to cause any odd reference cycles.
_callback = callback and functools.partial(
_weak_function_proxy_callback, proxy=self, callback=callback
)
self._self_expired = False
if isinstance(wrapped, _FunctionWrapperBase):
self._self_instance = weakref.ref(wrapped._self_instance, _callback)
if wrapped._self_parent is not None:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped._self_parent, _callback)
)
else:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback)
)
return
try:
self._self_instance = weakref.ref(wrapped.__self__, _callback)
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped.__func__, _callback)
)
except AttributeError:
self._self_instance = None
super(WeakFunctionProxy, self).__init__(weakref.proxy(wrapped, _callback))
def __call__(self, *args, **kwargs):
# We perform a boolean check here on the instance and wrapped
# function as that will trigger the reference error prior to
# calling if the reference had expired.
instance = self._self_instance and self._self_instance()
function = self.__wrapped__ and self.__wrapped__
# If the wrapped function was originally a bound function, for
# which we retained a reference to the instance and the unbound
# function we need to rebind the function and then call it. If
# not, we just call the wrapped function.
if instance is None:
return self.__wrapped__(*args, **kwargs)
return function.__get__(instance, type(instance))(*args, **kwargs)
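# Illustrative sketch (hypothetical names): wrapping a bound method stores only
# weak references, so the proxy does not keep the instance alive:
#   obj = SomeClass()
#   proxy = WeakFunctionProxy(obj.method)
#   proxy()   # rebinds the function to the instance and calls it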
|
453506
|
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import logging
import os
import stat
from subprocess import Popen, PIPE
import io
import numpy as np
import shutil
from contextlib import contextmanager
import gzip
logger = logging.getLogger(__name__)
initialized = False
def call_command(cmd, log_file=None, return_std=False):
if log_file is not None:
with io.open(log_file, "wb") as log:
proc = Popen(cmd, stdout=log, stderr=log, shell=True)
_ = proc.communicate()
exitcode = proc.returncode
if exitcode != 0:
raise RuntimeError(
"Calling command {} returned exit code {}. Output in file {}.".format(cmd, exitcode, log_file)
)
else:
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
out, err = proc.communicate()
exitcode = proc.returncode
if exitcode != 0:
raise RuntimeError(
"Calling command {} returned exit code {}.\n\nStd output:\n\n{}Error output:\n\n{}".format(
cmd, exitcode, out, err
)
)
if return_std:
return out, err
return exitcode
def unzip_file(filename, new_filename, block_size=65536):
# call_command("gunzip -c {} > {}".format(filename, new_filename)) # Doesn't work under windows
with gzip.open(filename, "rb") as s_file, open(new_filename, "wb") as d_file:
shutil.copyfileobj(s_file, d_file, block_size)
def create_missing_folders(folders):
if folders is None:
return
for folder in folders:
if folder is None or folder == "":
continue
if not os.path.exists(folder):
os.makedirs(folder)
elif not os.path.isdir(folder):
raise OSError("Path {} exists, but is no directory!".format(folder))
def format_benchmark(parameters, precision=2):
output = ""
for i, (key, value) in enumerate(six.iteritems(parameters)):
if i > 0:
output += ", "
value = float(value)
if value < 2.0 * 10.0 ** (-precision) or value > 100.0:
output += str(key) + (" = {0:." + str(precision) + "e}").format(value)
else:
output += str(key) + (" = {0:." + str(precision) + "f}").format(value)
return output
def shuffle(*arrays):
""" Shuffles multiple arrays simultaneously """
permutation = None
n_samples = None
shuffled_arrays = []
for i, a in enumerate(arrays):
if a is None:
shuffled_arrays.append(a)
continue
if permutation is None:
n_samples = a.shape[0]
permutation = np.random.permutation(n_samples)
if a.shape[0] != n_samples:
raise RuntimeError(
"Mismatching shapes when trying to simultaneously shuffle: {}".format(
[None if val is None else val.shape for val in arrays]
)
)
shuffled_a = a[permutation]
shuffled_arrays.append(shuffled_a)
a = None
return shuffled_arrays
def restrict_samplesize(n, *arrays):
restricted_arrays = []
for i, a in enumerate(arrays):
if a is None:
restricted_arrays.append(None)
continue
restricted_arrays.append(a[:n])
return restricted_arrays
def balance_thetas(theta_sets_types, theta_sets_values, n_sets=None):
"""Repeats theta values such that all thetas lists have the same length """
if n_sets is None:
n_sets = max([len(thetas) for thetas in theta_sets_types])
for i, (types, values) in enumerate(zip(theta_sets_types, theta_sets_values)):
assert len(types) == len(values)
n_sets_before = len(types)
if n_sets_before != n_sets:
theta_sets_types[i] = [types[j % n_sets_before] for j in range(n_sets)]
theta_sets_values[i] = [values[j % n_sets_before] for j in range(n_sets)]
return theta_sets_types, theta_sets_values
def sanitize_array(array, replace_nan=0.0, replace_inf=0.0, replace_neg_inf=0.0, min_value=None, max_value=None):
array[np.isneginf(array)] = replace_neg_inf
array[np.isinf(array)] = replace_inf
array[np.isnan(array)] = replace_nan
if min_value is not None or max_value is not None:
array = np.clip(array, min_value, max_value)
return array
def load_and_check(filename, warning_threshold=1.0e9, memmap_files_larger_than_gb=None):
if filename is None:
return None
if not isinstance(filename, six.string_types):
data = filename
memmap = False
else:
filesize_gb = os.stat(filename).st_size / 1024.0 ** 3
if memmap_files_larger_than_gb is None or filesize_gb <= memmap_files_larger_than_gb:
logger.info(" Loading %s into RAM", filename)
data = np.load(filename)
memmap = False
else:
logger.info(" Loading %s as memory map", filename)
data = np.load(filename, mmap_mode="c")
memmap = True
if not memmap:
n_nans = np.sum(np.isnan(data))
n_infs = np.sum(np.isinf(data))
n_finite = np.sum(np.isfinite(data))
if n_nans + n_infs > 0:
logger.warning(
"%s contains %s NaNs and %s Infs, compared to %s finite numbers!", filename, n_nans, n_infs, n_finite
)
smallest = np.nanmin(data)
largest = np.nanmax(data)
if np.abs(smallest) > warning_threshold or np.abs(largest) > warning_threshold:
logger.warning("Warning: file %s has some large numbers, rangin from %s to %s", filename, smallest, largest)
if len(data.shape) == 1:
data = data.reshape(-1, 1)
return data
def math_commands():
"""Provides list with math commands - we need this when using eval"""
from math import acos, asin, atan, atan2, ceil, cos, cosh, exp, floor, log, pi, pow, sin, sinh, sqrt, tan, tanh
functions = [
"acos",
"asin",
"atan",
"atan2",
"ceil",
"cos",
"cosh",
"exp",
"floor",
"log",
"pi",
"pow",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
]
mathdefinitions = {}
for f in functions:
mathdefinitions[f] = locals().get(f, None)
return mathdefinitions
def make_file_executable(filename):
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC)
def copy_file(source, destination):
if source is None:
return
shutil.copyfile(source, destination)
def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False):
"""
Calculates quantiles (similar to np.percentile), but supports weights.
Parameters
----------
values : ndarray
Data
quantiles : ndarray
Which quantiles to calculate
sample_weight : ndarray or None
Weights
values_sorted : bool
If True, will avoid sorting the initial array
old_style : bool
If True, will correct output to be consistent with np.percentile
Returns
-------
quantiles : ndarray
Quantiles
"""
# Input
values = np.array(values, dtype=np.float64)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight, dtype=np.float64)
assert np.all(quantiles >= 0.0) and np.all(quantiles <= 1.0), "quantiles should be in [0, 1]"
# Sort
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
# Quantiles
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
# Postprocessing
if old_style:
# To be consistent with np.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
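# Example: weighted_quantile([1, 2, 3, 4], [0.5], sample_weight=[1, 1, 1, 1])
# returns 2.5, i.e. the ordinary median when all weights are equal.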
def approx_equal(a, b, epsilon=1.0e-6):
return abs(a - b) < epsilon
def separate_information_blocks(fisher_information, parameters_of_interest):
# Find indices
n_parameters = len(fisher_information)
n_poi = len(parameters_of_interest)
poi_checked = []
nuisance_params = []
for i in range(n_parameters):
if i in parameters_of_interest:
poi_checked.append(i)
else:
nuisance_params.append(i)
assert n_poi == len(poi_checked), "Inconsistent input"
# Separate Fisher information parts
information_phys = fisher_information[parameters_of_interest, :][:, parameters_of_interest]
information_mix = fisher_information[nuisance_params, :][:, parameters_of_interest]
information_nuisance = fisher_information[nuisance_params, :][:, nuisance_params]
return nuisance_params, information_phys, information_mix, information_nuisance
def mdot(matrix, benchmark_information):
"""
Calculates a product between a matrix / matrices with shape (n1) or (a, n1) and a weight list with shape (b, n2)
or (n2,), where n1 and n2 do not have to be the same
"""
n1 = matrix.shape[-1]
weights_t = benchmark_information.T
n2 = weights_t.shape[0]
n_smaller = min(n1, n2)
if n1 > n2:
matrix = matrix.T
matrix = matrix[:n_smaller]
matrix = matrix.T
elif n2 > n1:
weights_t = weights_t[:n_smaller]
return matrix.dot(weights_t)
@contextmanager
def less_logging():
"""
Silences INFO logging messages. Based on https://gist.github.com/simon-weber/7853144
"""
if logging.root.manager.disable != logging.DEBUG:
yield
return
try:
logging.disable(logging.INFO)
yield
finally:
logging.disable(logging.DEBUG)
|
453549
|
import unittest
from config import Config as cfg
import requests
namespace = '/camera'
class Camera(unittest.TestCase):
routeUrl = cfg.serverUrl + "/camera"
camerasList = [1, 2, 3]
def getAllImagesFromCamera(self):
for camera in self.camerasList:
r = requests.get(f'{self.routeUrl}/{camera}/images')
def getCameraList(self):
r = requests.get(f'{self.routeUrl}/list')
def getCamera(self):
for camera in self.camerasList:
r = requests.get(f'{self.routeUrl}/{camera}/')
# def test_IsAllCamerasAvailable(self):
# for camera in self.camerasList:
# r = requests.get(f"{self.routeUrl}/{camera}")
# self.assertEqual(200, r.status_code)
if __name__ == '__main__':
unittest.main()
|
453584
|
import os
import sys
import ast
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from lib.dataset.camvid import camvid_video_dataset, camvid_video_dataset_PDA
from lib.dataset.utils import runningScore
from lib.model.scnet import SCNet_dmnet as SCNet
def get_arguments():
parser = argparse.ArgumentParser(description="Train DMNet")
###### general setting ######
parser.add_argument("--exp_name", type=str, help="exp name")
parser.add_argument("--root_data_path", type=str, help="root path to the dataset")
parser.add_argument("--root_gt_path", type=str, help="root path to the ground truth")
parser.add_argument("--root_mask_path", type=str, help="root path to the deeplab mask")
parser.add_argument("--train_list_path", type=str, help="path to the list of train subset")
parser.add_argument("--test_list_path", type=str, help="path to the list of test subset")
###### training setting ######
parser.add_argument("--model_name", type=str, help="name for the training model")
parser.add_argument("--resume", type=ast.literal_eval, default=False, help="resume or not")
parser.add_argument("--resume_epoch", type=int, help="from which epoch for resume")
parser.add_argument("--resume_load_path", type=str, help="resume model load path")
parser.add_argument("--train_load_path", type=str, help="train model load path")
parser.add_argument("--local_rank", type=int, help="index the replica")
parser.add_argument("--dmnet_lr", type=float, help="learning rate")
parser.add_argument("--random_seed", type=int, help="random seed")
parser.add_argument("--train_flownet", type=ast.literal_eval, default=True, help="trian flownet or not")
parser.add_argument("--train_power", type=float, help="power value for linear learning rate schedule")
parser.add_argument("--final_lr", type=float, default=0.00001, help="learning rate in the second stage")
parser.add_argument("--weight_decay", type=float, help="learning rate")
parser.add_argument("--train_batch_size", type=int, help="train batch size")
parser.add_argument("--train_shuffle", type=ast.literal_eval, default=True, help="shuffle or not in training")
parser.add_argument("--train_num_workers", type=int, default=8, help="num cpu use")
parser.add_argument("--num_epoch", type=int, default=100, help="num of epoch in training")
parser.add_argument("--snap_shot", type=int, default=1, help="save model every per snap_shot")
parser.add_argument("--model_save_path", type=str, help="model save path")
###### testing setting ######
parser.add_argument("--test_batch_size", type=int, default=1, help="batch_size for validation")
parser.add_argument("--test_shuffle", type=ast.literal_eval, default=False, help="shuffle or not in validation")
parser.add_argument("--test_num_workers", type=int, default=4, help="num of used cpus in validation")
###### tensorboard setting ######
parser.add_argument("--use_tensorboard", type=ast.literal_eval, default=True, help="use tensorboard or not")
parser.add_argument("--tblog_dir", type=str, help="log save path")
parser.add_argument("--tblog_interval", type=int, default=50, help="interval for tensorboard logging")
return parser.parse_args()
def make_dirs(args):
if args.use_tensorboard and not os.path.exists(args.tblog_dir):
os.makedirs(args.tblog_dir)
if not os.path.exists(args.model_save_path):
os.makedirs(args.model_save_path)
def train():
torch.distributed.init_process_group(backend="nccl")
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
args = get_arguments()
if local_rank == 0:
print(args)
make_dirs(args)
random_seed = args.random_seed
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
if local_rank == 0:
print('random seed:{}'.format(random_seed))
if local_rank == 0 and args.use_tensorboard:
tblogger = SummaryWriter(args.tblog_dir)
net = SCNet(n_classes=11)
map_location = {'cuda:%d' % 0: 'cuda:%d' % local_rank}
if args.resume:
old_weight = torch.load(args.resume_load_path, map_location=map_location)
start_epoch = args.resume_epoch
else:
old_weight = torch.load(args.train_load_path, map_location=map_location)
start_epoch = 0
new_weight = {}
for k, v in old_weight.items():
k = k.replace('module.', '')
new_weight[k] = v
if args.resume:
net.load_state_dict(new_weight, strict=True)
else:
net.load_state_dict(new_weight, strict=False)
if local_rank == 0:
print('Successful loading model!')
net.cuda()
net = nn.parallel.DistributedDataParallel(net,
device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True)
train_data = camvid_video_dataset(args.root_data_path, args.root_gt_path, args.train_list_path, crop_size=None)
train_data_loader = torch.utils.data.DataLoader(train_data,
batch_size=args.train_batch_size,
shuffle=False,
pin_memory=False,
num_workers=args.train_num_workers,
drop_last=True,
sampler=DistributedSampler(train_data,
num_replicas=world_size,
rank=local_rank,
shuffle=True))
test_data = camvid_video_dataset_PDA(args.root_data_path, args.root_gt_path, args.test_list_path)
test_data_loader = torch.utils.data.DataLoader(test_data,
batch_size=args.test_batch_size,
shuffle=args.test_shuffle,
num_workers=args.test_num_workers)
dmnet_params = []
for m in net.module.dmnet.modules():
for p in m.parameters():
dmnet_params.append(p)
dmnet_optimizer = optim.Adam(params=dmnet_params, lr=args.dmnet_lr, betas=(0.9, 0.999), weight_decay=0)
running_loss = 0.0
current_eval_loss = 100
itr = start_epoch * len(train_data_loader)
max_itr = args.num_epoch * len(train_data_loader)
for epoch in range(start_epoch, args.num_epoch):
net.module.deeplab.eval()
net.module.flownet.eval()
net.module.dmnet.train()
train_data_loader.sampler.set_epoch(epoch)
for i, data_batch in enumerate(train_data_loader):
img_list, gt_label = data_batch
adjust_lr(args, dmnet_optimizer, itr, max_itr, args.dmnet_lr)
dmnet_optimizer.zero_grad()
loss_dmnet = net(img_list)
loss_dmnet = torch.mean(loss_dmnet)
loss_dmnet.backward()
dmnet_optimizer.step()
if local_rank == 0:
print('epoch:{}/{} batch:{}/{} iter:{} loss_dmnet:{:05f}'.format(epoch, args.num_epoch, i,
len(train_data_loader), itr,
loss_dmnet.item()))
if args.use_tensorboard and itr % args.tblog_interval == 0:
tblogger.add_scalar('loss_dmnet', loss_dmnet.item(), itr)
itr += 1
# if i == 5:
# break
dist.barrier()
if (epoch+1) % args.snap_shot == 0:
net.eval()
distance_list = [1, 5, 9]
eval_loss = []
for d in distance_list:
loss = 0.0
with torch.no_grad():
for step, sample in enumerate(test_data_loader):
if local_rank == 0:
print(d, step)
img_list, gt_label = sample
gt_label = gt_label.squeeze().cpu().numpy()
img = img_list[9 - d].cuda()
feat = net.module.deeplab(img)
warp_im = F.interpolate(img, scale_factor=0.25, mode='bilinear', align_corners=True)
for i in range(d):
img_1 = img_list[9 - d + i].cuda()
img_2 = img_list[10 - d + i].cuda()
flow = net.module.flownet(torch.cat([img_2, img_1], dim=1))
feat = net.module.warpnet(feat, flow)
warp_im = net.module.warpnet(warp_im, flow)
img_2_down = F.interpolate(img_2, scale_factor=0.25, mode='bilinear', align_corners=True)
dm = net.module.dmnet(warp_im, img_2_down)
dm = F.interpolate(dm, scale_factor=4, mode='bilinear', align_corners=True)
feat = F.interpolate(feat, scale_factor=4, mode='bilinear', align_corners=True)
out = torch.argmax(feat, dim=1, keepdim=True)
feat_cur = net.module.deeplab(img_2)
feat_cur = F.interpolate(feat_cur, scale_factor=4, mode='bilinear', align_corners=True)
out_cur = torch.argmax(feat_cur, dim=1, keepdim=True)
label = (out != out_cur).float()
loss += F.l1_loss(dm, label).squeeze().item()
# if step == 3:
# break
loss /= len(test_data_loader)
eval_loss.append(loss)
if local_rank == 0:
print('distance:{} eval_loss:{}'.format(d, loss))
if local_rank == 0:
if args.use_tensorboard:
for i, d in enumerate(distance_list):
tblogger.add_scalar('dmnet_eval_loss/distance_{}'.format(d), eval_loss[i], epoch)
dm = dm[0]
tblogger.add_image('dmnet', dm, epoch)
label = label[0]
tblogger.add_image('dmnet_GT', label, epoch)
save_name = 'now.pth'
save_path = os.path.join(args.model_save_path, save_name)
torch.save(net.module.dmnet.state_dict(), save_path)
dist.barrier()
if local_rank == 0:
save_name = 'final.pth'
save_path = os.path.join(args.model_save_path, save_name)
torch.save(net.module.dmnet.state_dict(), save_path)
print('%s has been saved' % save_path)
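# Two-stage schedule: keep the base learning rate for the first half of
# training, then drop it by a factor of 10 for the remaining iterations.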
def adjust_lr(args, optimizer, itr, max_itr, lr):
if itr > max_itr / 2:
now_lr = lr / 10
else:
now_lr = lr
for group in optimizer.param_groups:
group['lr'] = now_lr
if __name__ == '__main__':
train()
dist.destroy_process_group()
|
453591
|
from collections import namedtuple
from typing import Any, Text, Iterable, List, Dict
from onnx import AttributeProto, numpy_helper
import numpy as np
__author__ = "<NAME>, <NAME> (University of Tuebingen, Chair for Embedded Systems)"
def _convertAttributeProto(onnx_arg):
"""
Convert an ONNX AttributeProto into an appropriate Python object for the type.
:param onnx_arg: An ONNX AttributeProto attached to a node of the graph.
:return: The attribute value; tensor attributes are returned as numpy arrays.
"""
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return numpy_helper.to_array(onnx_arg.t)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
EdgeInfo = namedtuple('EdgeInfo', ['name', 'type', 'shape'])
def _input_from_onnx_input(input) -> EdgeInfo:
"""
    Create EdgeInfo named tuple containing name, type and shape of the input.
    :param input: Input or output of the graph representation of the CNN.
    :return: EdgeInfo tuple
"""
name = input.name
type = input.type.tensor_type.elem_type
shape = tuple([d.dim_value for d in input.type.tensor_type.shape.dim])
return EdgeInfo(name, type, shape)
class Attributes(Dict[Text, Any]):
"""
Custom dictionary object containing the parsed information from the protobuf attributes.
"""
@staticmethod
def from_onnx(args: AttributeProto) -> Any:
d = Attributes()
for arg in args:
d[arg.name] = _convertAttributeProto(arg)
return d
class ComputeNode(object):
"""
Contains all important information of a node in the graph representing the CNN.
"""
def __init__(self, name: str,
op_type: str,
attrs: Attributes,
inputs: List[str],
outputs: List[str]) -> None:
self.name: str = name
self.op_type: str = op_type
self.attrs: Attributes = attrs
self.inputs: List[str] = inputs
self.outputs: List[str] = outputs
self.input_tensors: Dict[str, np.ndarray] = {}
self.parents: List[ComputeNode] = []
self.children: List[ComputeNode] = []
self.metadata: Dict[Any, Any] = {}
@staticmethod
def from_onnx(node) -> Any:
attrs = Attributes.from_onnx(node.attribute)
name = Text(node.name)
if len(name) == 0:
name = node.op_type + "_".join(node.output)
return ComputeNode(name, node.op_type, attrs, list(node.input), list(node.output))
class ComputeGraph(object):
"""
Graph representing the CNN.
"""
def __init__(self, nodes: List[ComputeNode], inputs, outputs, shape_dict):
self.nodes: List[ComputeNode] = nodes
self.inputs = inputs
self.outputs = outputs
self.shape_dict: Dict[str, np.ndarray] = shape_dict
@staticmethod
def from_onnx(graph) -> Any:
"""
Create the ComputeGraph from the onnx model.
:param graph: The graph stored in the onnx file.
:return: ComputeGraph representing the CNN.
"""
input_tensors = {
t.name: numpy_helper.to_array(t) for t in graph.initializer
}
# Dictionary to hold the "value_info" field from ONNX graph
shape_dict: Dict[Text, Any] = {}
for input_t in input_tensors:
shape_dict[input_t] = input_tensors[input_t].shape
nodes_ = []
nodes_by_input: Dict[str, List[ComputeNode]] = {}
nodes_by_output: Dict[str, ComputeNode] = {}
for node in graph.node:
node_ = ComputeNode.from_onnx(node)
for input_ in node_.inputs:
if input_ in input_tensors:
node_.input_tensors[input_] = input_tensors[input_]
else:
if input_ in nodes_by_input:
input_nodes = nodes_by_input[input_]
else:
input_nodes = []
nodes_by_input[input_] = input_nodes
input_nodes.append(node_)
for output_ in node_.outputs:
nodes_by_output[output_] = node_
nodes_.append(node_)
inputs = []
for i in graph.input:
if i.name not in input_tensors:
inp = _input_from_onnx_input(i)
inputs.append(inp)
shape_dict[inp.name] = inp.shape
outputs = []
for o in graph.output:
out = _input_from_onnx_input(o)
outputs.append(out)
shape_dict[out.name] = out.shape
for node_ in nodes_:
for input_ in node_.inputs:
if input_ in nodes_by_output:
node_.parents.append(nodes_by_output[input_])
for output_ in node_.outputs:
if output_ in nodes_by_input:
node_.children.extend(nodes_by_input[output_])
def extract_value_info(shape_dict, value_info):
t = tuple([int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])
if t:
shape_dict[value_info.name] = t
for value_info in graph.value_info:
extract_value_info(shape_dict, value_info)
return ComputeGraph(nodes_, inputs, outputs, shape_dict)
def remove_node(self, node):
print("Removing node", node.name)
if node not in self.nodes:
return
self.nodes.remove(node)
for parent in node.parents:
parent.children.remove(node)
if not parent.children:
self.remove_node(parent)
for child in node.children:
child.parents.remove(node)
def get_shape(self, name: Text) -> Iterable[int]:
if name in self.shape_dict:
return self.shape_dict[name]
return ()
def is_input(self, name: Text) -> bool:
for input in self.inputs:
if input.name == name:
return True
return False
def is_output(self, name: Text) -> bool:
for output in self.outputs:
if output.name == name:
return True
return False
def is_tensor(self, name: Text) -> bool:
for node in self.nodes:
if name in node.input_tensors:
return True
return False
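# Usage sketch (the model path below is a placeholder; assumes the `onnx`
# package imported above is also used to load the file):
#
#   import onnx
#   model = onnx.load("model.onnx")
#   graph = ComputeGraph.from_onnx(model.graph)
#   for node in graph.nodes:
#       print(node.op_type, [graph.get_shape(o) for o in node.outputs])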
|
453615
|
def simple_linear_regression_traditional(x, y):
"Traditional linear regression with B0 intercept, B1 slope"
import numpy as np
x = np.array(x); y = np.array(y)
mean_x = np.mean(x)
mean_y = np.mean(y)
err_x = x - mean_x
err_y = y - mean_y
err_mult = err_x * err_y
numerator = np.sum(err_mult)
err_x_squared = err_x**2
denominator = np.sum(err_x_squared)
B1 = numerator / denominator
B0 = mean_y - B1 * mean_x
return(B0, B1)
def simple_linear_regression_advanced(x, y):
"Covariance method linear regression with B0 intercept, B1 slope"
import numpy as np
import statistics as stat
x = np.array(x); y = np.array(y)
mean_x = np.mean(x)
mean_y = np.mean(y)
stdev_x = stat.stdev(x)
stdev_y = stat.stdev(y)
cov_x_y = (np.sum((x - mean_x) * (y - mean_y))) * (1 / (len(x) - 1))
corr_x_y = cov_x_y / (stdev_x * stdev_y)
B1 = corr_x_y * (stdev_y / stdev_x)
B0 = mean_y - B1 * mean_x
return(B0, B1)
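# Quick sanity check on made-up data; both estimators should agree
# (roughly B0 = 0.23, B1 = 1.93 for this toy sample).
if __name__ == "__main__":
    x = [1, 2, 3, 4, 5]
    y = [2.1, 4.2, 5.9, 8.1, 9.8]
    print(simple_linear_regression_traditional(x, y))
    print(simple_linear_regression_advanced(x, y))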
|
453627
|
import pytest
from model_mommy import mommy
from rest_framework import status
@pytest.fixture
def awards_transaction_data(db):
mommy.make(
"awards.Award",
id=1,
generated_unique_award_id="CONT_AWD_zzz_whatever",
piid="zzz",
fain="abc123",
type="B",
total_obligation=1000,
)
mommy.make("awards.TransactionNormalized", id=1, award_id=1)
mommy.make(
"awards.Award",
id=2,
generated_unique_award_id="CONT_AWD_aaa_whatever",
piid="aaa",
fain="abc123",
type="B",
total_obligation=1000,
)
mommy.make("awards.TransactionNormalized", id=2, award_id=2)
mommy.make("awards.TransactionNormalized", id=3, award_id=2)
mommy.make("awards.TransactionNormalized", id=4, award_id=2)
mommy.make(
"awards.Award",
id=3,
generated_unique_award_id="ASST_NON_bbb_abc123",
piid="bbb",
fain="abc123",
type="04",
total_obligation=1000,
)
def test_award_success(client, awards_transaction_data):
"""Test transaction count endpoint"""
resp = client.get("/api/v2/awards/count/transaction/CONT_AWD_zzz_whatever/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 1
resp = client.get("/api/v2/awards/count/transaction/1/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 1
resp = client.get("/api/v2/awards/count/transaction/CONT_AWD_aaa_whatever/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 3
resp = client.get("/api/v2/awards/count/transaction/2/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 3
def test_award_no_transactions(client, awards_transaction_data):
"""Test transaction count endpoint for award with no transactions"""
resp = client.get("/api/v2/awards/count/transaction/ASST_NON_bbb_abc123/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 0
resp = client.get("/api/v2/awards/count/transaction/3/")
assert resp.status_code == status.HTTP_200_OK
assert resp.data["transactions"] == 0
def test_missing_award(client, awards_transaction_data):
"""Test transaction count endpoint for award that does not exist"""
resp = client.get("/api/v2/awards/count/transaction/4/")
assert resp.status_code == status.HTTP_404_NOT_FOUND
assert resp.data["detail"] == "No Award found with: '4'"
|
453643
|
import contextlib
from typing import Optional
import click
from click import Context
from valohai_yaml.objs import Config, Pipeline
from valohai_cli.api import request
from valohai_cli.commands.pipeline.run.utils import build_edges, build_nodes, match_pipeline
from valohai_cli.ctx import get_project
from valohai_cli.messages import success
from valohai_cli.utils.commits import create_or_resolve_commit
@click.command(
context_settings=dict(ignore_unknown_options=True),
add_help_option=False
)
@click.argument('name', required=False, metavar='PIPELINE-NAME')
@click.option('--commit', '-c', default=None, metavar='SHA', help='The commit to use. Defaults to the current HEAD.')
@click.option('--title', default=None, help='The optional title of the pipeline run.')
@click.option('--adhoc', '-a', is_flag=True, help='Upload the current state of the working directory, then run it as an ad-hoc execution.')
@click.pass_context
def run(ctx: Context, name: Optional[str], commit: Optional[str], title: Optional[str], adhoc: bool) -> None:
"""
Start a pipeline run.
"""
# Having to explicitly compare to `--help` is slightly weird, but it's because of the nested command thing.
if name == '--help' or not name:
click.echo(ctx.get_help(), color=ctx.color)
print_pipeline_list(ctx, commit)
ctx.exit()
return
project = get_project(require=True)
assert project
commit = create_or_resolve_commit(project, commit=commit, adhoc=adhoc)
config = project.get_config()
matched_pipeline = match_pipeline(config, name)
pipeline = config.pipelines[matched_pipeline]
start_pipeline(config, pipeline, project.id, commit, title)
def print_pipeline_list(ctx: Context, commit: Optional[str]) -> None:
with contextlib.suppress(Exception): # If we fail to extract the pipeline list, it's not that big of a deal.
project = get_project(require=True)
assert project
config = project.get_config(commit_identifier=commit)
if config.pipelines:
click.secho('\nThese pipelines are available in the selected commit:\n', color=ctx.color, bold=True)
for pipeline_name in sorted(config.pipelines):
click.echo(f' * {pipeline_name}', color=ctx.color)
def start_pipeline(
config: Config,
pipeline: Pipeline,
project_id: str,
commit: str,
title: Optional[str] = None,
) -> None:
edges = build_edges(pipeline)
nodes = build_nodes(commit, config, pipeline)
payload = {
"edges": edges,
"nodes": nodes,
"project": project_id,
"title": title or pipeline.name,
}
resp = request(
method='post',
url='/api/v0/pipelines/',
json=payload,
).json()
success(f"Pipeline ={resp.get('counter')} queued. See {resp.get('urls').get('display')}")
|
453644
|
import numpy as np
def linear(t):
return t
def in_quad(t):
return t * t
def out_quad(t):
return -t * (t - 2)
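# Worked example: out_quad(np.linspace(0, 1, 5)) gives
# [0., 0.4375, 0.75, 0.9375, 1.], since out_quad(t) = 2*t - t**2.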
def in_out_quad(t):
u = 2 * t - 1
a = 2 * t * t
b = -0.5 * (u * (u - 2) - 1)
return np.where(t < 0.5, a, b)
def in_cubic(t):
return t * t * t
def out_cubic(t):
u = t - 1
return u * u * u + 1
def in_out_cubic(t):
u = t * 2
v = u - 2
a = 0.5 * u * u * u
b = 0.5 * (v * v * v + 2)
return np.where(u < 1, a, b)
def in_quart(t):
return t * t * t * t
def out_quart(t):
u = t - 1
return -(u * u * u * u - 1)
def in_out_quart(t):
u = t * 2
v = u - 2
a = 0.5 * u * u * u * u
b = -0.5 * (v * v * v * v - 2)
return np.where(u < 1, a, b)
def in_quint(t):
return t * t * t * t * t
def out_quint(t):
u = t - 1
return u * u * u * u * u + 1
def in_out_quint(t):
u = t * 2
v = u - 2
a = 0.5 * u * u * u * u * u
b = 0.5 * (v * v * v * v * v + 2)
return np.where(u < 1, a, b)
def in_sine(t):
return -np.cos(t * np.pi / 2) + 1
def out_sine(t):
return np.sin(t * np.pi / 2)
def in_out_sine(t):
return -0.5 * (np.cos(np.pi * t) - 1)
def in_expo(t):
a = np.zeros(len(t))
b = 2 ** (10 * (t - 1))
return np.where(t == 0, a, b)
def out_expo(t):
a = np.zeros(len(t)) + 1
b = 1 - 2 ** (-10 * t)
return np.where(t == 1, a, b)
def in_out_expo(t):
zero = np.zeros(len(t))
one = zero + 1
a = 0.5 * 2 ** (20 * t - 10)
b = 1 - 0.5 * 2 ** (-20 * t + 10)
return np.where(t == 0, zero, np.where(t == 1, one, np.where(t < 0.5, a, b)))
def in_circ(t):
return -1 * (np.sqrt(1 - t * t) - 1)
def out_circ(t):
u = t - 1
return np.sqrt(1 - u * u)
def in_out_circ(t):
u = t * 2
v = u - 2
a = -0.5 * (np.sqrt(1 - u * u) - 1)
b = 0.5 * (np.sqrt(1 - v * v) + 1)
return np.where(u < 1, a, b)
def in_elastic(t, k=0.5):
u = t - 1
return -1 * (2 ** (10 * u) * np.sin((u - k / 4) * (2 * np.pi) / k))
def out_elastic(t, k=0.5):
return 2 ** (-10 * t) * np.sin((t - k / 4) * (2 * np.pi / k)) + 1
def in_out_elastic(t, k=0.5):
u = t * 2
v = u - 1
a = -0.5 * (2 ** (10 * v) * np.sin((v - k / 4) * 2 * np.pi / k))
b = 2 ** (-10 * v) * np.sin((v - k / 4) * 2 * np.pi / k) * 0.5 + 1
return np.where(u < 1, a, b)
def in_back(t):
k = 1.70158
return t * t * ((k + 1) * t - k)
def out_back(t):
k = 1.70158
u = t - 1
return u * u * ((k + 1) * u + k) + 1
def in_out_back(t):
k = 1.70158 * 1.525
u = t * 2
v = u - 2
a = 0.5 * (u * u * ((k + 1) * u - k))
b = 0.5 * (v * v * ((k + 1) * v + k) + 2)
return np.where(u < 1, a, b)
def in_bounce(t):
return 1 - out_bounce(1 - t)
def out_bounce(t):
a = (121 * t * t) / 16
b = (363 / 40 * t * t) - (99 / 10 * t) + 17 / 5
c = (4356 / 361 * t * t) - (35442 / 1805 * t) + 16061 / 1805
d = (54 / 5 * t * t) - (513 / 25 * t) + 268 / 25
return np.where(
t < 4 / 11, a, np.where(
t < 8 / 11, b, np.where(
t < 9 / 10, c, d)))
def in_out_bounce(t):
a = in_bounce(2 * t) * 0.5
b = out_bounce(2 * t - 1) * 0.5 + 0.5
return np.where(t < 0.5, a, b)
def in_square(t):
a = np.zeros(len(t))
b = a + 1
return np.where(t < 1, a, b)
def out_square(t):
a = np.zeros(len(t))
b = a + 1
return np.where(t > 0, b, a)
def in_out_square(t):
a = np.zeros(len(t))
b = a + 1
return np.where(t < 0.5, a, b)
def _main():
import matplotlib.pyplot as plt
fs = [
linear,
in_quad, out_quad, in_out_quad,
in_cubic, out_cubic, in_out_cubic,
in_quart, out_quart, in_out_quart,
in_quint, out_quint, in_out_quint,
in_sine, out_sine, in_out_sine,
in_expo, out_expo, in_out_expo,
in_circ, out_circ, in_out_circ,
in_elastic, out_elastic, in_out_elastic,
in_back, out_back, in_out_back,
in_bounce, out_bounce, in_out_bounce,
in_square, out_square, in_out_square,
]
x = np.linspace(0, 1, 1000)
for f in fs:
y = f(x)
plt.plot(x, y, label=f.__name__)
plt.legend()
plt.show()
if __name__ == '__main__':
_main()
|
453653
|
class Eigenstates:
def __init__(self, energies, array, extent, N, type):
"""Info about the eigenstates"""
self.energies = energies
self.array = array
self.number = len(array)
self.extent = extent
self.N = N
self.type = type
|
453672
|
from django.db import models
class OpenBasketManager(models.Manager):
"""For searching/creating OPEN baskets only."""
status_filter = "Open"
def get_queryset(self):
return super().get_queryset().filter(
status=self.status_filter)
def get_or_create(self, **kwargs):
return self.get_queryset().get_or_create(
status=self.status_filter, **kwargs)
class SavedBasketManager(models.Manager):
"""For searching/creating SAVED baskets only."""
status_filter = "Saved"
def get_queryset(self):
return super().get_queryset().filter(
status=self.status_filter)
def create(self, **kwargs):
return self.get_queryset().create(status=self.status_filter, **kwargs)
def get_or_create(self, **kwargs):
return self.get_queryset().get_or_create(
status=self.status_filter, **kwargs)
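# Hypothetical wiring (the model name and its fields are assumptions), showing
# how these managers are typically attached:
#
#   class Basket(models.Model):
#       status = models.CharField(max_length=128, default="Open")
#       objects = models.Manager()
#       open = OpenBasketManager()
#       saved = SavedBasketManager()
#
#   Basket.open.get_or_create(owner=owner)   # only ever matches/creates Open baskets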
|
453715
|
from .fhirbase import fhirbase
class ClaimResponse(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
resourceType: This is a ClaimResponse resource
identifier: The Response business identifier.
status: The status of the resource instance.
patient: Patient Resource.
created: The date when the enclosed suite of services were performed
or completed.
insurer: The Insurer who produced this adjudicated response.
requestProvider: The practitioner who is responsible for the services
rendered to the patient.
requestOrganization: The organization which is responsible for the
services rendered to the patient.
        request: Original request resource reference.
        outcome: Processing outcome: error, partial or complete processing.
disposition: A description of the status of the adjudication.
payeeType: Party to be reimbursed: Subscriber, provider, other.
item: The first tier service adjudications for submitted services.
addItem: The first tier service adjudications for payor added
services.
error: Mutually exclusive with Services Provided (Item).
totalCost: The total cost of the services reported.
unallocDeductable: The amount of deductible applied which was not
allocated to any particular service line.
totalBenefit: Total amount of benefit payable (Equal to sum of the
Benefit amounts from all detail lines and additions less the
Unallocated Deductible).
payment: Payment details for the claim if the claim has been paid.
reserved: Status of funds reservation (For provider, for Patient,
None).
form: The form to be used for printing the content.
processNote: Note text.
communicationRequest: Request for additional supporting or authorizing
information, such as: documents, images or resources.
insurance: Financial instrument by which payment information for
health care.
"""
__name__ = 'ClaimResponse'
def __init__(self, dict_values=None):
self.resourceType = 'ClaimResponse'
# type: str
# possible values: ClaimResponse
self.status = None
# type: str
self.patient = None
# reference to Reference: identifier
self.created = None
# type: str
self.insurer = None
# reference to Reference: identifier
self.requestProvider = None
# reference to Reference: identifier
self.requestOrganization = None
# reference to Reference: identifier
self.request = None
# reference to Reference: identifier
self.outcome = None
# reference to CodeableConcept
self.disposition = None
# type: str
self.payeeType = None
# reference to CodeableConcept
self.item = None
# type: list
# reference to ClaimResponse_Item
self.addItem = None
# type: list
# reference to ClaimResponse_AddItem
self.error = None
# type: list
# reference to ClaimResponse_Error
self.totalCost = None
# reference to Money
self.unallocDeductable = None
# reference to Money
self.totalBenefit = None
# reference to Money
self.payment = None
# reference to ClaimResponse_Payment: identifier
self.reserved = None
# reference to Coding
self.form = None
# reference to CodeableConcept
self.processNote = None
# type: list
# reference to ClaimResponse_ProcessNote
self.communicationRequest = None
# type: list
# reference to Reference: identifier
self.insurance = None
# type: list
# reference to ClaimResponse_Insurance
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'requestOrganization'},
{'parent_entity': 'ClaimResponse_AddItem',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'addItem'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'identifier'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'reserved'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'outcome'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'totalCost'},
{'parent_entity': 'ClaimResponse_Item',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'item'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'unallocDeductable'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'patient'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'insurer'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'payeeType'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'form'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'requestProvider'},
{'parent_entity': 'ClaimResponse_ProcessNote',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'processNote'},
{'parent_entity': 'ClaimResponse_Payment',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'payment'},
{'parent_entity': 'ClaimResponse_Insurance',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'insurance'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'request'},
{'parent_entity': 'ClaimResponse_Error',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'error'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse',
'child_variable': 'totalBenefit'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse',
'child_variable': 'communicationRequest'},
]
class ClaimResponse_Item(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequenceLinkId: A service line number.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudication results.
detail: The second tier service adjudications for submitted services.
"""
__name__ = 'ClaimResponse_Item'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: int
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ClaimResponse_Adjudication
self.detail = None
# type: list
# reference to ClaimResponse_Detail
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ClaimResponse_Detail',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Item',
'child_variable': 'detail'},
{'parent_entity': 'ClaimResponse_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Item',
'child_variable': 'adjudication'},
]
class ClaimResponse_Adjudication(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
category: Code indicating: Co-Pay, deductible, eligible, benefit, tax,
etc.
reason: Adjudication reason such as limit reached.
amount: Monetary amount associated with the code.
value: A non-monetary value for example a percentage. Mutually
exclusive to the amount element above.
"""
__name__ = 'ClaimResponse_Adjudication'
def __init__(self, dict_values=None):
self.category = None
# reference to CodeableConcept
self.reason = None
# reference to CodeableConcept
self.amount = None
# reference to Money
self.value = None
# type: int
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Adjudication',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Adjudication',
'child_variable': 'reason'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Adjudication',
'child_variable': 'amount'},
]
class ClaimResponse_Detail(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequenceLinkId: A service line number.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
subDetail: The third tier service adjudications for submitted
services.
"""
__name__ = 'ClaimResponse_Detail'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: int
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ClaimResponse_Adjudication
self.subDetail = None
# type: list
# reference to ClaimResponse_SubDetail
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ClaimResponse_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail',
'child_variable': 'adjudication'},
{'parent_entity': 'ClaimResponse_SubDetail',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail',
'child_variable': 'subDetail'},
]
class ClaimResponse_SubDetail(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequenceLinkId: A service line number.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
"""
__name__ = 'ClaimResponse_SubDetail'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: int
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ClaimResponse_Adjudication
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ClaimResponse_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_SubDetail',
'child_variable': 'adjudication'},
]
class ClaimResponse_AddItem(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequenceLinkId: List of input service items which this service line is
intended to replace.
        revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: A code to indicate the Professional Service or Product
supplied.
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
        fee: The fee charged for the professional service or product.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
detail: The second tier service adjudications for payor added
services.
"""
__name__ = 'ClaimResponse_AddItem'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: list
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.fee = None
# reference to Money
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ClaimResponse_Adjudication
self.detail = None
# type: list
# reference to ClaimResponse_Detail1
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ClaimResponse_Detail1',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'detail'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'modifier'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'service'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'fee'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'revenue'},
{'parent_entity': 'ClaimResponse_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'adjudication'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_AddItem',
'child_variable': 'category'},
]
class ClaimResponse_Detail1(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
        revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: A code to indicate the Professional Service or Product
supplied.
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
        fee: The fee charged for the professional service or product.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
"""
__name__ = 'ClaimResponse_Detail1'
def __init__(self, dict_values=None):
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.fee = None
# reference to Money
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ClaimResponse_Adjudication
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'category'},
{'parent_entity': 'ClaimResponse_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'adjudication'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'fee'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'service'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'revenue'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Detail1',
'child_variable': 'modifier'},
]
class ClaimResponse_Error(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequenceLinkId: The sequence number of the line item submitted which
contains the error. This value is omitted when the error is elsewhere.
detailSequenceLinkId: The sequence number of the addition within the
line item submitted which contains the error. This value is omitted
when the error is not related to an Addition.
subdetailSequenceLinkId: The sequence number of the addition within
the line item submitted which contains the error. This value is
omitted when the error is not related to an Addition.
code: An error code,from a specified code system, which details why
the claim could not be adjudicated.
"""
__name__ = 'ClaimResponse_Error'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: int
self.detailSequenceLinkId = None
# type: int
self.subdetailSequenceLinkId = None
# type: int
self.code = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Error',
'child_variable': 'code'},
]
class ClaimResponse_Payment(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
type: Whether this represents partial or complete payment of the
claim.
adjustment: Adjustment to the payment of this transaction which is not
related to adjudication of this transaction.
adjustmentReason: Reason for the payment adjustment.
date: Estimated payment data.
amount: Payable less any payment adjustment.
identifier: Payment identifier.
"""
__name__ = 'ClaimResponse_Payment'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.adjustment = None
# reference to Money
self.adjustmentReason = None
# reference to CodeableConcept
self.date = None
# type: str
self.amount = None
# reference to Money
self.identifier = None
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Payment',
'child_variable': 'amount'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Payment',
'child_variable': 'adjustmentReason'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Payment',
'child_variable': 'type'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Payment',
'child_variable': 'adjustment'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_Payment',
'child_variable': 'identifier'},
]
class ClaimResponse_ProcessNote(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
number: An integer associated with each note which may be referred to
from each service line item.
type: The note purpose: Print/Display.
text: The note text.
language: The ISO-639-1 alpha 2 code in lower case for the language,
optionally followed by a hyphen and the ISO-3166-1 alpha 2 code for
the region in upper case; e.g. "en" for English, or "en-US" for
American English versus "en-EN" for England English.
"""
__name__ = 'ClaimResponse_ProcessNote'
def __init__(self, dict_values=None):
self.number = None
# type: int
self.type = None
# reference to CodeableConcept
self.text = None
# type: str
self.language = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_ProcessNote',
'child_variable': 'type'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ClaimResponse_ProcessNote',
'child_variable': 'language'},
]
class ClaimResponse_Insurance(fhirbase):
"""
This resource provides the adjudication details from the processing of
a Claim resource.
Args:
sequence: A service line item.
focal: The instance number of the Coverage which is the focus for
adjudication. The Coverage against which the claim is to be
adjudicated.
coverage: Reference to the program or plan identification, underwriter
or payor.
businessArrangement: The contract number of a business agreement which
describes the terms and conditions.
preAuthRef: A list of references from the Insurer to which these
services pertain.
claimResponse: The Coverages adjudication details.
"""
__name__ = 'ClaimResponse_Insurance'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.focal = None
# type: bool
self.coverage = None
# reference to Reference: identifier
self.businessArrangement = None
# type: str
self.preAuthRef = None
# type: list
self.claimResponse = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse_Insurance',
'child_variable': 'coverage'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ClaimResponse_Insurance',
'child_variable': 'claimResponse'},
]
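# Usage sketch (hedged: assumes fhirbase.set_attributes, used by the
# constructors above, copies matching dict keys onto the instance):
#
#   data = {'status': 'active', 'disposition': 'Claim settled as per contract.'}
#   response = ClaimResponse(dict_values=data)
#   response.get_relationships()   # -> list of parent/child mapping dicts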
|
453745
|
import asyncio
import enum
import random
import time
import idom
class GameState(enum.Enum):
init = 0
lost = 1
won = 2
play = 3
@idom.component
def GameView():
game_state, set_game_state = idom.hooks.use_state(GameState.init)
if game_state == GameState.play:
return GameLoop(grid_size=6, block_scale=50, set_game_state=set_game_state)
start_button = idom.html.button(
{"onClick": lambda event: set_game_state(GameState.play)},
"Start",
)
if game_state == GameState.won:
menu = idom.html.div(idom.html.h3("You won!"), start_button)
elif game_state == GameState.lost:
menu = idom.html.div(idom.html.h3("You lost"), start_button)
else:
menu = idom.html.div(idom.html.h3("Click to play"), start_button)
menu_style = idom.html.style(
"""
.snake-game-menu h3 {
margin-top: 0px !important;
}
"""
)
return idom.html.div({"className": "snake-game-menu"}, menu_style, menu)
class Direction(enum.Enum):
ArrowUp = (0, -1)
ArrowLeft = (-1, 0)
ArrowDown = (0, 1)
ArrowRight = (1, 0)
@idom.component
def GameLoop(grid_size, block_scale, set_game_state):
# we `use_ref` here to capture the latest direction press without any delay
direction = idom.hooks.use_ref(Direction.ArrowRight.value)
# capture the last direction of travel that was rendered
last_direction = direction.current
snake, set_snake = idom.hooks.use_state([(grid_size // 2 - 1, grid_size // 2 - 1)])
food, set_food = use_snake_food(grid_size, snake)
grid = create_grid(grid_size, block_scale)
@idom.event(prevent_default=True)
def on_direction_change(event):
if hasattr(Direction, event["key"]):
maybe_new_direction = Direction[event["key"]].value
direction_vector_sum = tuple(
map(sum, zip(last_direction, maybe_new_direction))
)
if direction_vector_sum != (0, 0):
direction.current = maybe_new_direction
grid_wrapper = idom.html.div({"onKeyDown": on_direction_change}, grid)
assign_grid_block_color(grid, food, "blue")
for location in snake:
assign_grid_block_color(grid, location, "white")
new_game_state = None
if snake[-1] in snake[:-1]:
assign_grid_block_color(grid, snake[-1], "red")
new_game_state = GameState.lost
elif len(snake) == grid_size**2:
assign_grid_block_color(grid, snake[-1], "yellow")
new_game_state = GameState.won
interval = use_interval(0.5)
@idom.hooks.use_effect
async def animate():
if new_game_state is not None:
await asyncio.sleep(1)
set_game_state(new_game_state)
return
await interval
new_snake_head = (
# grid wraps due to mod op here
(snake[-1][0] + direction.current[0]) % grid_size,
(snake[-1][1] + direction.current[1]) % grid_size,
)
if snake[-1] == food:
set_food()
new_snake = snake + [new_snake_head]
else:
new_snake = snake[1:] + [new_snake_head]
set_snake(new_snake)
return grid_wrapper
def use_snake_food(grid_size, current_snake):
grid_points = {(x, y) for x in range(grid_size) for y in range(grid_size)}
points_not_in_snake = grid_points.difference(current_snake)
food, _set_food = idom.hooks.use_state(current_snake[-1])
def set_food():
_set_food(random.choice(list(points_not_in_snake)))
return food, set_food
def use_interval(rate):
usage_time = idom.hooks.use_ref(time.time())
async def interval() -> None:
await asyncio.sleep(rate - (time.time() - usage_time.current))
usage_time.current = time.time()
return asyncio.ensure_future(interval())
def create_grid(grid_size, block_scale):
return idom.html.div(
{
"style": {
"height": f"{block_scale * grid_size}px",
"width": f"{block_scale * grid_size}px",
"cursor": "pointer",
"display": "grid",
"grid-gap": 0,
"grid-template-columns": f"repeat({grid_size}, {block_scale}px)",
"grid-template-rows": f"repeat({grid_size}, {block_scale}px)",
},
"tabIndex": -1,
},
[
idom.html.div(
{"style": {"height": f"{block_scale}px"}},
[
create_grid_block("black", block_scale, key=i)
for i in range(grid_size)
],
key=i,
)
for i in range(grid_size)
],
)
def create_grid_block(color, block_scale, key):
return idom.html.div(
{
"style": {
"height": f"{block_scale}px",
"width": f"{block_scale}px",
"backgroundColor": color,
"outline": "1px solid grey",
}
},
key=key,
)
def assign_grid_block_color(grid, point, color):
x, y = point
block = grid["children"][x]["children"][y]
block["attributes"]["style"]["backgroundColor"] = color
idom.run(GameView)
|
453766
|
from quom.tokenizer.iterator import RawIterator, LineWrapIterator, Span
def check_iterator(it, res):
crr = '\0'
for c in res:
prv = crr
crr = c
assert it.prev == prv
assert it.curr == crr
it.next()
assert it.next() is False
def test_raw_iterator():
it = RawIterator('ab')
check_iterator(it, 'ab')
it = RawIterator('a\rb')
check_iterator(it, 'a\rb')
it = RawIterator('a\r\nb')
check_iterator(it, 'a\r\nb')
it = RawIterator('a\\\r\nb')
check_iterator(it, 'a\\\r\nb')
it = RawIterator('a\\\nb')
check_iterator(it, 'a\\\nb')
it = RawIterator('a\\\rb')
check_iterator(it, 'a\\\rb')
it = RawIterator('a\\\r\\\r\nb')
check_iterator(it, 'a\\\r\\\r\nb')
it = RawIterator('a\\b')
check_iterator(it, 'a\\b')
it = RawIterator('"\\a')
check_iterator(it, '"\\a')
it = RawIterator('\\\\')
check_iterator(it, '\\\\')
it = RawIterator('\\')
check_iterator(it, '\\')
it = RawIterator('\\\n')
check_iterator(it, '\\\n')
it = RawIterator('\r')
check_iterator(it, '\r')
it = RawIterator('a')
assert it.lookahead == '\0'
it = RawIterator('\\')
assert it.lookahead == '\0'
it = RawIterator('\\\na')
assert it.lookahead == '\n'
it = RawIterator('')
assert it.lookahead == '\0'
def test_escape_iterator():
it = LineWrapIterator('ab')
check_iterator(it, 'ab')
it = LineWrapIterator('a\rb')
check_iterator(it, 'a\rb')
it = LineWrapIterator('a\r\nb')
check_iterator(it, 'a\r\nb')
it = LineWrapIterator('a\\\r\nb')
check_iterator(it, 'ab')
it = LineWrapIterator('a\\\nb')
check_iterator(it, 'ab')
it = LineWrapIterator('a\\\rb')
check_iterator(it, 'ab')
it = LineWrapIterator('a\\\\\nb')
check_iterator(it, 'a\\b')
it = LineWrapIterator('a\\\r\\\r\nb')
check_iterator(it, 'ab')
it = LineWrapIterator('a\\b')
check_iterator(it, 'a\\b')
it = LineWrapIterator('"\\a')
check_iterator(it, '"\\a')
it = LineWrapIterator('\\\\')
check_iterator(it, '\\\\')
it = LineWrapIterator('\\')
check_iterator(it, '\\')
it = LineWrapIterator('\\\n')
check_iterator(it, '\0')
it = LineWrapIterator('a')
assert it.lookahead == '\0'
it = LineWrapIterator('\\')
assert it.lookahead == '\0'
it = LineWrapIterator('\\\na')
assert it.lookahead == '\0'
it = LineWrapIterator('')
assert it.lookahead == '\0'
it = LineWrapIterator('a\\\n')
assert it.lookahead == '\0'
def test_copy():
it1 = LineWrapIterator('ab')
assert it1.curr == 'a'
it2 = it1.copy()
it2.next()
assert it1.curr == 'a'
assert it2.curr == 'b'
it1.next()
assert it1.curr == 'b'
assert it2.curr == 'b'
def test_iterator_casting():
it = LineWrapIterator('a\\\r\\b\\\nc')
assert it.curr == 'a'
it = LineWrapIterator(it)
it.next()
assert it.curr == '\\'
it.next()
assert it.curr == 'b'
it = RawIterator(it)
it.next()
assert it.curr == '\\'
it.next()
assert it.curr == '\n'
it.next()
assert it.curr == 'c'
def test_span():
it1 = LineWrapIterator('abc')
assert ''.join(it1) == 'abc'
assert ''.join(it1) == 'abc'
it2 = it1.copy()
it2.next()
assert ''.join(it2) == 'bc'
assert ''.join(it2) == 'bc'
it1 = LineWrapIterator('a ')
it2 = it1.copy()
it2.next()
span = Span(it1, it2)
assert ''.join(span) == 'a'
assert ''.join(span) == 'a'
|
453784
|
import glob
import numpy as np
import os
import scipy.io as scio
import torch
from torch.utils.data import Dataset
class trainset_loader(Dataset):
def __init__(self, root, dose):
self.file_path = 'input_' + dose
self.files_A = sorted(glob.glob(os.path.join(root, 'train', self.file_path, 'data') + '*.mat'))
def __getitem__(self, index):
file_A = self.files_A[index]
file_B = file_A.replace(self.file_path,'label_single')
file_C = file_A.replace('input','projection')
input_data = scio.loadmat(file_A)['data']
label_data = scio.loadmat(file_B)['data']
prj_data = scio.loadmat(file_C)['data']
input_data = torch.FloatTensor(input_data).unsqueeze_(0)
label_data = torch.FloatTensor(label_data).unsqueeze_(0)
prj_data = torch.FloatTensor(prj_data).unsqueeze_(0)
return input_data, label_data, prj_data
def __len__(self):
return len(self.files_A)
class testset_loader(Dataset):
def __init__(self, root, dose):
self.file_path = 'input_' + dose
self.files_A = sorted(glob.glob(os.path.join(root, 'test', self.file_path, 'data') + '*.mat'))
def __getitem__(self, index):
file_A = self.files_A[index]
file_B = file_A.replace(self.file_path,'label_single')
file_C = file_A.replace('input','projection')
res_name = 'result\\' + file_A[-13:]
input_data = scio.loadmat(file_A)['data']
label_data = scio.loadmat(file_B)['data']
prj_data = scio.loadmat(file_C)['data']
input_data = torch.FloatTensor(input_data).unsqueeze_(0)
label_data = torch.FloatTensor(label_data).unsqueeze_(0)
prj_data = torch.FloatTensor(prj_data).unsqueeze_(0)
return input_data, label_data, prj_data, res_name
def __len__(self):
return len(self.files_A)
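# Usage sketch (the data root and dose string below are placeholders for this
# repository's .mat file layout):
#
#   from torch.utils.data import DataLoader
#   train_loader = DataLoader(trainset_loader('./data', dose='quarter'),
#                             batch_size=1, shuffle=True)
#   for input_data, label_data, prj_data in train_loader:
#       ...   # each tensor carries the channel dim added by unsqueeze_(0)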
|
453827
|
import inspect
from typing import List, Set, Mapping, Dict
import pytest
from guniflask.data_model.typing import parse_json, analyze_arg_type, inspect_args
class Person:
name: str
age: int
class Teacher(Person):
classes: Set
class Student(Person):
secret: str
mentor: Teacher
parents: List[Person]
scores: dict
hobbies: List = None
graduated: bool = False
def test_parse_simple_data():
assert parse_json('1', dtype=int) == 1
assert parse_json('1', dtype=List[int]) == [1]
assert parse_json([1, 2, 3]) == [1, 2, 3]
assert parse_json({'key': 'value'}) == {'key': 'value'}
def test_parse_object_data():
student_data = dict(
name='Bob',
age=12,
graduated=True,
hobbies=['Programming', 'Piano'],
scores={'Math': 100},
parents=[
dict(
name='Billy',
age=40
),
dict(
name='Judy',
age=39
)
],
mentor=dict(
name='Alice',
age=41,
classes=['English', 'Math']
)
)
student = parse_json(student_data, Student)
assert isinstance(student, Student)
assert student.name == 'Bob'
assert student.age == 12
assert student.graduated is True
assert student.hobbies == ['Programming', 'Piano']
assert student.scores == {'Math': 100}
assert not hasattr(student, 'secret')
assert isinstance(student.parents, list) and len(student.parents) == 2
for parent in student.parents:
if parent.name == 'Billy':
assert parent.age == 40
elif parent.name == 'Judy':
assert parent.age == 39
else:
raise RuntimeError
assert isinstance(student.mentor, Teacher)
mentor = student.mentor
assert mentor.name == 'Alice'
assert mentor.age == 41
assert isinstance(mentor.classes, set)
assert mentor.classes == {'English', 'Math'}
def test_parse_list():
class Prop:
name: str
value: str
prop_list = parse_json({'name': 'name', 'value': 'Alice'}, List[Prop])
assert isinstance(prop_list, list) and len(prop_list) == 1
prop = prop_list[0]
assert prop.name == 'name' and prop.value == 'Alice'
prop_list = parse_json(None, List[Prop])
assert prop_list is None
def test_analyze_arg_type():
arg_ = analyze_arg_type(None)
assert arg_.is_singleton() and arg_.outer_type is None
with pytest.raises(AssertionError):
analyze_arg_type('')
arg_ = analyze_arg_type(list)
assert arg_.is_list() and arg_.outer_type is None
arg_ = analyze_arg_type(set)
assert arg_.is_set() and arg_.outer_type is None
arg_ = analyze_arg_type(dict)
assert arg_.is_dict() and arg_.outer_type == (None, None)
arg_ = analyze_arg_type(int)
assert arg_.is_singleton() and arg_.outer_type is int
arg_ = analyze_arg_type(str)
assert arg_.is_singleton() and arg_.outer_type is str
arg_ = analyze_arg_type(List)
assert arg_.is_list() and arg_.outer_type is None
arg_ = analyze_arg_type(Set)
assert arg_.is_set() and arg_.outer_type is None
arg_ = analyze_arg_type(Mapping)
assert arg_.is_dict() and arg_.outer_type == (None, None)
arg_ = analyze_arg_type(Dict)
assert arg_.is_dict() and arg_.outer_type == (None, None)
class A:
pass
arg_ = analyze_arg_type(A)
assert arg_.is_singleton() and arg_.outer_type is A
arg_ = analyze_arg_type(List[A])
assert arg_.is_list() and arg_.outer_type is A
arg_ = analyze_arg_type(List[str])
assert arg_.is_list() and arg_.outer_type is str
arg_ = analyze_arg_type(Set[A])
assert arg_.is_set() and arg_.outer_type is A
arg_ = analyze_arg_type(Set[str])
assert arg_.is_set() and arg_.outer_type is str
arg_ = analyze_arg_type(Mapping[str, A])
assert arg_.is_dict() and arg_.outer_type == (str, A)
arg_ = analyze_arg_type(Mapping[str, str])
assert arg_.is_dict() and arg_.outer_type == (str, str)
arg_ = analyze_arg_type(Dict[str, A])
assert arg_.is_dict() and arg_.outer_type == (str, A)
arg_ = analyze_arg_type(Dict[str, str])
assert arg_.is_dict() and arg_.outer_type == (str, str)
arg_ = analyze_arg_type(Mapping[str, List[A]])
assert arg_.is_dict() and type(arg_.outer_type) == tuple
outer_type = arg_.outer_type
assert outer_type[0] == str
assert outer_type[1].is_list() and outer_type[1].outer_type == A
def test_inspect_args():
class A:
pass
def func(a, b: int, c: List, d=1, e: List = None, f: List[str] = None, g: A = None) -> dict:
pass
args, hints = inspect_args(func)
assert args['a'] is inspect._empty
assert args['b'] is inspect._empty
assert args['c'] is inspect._empty
assert args['d'] == 1
assert args['e'] is None
assert args['f'] is None
assert args['g'] is None
assert hints == {'return': dict, 'b': int, 'c': List, 'e': List, 'f': List[str], 'g': A}
|
453917
|
import os.path as osp
import sys
import numpy as np
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_align import RoIAlign # noqa: E402, isort:skip
feat_size = 15
spatial_scale = 1.0 / 8
img_size = feat_size / spatial_scale
num_imgs = 2
num_rois = 20
batch_ind = np.random.randint(num_imgs, size=(num_rois, 1))
rois = np.random.rand(num_rois, 4) * img_size * 0.5
rois[:, 2:] += img_size * 0.5
rois = np.hstack((batch_ind, rois))
feat = torch.randn(
num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0')
rois = torch.from_numpy(rois).float().cuda()
inputs = (feat, rois)
print('Gradcheck for roi align...')
test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3)
print(test)
test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3)
print(test)
|
453983
|
from aiohttp_requests import requests
from aioresponses import aioresponses
async def test_aiohttp_requests():
test_url = 'http://dummy-url'
test_payload = {'hello': 'world'}
with aioresponses() as mocked:
mocked.get(test_url, payload=test_payload)
response = await requests.get(test_url)
json = await response.json()
assert test_payload == json
requests.close() # Normally called on destroy
async def test_aiohttp_requests_integration():
response = await requests.get('https://www.google.com')
content = await response.text()
assert response.status == 200
assert len(content) > 10000
async def test_aiohttp_requests_after_close(loop):
# Closing ourself
requests.close()
await test_aiohttp_requests_integration()
# Closing aiohttp session
await requests.session.close()
await test_aiohttp_requests_integration()
|
453995
|
from __future__ import absolute_import
from ._filters import *
import numpy
import scipy.ndimage.filters
import skimage.filters
import skimage.morphology
__all__ = []
for key in _filters.__dict__.keys():
__all__.append(key)
def gaussianSmoothing(image, sigma, nSpatialDimensions=2):
image = numpy.require(image, dtype='float32')
if image.ndim == nSpatialDimensions:
return scipy.ndimage.filters.gaussian_filter(image, sigma)
elif image.ndim == nSpatialDimensions + 1:
raise RuntimeError("not yer implemented")
else:
raise RuntimeError("image dimension does not match spatial dimension")
def gaussianGradientMagnitude(image, sigma, nSpatialDimensions=2):
image = numpy.require(image, dtype='float32')
if image.ndim == nSpatialDimensions:
return scipy.ndimage.filters.gaussian_gradient_magnitude(image, sigma)
elif image.ndim == nSpatialDimensions + 1:
out = None
nChannels = image.shape[image.ndim-1]
for c in range(nChannels):
cImage = image[...,c]
gm = scipy.ndimage.filters.gaussian_gradient_magnitude(cImage, sigma)
if out is None:
out = gm
else:
out += gm
out /= nChannels
return out
else:
raise RuntimeError("image dimension does not match spatial dimension")
def affinitiesToProbability(affinities, edge_format=-1):
ndim = affinities.ndim
n_channels = affinities.shape[2]
if ndim != 3 or n_channels != 2:
raise RuntimeError("ndim must be 3 and n_channels must be 2")
if edge_format == 1:
ax = affinities[:, :, 0]
ay = affinities[:, :, 1]
ax_ = ax[0:-1,: ]
        ay_ = ay[:, 0:-1]
axx = ax.copy()
ayy = ay.copy()
axx[1 :, :] += ax_
ayy[:, 1 :] += ay_
elif edge_format == -1:
ax = affinities[:, :, 0]
ay = affinities[:, :, 1]
ax_ = ax[1:,: ]
        ay_ = ay[:, 1:]
axx = ax.copy()
ayy = ay.copy()
axx[0:-1, :] += ax_
ayy[:, 0:-1] += ay_
else:
raise RuntimeError("format must be in [1,-1]")
return 1- (axx + ayy)/2.0
try :
import vigra
__has_vigra = True
except ImportError:
__has_vigra = False
def diskMedian(img, radius):
nimg = img.copy()
oldMin = img.min()
oldMax = img.max()
nimg = numpy.require(nimg, dtype='float32')
nimg -= oldMin
nimg /= (oldMax - oldMin)
nimg *= 255.0
nimg = nimg.astype('uint8')
disk = skimage.morphology.disk(radius)
r = skimage.filters.median(nimg, disk).astype('float32')/255.0
r *= (oldMax - oldMin)
r += oldMin
return r
if __has_vigra:
def hessianOfGaussianEigenvalues(image, sigma):
imageShape = image.shape
nDim = image.ndim
        imageR = numpy.require(image, dtype='float32', requirements=['C'])
        imageT = imageR.T
res = vigra.filters.hessianOfGaussianEigenvalues(imageT, sigma).view(numpy.ndarray).T
res = numpy.moveaxis(res,0,-1)
return numpy.require(res, requirements=['C'])
def hessianOfGaussianStrongestEigenvalue(image, sigma):
imageShape = image.shape
nDim = image.ndim
        imageR = numpy.require(image, dtype='float32', requirements=['C'])
        imageT = imageR.T
res = vigra.filters.hessianOfGaussianEigenvalues(imageT, sigma)[:,:,0].view(numpy.ndarray).T
return numpy.require(res, requirements=['C'])
|
454050
|
import warnings
from io import StringIO
import numpy
from sklearn.base import TransformerMixin
from sklearn.utils import column_or_1d
from sklearn.utils.validation import check_is_fitted
try:
from scipy.io import arff
HAS_ARFF = True
except:
HAS_ARFF = False
try:
from sklearn.utils.estimator_checks import _NotAnArray as NotAnArray
except ImportError: # Old sklearn versions
from sklearn.utils.estimator_checks import NotAnArray
from tslearn.bases import TimeSeriesBaseEstimator
__author__ = '<NAME> <EMAIL>[at]<EMAIL>'
def check_dims(X, X_fit_dims=None, extend=True, check_n_features_only=False):
"""Reshapes X to a 3-dimensional array of X.shape[0] univariate
timeseries of length X.shape[1] if X is 2-dimensional and extend
is True. Then checks whether the provided X_fit_dims and the
dimensions of X (except for the first one), match.
Parameters
----------
X : array-like
The first array to be compared.
X_fit_dims : tuple (default: None)
The dimensions of the data generated by fit, to compare with
the dimensions of the provided array X.
If None, then only perform reshaping of X, if necessary.
extend : boolean (default: True)
Whether to reshape X, if it is 2-dimensional.
    check_n_features_only: boolean (default: False)
        If True, only the number of features (last dimension) of X is
        compared against X_fit_dims; otherwise all dimensions except the
        first must match.
Returns
-------
array
Reshaped X array
Examples
--------
>>> X = numpy.empty((10, 3))
>>> check_dims(X).shape
(10, 3, 1)
>>> X = numpy.empty((10, 3, 1))
>>> check_dims(X).shape
(10, 3, 1)
>>> X_fit_dims = (5, 3, 1)
>>> check_dims(X, X_fit_dims).shape
(10, 3, 1)
>>> X_fit_dims = (5, 3, 2)
>>> check_dims(X, X_fit_dims) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: Dimensions (except first) must match! ((5, 3, 2) and (10, 3, 1)
are passed shapes)
>>> X_fit_dims = (5, 5, 1)
>>> check_dims(X, X_fit_dims, check_n_features_only=True).shape
(10, 3, 1)
>>> X_fit_dims = (5, 5, 2)
>>> check_dims(
... X,
... X_fit_dims,
... check_n_features_only=True
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: Number of features of the provided timeseries must match!
(last dimension) must match the one of the fitted data!
((5, 5, 2) and (10, 3, 1) are passed shapes)
Raises
------
ValueError
Will raise exception if X is None or (if X_fit_dims is provided) one
of the dimensions of the provided data, except the first, does not
match X_fit_dims.
"""
if X is None:
raise ValueError('X is equal to None!')
if extend and len(X.shape) == 2:
warnings.warn('2-Dimensional data passed. Assuming these are '
'{} 1-dimensional timeseries'.format(X.shape[0]))
X = X.reshape((X.shape) + (1,))
if X_fit_dims is not None:
if check_n_features_only:
if X_fit_dims[2] != X.shape[2]:
raise ValueError(
'Number of features of the provided timeseries'
'(last dimension) must match the one of the fitted data!'
' ({} and {} are passed shapes)'.format(X_fit_dims,
X.shape))
else:
if X_fit_dims[1:] != X.shape[1:]:
raise ValueError(
'Dimensions of the provided timeseries'
'(except first) must match those of the fitted data!'
' ({} and {} are passed shapes)'.format(X_fit_dims,
X.shape))
return X
def to_time_series(ts, remove_nans=False):
"""Transforms a time series so that it fits the format used in ``tslearn``
models.
Parameters
----------
ts : array-like
The time series to be transformed.
remove_nans : bool (default: False)
Whether trailing NaNs at the end of the time series should be removed
or not
Returns
-------
numpy.ndarray of shape (sz, d)
        The transformed time series. This is always guaranteed to be a new
time series and never just a view into the old one.
Examples
--------
>>> to_time_series([1, 2])
array([[1.],
[2.]])
>>> to_time_series([1, 2, numpy.nan])
array([[ 1.],
[ 2.],
[nan]])
>>> to_time_series([1, 2, numpy.nan], remove_nans=True)
array([[1.],
[2.]])
See Also
--------
to_time_series_dataset : Transforms a dataset of time series
"""
ts_out = numpy.array(ts, copy=True)
if ts_out.ndim <= 1:
ts_out = ts_out.reshape((-1, 1))
    if ts_out.dtype != float:
        ts_out = ts_out.astype(float)
if remove_nans:
ts_out = ts_out[:ts_size(ts_out)]
return ts_out
def to_time_series_dataset(dataset, dtype=float):
"""Transforms a time series dataset so that it fits the format used in
``tslearn`` models.
Parameters
----------
dataset : array-like
The dataset of time series to be transformed. A single time series will
be automatically wrapped into a dataset with a single entry.
    dtype : data type (default: float)
Data type for the returned dataset.
Returns
-------
numpy.ndarray of shape (n_ts, sz, d)
The transformed dataset of time series.
Examples
--------
>>> to_time_series_dataset([[1, 2]])
array([[[1.],
[2.]]])
>>> to_time_series_dataset([1, 2])
array([[[1.],
[2.]]])
>>> to_time_series_dataset([[1, 2], [1, 4, 3]])
array([[[ 1.],
[ 2.],
[nan]],
<BLANKLINE>
[[ 1.],
[ 4.],
[ 3.]]])
>>> to_time_series_dataset([]).shape
(0, 0, 0)
See Also
--------
to_time_series : Transforms a single time series
"""
try:
import pandas as pd
if isinstance(dataset, pd.DataFrame):
return to_time_series_dataset(numpy.array(dataset))
except ImportError:
pass
if isinstance(dataset, NotAnArray): # Patch to pass sklearn tests
return to_time_series_dataset(numpy.array(dataset))
if len(dataset) == 0:
return numpy.zeros((0, 0, 0))
if numpy.array(dataset[0]).ndim == 0:
dataset = [dataset]
n_ts = len(dataset)
max_sz = max([ts_size(to_time_series(ts, remove_nans=True))
for ts in dataset])
d = to_time_series(dataset[0]).shape[1]
dataset_out = numpy.zeros((n_ts, max_sz, d), dtype=dtype) + numpy.nan
for i in range(n_ts):
ts = to_time_series(dataset[i], remove_nans=True)
dataset_out[i, :ts.shape[0]] = ts
return dataset_out.astype(dtype)
def time_series_to_str(ts, fmt="%.18e"):
"""Transforms a time series to its representation as a string (used when
saving time series to disk).
Parameters
----------
ts : array-like
Time series to be represented.
fmt : string (default: "%.18e")
Format to be used to write each value (only ASCII characters).
Returns
-------
string
String representation of the time-series.
Examples
--------
>>> time_series_to_str([1, 2, 3, 4], fmt="%.1f")
'1.0 2.0 3.0 4.0'
>>> time_series_to_str([[1, 3], [2, 4]], fmt="%.1f")
'1.0 2.0|3.0 4.0'
See Also
--------
load_time_series_txt : Load time series from disk
str_to_time_series : Transform a string into a time series
"""
ts_ = to_time_series(ts)
out = StringIO()
numpy.savetxt(out, ts_.T, fmt=fmt, delimiter=" ", newline="|", encoding="bytes")
return out.getvalue()[:-1] # cut away the trailing "|"
timeseries_to_str = time_series_to_str
def str_to_time_series(ts_str):
"""Reads a time series from its string representation (used when loading
time series from disk).
Parameters
----------
ts_str : string
String representation of the time-series.
Returns
-------
numpy.ndarray
Represented time-series.
Examples
--------
>>> str_to_time_series("1 2 3 4")
array([[1.],
[2.],
[3.],
[4.]])
>>> str_to_time_series("1 2|3 4")
array([[1., 3.],
[2., 4.]])
See Also
--------
load_time_series_txt : Load time series from disk
time_series_to_str : Transform a time series into a string
"""
dimensions = ts_str.split("|")
ts = [numpy.fromstring(dim_str, sep=" ") for dim_str in dimensions]
return to_time_series(numpy.transpose(ts))
str_to_timeseries = str_to_time_series
def save_time_series_txt(fname, dataset, fmt="%.18e"):
"""Writes a time series dataset to disk.
Parameters
----------
fname : string
Path to the file in which time series should be written.
dataset : array-like
The dataset of time series to be saved.
fmt : string (default: "%.18e")
Format to be used to write each value.
Examples
--------
>>> dataset = to_time_series_dataset([[1, 2, 3, 4], [1, 2, 3]])
>>> save_time_series_txt("tmp-tslearn-test.txt", dataset)
See Also
--------
load_time_series_txt : Load time series from disk
"""
with open(fname, "w") as f:
for ts in dataset:
f.write(time_series_to_str(ts, fmt=fmt) + "\n")
save_timeseries_txt = save_time_series_txt
def load_time_series_txt(fname):
"""Loads a time series dataset from disk.
Parameters
----------
fname : string
Path to the file from which time series should be read.
Returns
-------
numpy.ndarray or array of numpy.ndarray
The dataset of time series.
Examples
--------
>>> dataset = to_time_series_dataset([[1, 2, 3, 4], [1, 2, 3]])
>>> save_time_series_txt("tmp-tslearn-test.txt", dataset)
>>> reloaded_dataset = load_time_series_txt("tmp-tslearn-test.txt")
See Also
--------
save_time_series_txt : Save time series to disk
"""
with open(fname, "r") as f:
return to_time_series_dataset([
str_to_time_series(row)
for row in f.readlines()
])
load_timeseries_txt = load_time_series_txt
def check_equal_size(dataset):
"""Check if all time series in the dataset have the same size.
Parameters
----------
dataset: array-like
The dataset to check.
Returns
-------
bool
Whether all time series in the dataset have the same size.
Examples
--------
>>> check_equal_size([[1, 2, 3], [4, 5, 6], [5, 3, 2]])
True
>>> check_equal_size([[1, 2, 3, 4], [4, 5, 6], [5, 3, 2]])
False
>>> check_equal_size([])
True
"""
dataset_ = to_time_series_dataset(dataset)
if len(dataset_) == 0:
return True
size = ts_size(dataset[0])
return all(ts_size(ds) == size for ds in dataset_[1:])
def ts_size(ts):
"""Returns actual time series size.
Final timesteps that have `NaN` values for all dimensions will be removed
    from the count. Infinity and negative infinity are considered valid time
series values.
Parameters
----------
ts : array-like
A time series.
Returns
-------
int
Actual size of the time series.
Examples
--------
>>> ts_size([1, 2, 3, numpy.nan])
3
>>> ts_size([1, numpy.nan])
1
>>> ts_size([numpy.nan])
0
>>> ts_size([[1, 2],
... [2, 3],
... [3, 4],
... [numpy.nan, 2],
... [numpy.nan, numpy.nan]])
4
>>> ts_size([numpy.nan, 3, numpy.inf, numpy.nan])
3
"""
ts_ = to_time_series(ts)
sz = ts_.shape[0]
while sz > 0 and numpy.all(numpy.isnan(ts_[sz - 1])):
sz -= 1
return sz
def ts_zeros(sz, d=1):
"""Returns a time series made of zero values.
Parameters
----------
sz : int
Time series size.
d : int (optional, default: 1)
Time series dimensionality.
Returns
-------
numpy.ndarray
A time series made of zeros.
Examples
--------
>>> ts_zeros(3, 2) # doctest: +NORMALIZE_WHITESPACE
array([[0., 0.],
[0., 0.],
[0., 0.]])
>>> ts_zeros(5).shape
(5, 1)
"""
return numpy.zeros((sz, d))
def check_dataset(X, force_univariate=False, force_equal_length=False,
force_single_time_series=False):
"""Check if X is a valid tslearn dataset, with possibly additional extra
constraints.
Parameters
----------
X: array, shape = (n_ts, sz, d)
Time series dataset.
force_univariate: bool (default: False)
If True, only univariate datasets are considered valid.
force_equal_length: bool (default: False)
If True, only equal-length datasets are considered valid.
force_single_time_series: bool (default: False)
If True, only datasets made of a single time series are considered
valid.
Returns
-------
array, shape = (n_ts, sz, d)
Formatted dataset, if it is valid
Raises
------
ValueError
Raised if X is not a valid dataset, or one of the constraints is not
satisfied.
Examples
--------
>>> X = [[1, 2, 3], [1, 2, 3, 4]]
>>> X_new = check_dataset(X)
>>> X_new.shape
(2, 4, 1)
>>> check_dataset(
... X,
... force_equal_length=True
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: All the time series in the array should be of equal lengths.
>>> other_X = numpy.random.randn(3, 10, 2)
>>> check_dataset(
... other_X,
... force_univariate=True
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Array should be univariate and is of shape: (3, 10, 2)
>>> other_X = numpy.random.randn(3, 10, 2)
>>> check_dataset(
... other_X,
... force_single_time_series=True
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Array should be made of a single time series (3 here)
"""
X_ = to_time_series_dataset(X)
if force_univariate and X_.shape[2] != 1:
raise ValueError(
"Array should be univariate and is of shape: {}".format(
X_.shape
)
)
if force_equal_length and not check_equal_size(X_):
raise ValueError("All the time series in the array should be of "
"equal lengths")
if force_single_time_series and X_.shape[0] != 1:
raise ValueError("Array should be made of a single time series "
"({} here)".format(X_.shape[0]))
return X_
class LabelCategorizer(TransformerMixin, TimeSeriesBaseEstimator):
"""Transformer to transform indicator-based labels into categorical ones.
Attributes
----------
single_column_if_binary : boolean (optional, default: False)
If true, generate a single column for binary classification case.
        Otherwise, two columns will be generated.
        If there are more than 2 labels, this option will not change anything.
forward_match : dict
        A dictionary that maps each element that occurs in the label vector
        to an index {y_i : i}, with i in [0, C - 1], where C is the total
        number of unique labels and y_i is the i-th unique label.
backward_match : array-like
        An array that maps an index back to the original label, such that
        backward_match[i] is y_i.
Examples
--------
>>> y = numpy.array([-1, 2, 1, 1, 2])
>>> lc = LabelCategorizer()
>>> lc.fit_transform(y)
array([[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> lc.inverse_transform([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
array([ 1., 2., -1.])
>>> y = numpy.array([-1, 2, -1, -1, 2])
>>> lc = LabelCategorizer(single_column_if_binary=True)
>>> lc.fit_transform(y)
array([[1.],
[0.],
[1.],
[1.],
[0.]])
>>> lc.inverse_transform(lc.transform(y))
array([-1., 2., -1., -1., 2.])
References
----------
.. [1] <NAME> et al. Learning Time-Series Shapelets. SIGKDD 2014.
"""
def __init__(self, single_column_if_binary=False, forward_match=None,
backward_match=None):
self.single_column_if_binary = single_column_if_binary
self.forward_match = forward_match
self.backward_match = backward_match
def _init(self):
self.forward_match = {}
self.backward_match = []
def fit(self, y):
self._init()
y = column_or_1d(y, warn=True)
values = sorted(set(y))
for i, v in enumerate(values):
self.forward_match[v] = i
self.backward_match.append(v)
return self
def transform(self, y):
check_is_fitted(self, ['backward_match', 'forward_match'])
y = column_or_1d(y, warn=True)
n_classes = len(self.backward_match)
n = len(y)
y_out = numpy.zeros((n, n_classes))
for i in range(n):
y_out[i, self.forward_match[y[i]]] = 1
if n_classes == 2 and self.single_column_if_binary:
return y_out[:, 0].reshape((-1, 1))
else:
return y_out
def inverse_transform(self, y):
check_is_fitted(self, ['backward_match', 'forward_match'])
y_ = numpy.array(y)
n, n_c = y_.shape
if n_c == 1 and self.single_column_if_binary:
y_ = numpy.hstack((y_, 1 - y_))
y_out = numpy.zeros((n, ))
for i in range(n):
y_out[i] = self.backward_match[y_[i].argmax()]
return y_out
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = TimeSeriesBaseEstimator.get_params(self, deep=deep)
out["single_column_if_binary"] = self.single_column_if_binary
out["forward_match"] = self.forward_match
out["backward_match"] = self.backward_match
return out
def _more_tags(self):
return {'X_types': ['1dlabels']}
def _load_arff_uea(dataset_path):
"""Load arff file for uni/multi variate dataset
Parameters
----------
    dataset_path: string
Path to the ARFF file to be read
Returns
-------
x: numpy array of shape (n_timeseries, n_timestamps, n_features)
Time series dataset
y: numpy array of shape (n_timeseries, )
Vector of targets
Raises
------
ImportError: if the version of *Scipy* is too old (pre 1.3.0)
Exception: on any failure, e.g. if the given file does not exist or is
corrupted
"""
if not HAS_ARFF:
raise ImportError("scipy 1.3.0 or newer is required to load "
"time series datasets from arff format.")
data, meta = arff.loadarff(dataset_path)
names = meta.names() # ["input", "class"] for multi-variate
# firstly get y_train
y_ = data[names[-1]] # data["class"]
y = numpy.array(y_).astype("str")
# get x_train
if len(names) == 2: # len=2 => multi-variate
x_ = data[names[0]]
x_ = numpy.asarray(x_.tolist())
nb_example = x_.shape[0]
nb_channel = x_.shape[1]
length_one_channel = len(x_.dtype.descr)
x = numpy.empty([nb_example, length_one_channel, nb_channel])
for i in range(length_one_channel):
# x_.dtype.descr: [('t1', '<f8'), ('t2', '<f8'), ('t3', '<f8')]
time_stamp = x_.dtype.descr[i][0] # ["t1", "t2", "t3"]
x[:, i, :] = x_[time_stamp]
else: # uni-variate situation
x_ = data[names[:-1]]
x = numpy.asarray(x_.tolist(), dtype=numpy.float32)
x = x.reshape(len(x), -1, 1)
return x, y
def _load_txt_uea(dataset_path):
"""Load arff file for uni/multi variate dataset
Parameters
----------
    dataset_path: string
Path to the TXT file to be read
Returns
-------
x: numpy array of shape (n_timeseries, n_timestamps, n_features)
Time series dataset
y: numpy array of shape (n_timeseries, )
Vector of targets
Raises
------
Exception: on any failure, e.g. if the given file does not exist or is
corrupted
"""
data = numpy.loadtxt(dataset_path)
X = to_time_series_dataset(data[:, 1:])
    y = data[:, 0].astype(int)
return X, y
|
454060
|
import redis
import connection
from twisted.internet.threads import deferToThread
from scrapy.utils.serialize import ScrapyJSONEncoder
class RedisPipeline(object):
"""将serialized item数据push进一个redis list/queue中"""
def __init__(self, server):
self.server = server
self.encoder = ScrapyJSONEncoder()
@classmethod
def from_settings(cls, settings):
server = connection.from_settings(settings)
return cls(server)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def process_item(self, item, spider):
return deferToThread(self._process_item, item, spider)
def _process_item(self, item, spider):
key = self.item_key(item, spider)
data = self.encoder.encode(item)
self.server.rpush(key, data)
return item
def item_key(self, item, spider):
"""基于给定的spider返回redis key"""
return "%s:items" % spider.name
|
454071
|
import os
import os.path as osp
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class CUB200(Dataset):
def __init__(self, root='./', train=True,
index_path=None, index=None, base_sess=None):
self.root = os.path.expanduser(root)
self.train = train # training set or test set
self._pre_operate(self.root)
if train:
self.transform = transforms.Compose([
transforms.Resize(256),
# transforms.CenterCrop(224),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# self.data, self.targets = self.SelectfromTxt(self.data2label, index_path)
if base_sess:
self.data, self.targets = self.SelectfromClasses(self.data, self.targets, index)
else:
self.data, self.targets = self.SelectfromTxt(self.data2label, index_path)
else:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
self.data, self.targets = self.SelectfromClasses(self.data, self.targets, index)
def text_read(self, file):
with open(file, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
lines[i] = line.strip('\n')
return lines
def list2dict(self, list):
dict = {}
for l in list:
s = l.split(' ')
id = int(s[0])
cls = s[1]
if id not in dict.keys():
dict[id] = cls
else:
                raise ValueError('The same ID can only appear once')
return dict
def _pre_operate(self, root):
image_file = os.path.join(root, 'CUB_200_2011/images.txt')
split_file = os.path.join(root, 'CUB_200_2011/train_test_split.txt')
class_file = os.path.join(root, 'CUB_200_2011/image_class_labels.txt')
id2image = self.list2dict(self.text_read(image_file))
        id2train = self.list2dict(self.text_read(split_file))  # 1: train images; 0: test images
id2class = self.list2dict(self.text_read(class_file))
train_idx = []
test_idx = []
for k in sorted(id2train.keys()):
if id2train[k] == '1':
train_idx.append(k)
else:
test_idx.append(k)
self.data = []
self.targets = []
self.data2label = {}
if self.train:
for k in train_idx:
image_path = os.path.join(root, 'CUB_200_2011/images', id2image[k])
self.data.append(image_path)
self.targets.append(int(id2class[k]) - 1)
self.data2label[image_path] = (int(id2class[k]) - 1)
else:
for k in test_idx:
image_path = os.path.join(root, 'CUB_200_2011/images', id2image[k])
self.data.append(image_path)
self.targets.append(int(id2class[k]) - 1)
self.data2label[image_path] = (int(id2class[k]) - 1)
def SelectfromTxt(self, data2label, index_path):
index = open(index_path).read().splitlines()
data_tmp = []
targets_tmp = []
for i in index:
img_path = os.path.join(self.root, i)
data_tmp.append(img_path)
targets_tmp.append(data2label[img_path])
return data_tmp, targets_tmp
def SelectfromClasses(self, data, targets, index):
data_tmp = []
targets_tmp = []
for i in index:
ind_cl = np.where(i == targets)[0]
for j in ind_cl:
data_tmp.append(data[j])
targets_tmp.append(targets[j])
return data_tmp, targets_tmp
def __len__(self):
return len(self.data)
def __getitem__(self, i):
path, targets = self.data[i], self.targets[i]
image = self.transform(Image.open(path).convert('RGB'))
return image, targets
if __name__ == '__main__':
txt_path = "../../data/index_list/cub200/session_1.txt"
# class_index = open(txt_path).read().splitlines()
base_class = 100
class_index = np.arange(base_class)
dataroot = '~/dataloader/data'
batch_size_base = 400
trainset = CUB200(root=dataroot, train=False, index=class_index,
base_sess=True)
cls = np.unique(trainset.targets)
trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size_base, shuffle=True, num_workers=8,
pin_memory=True)
# txt_path = "../../data/index_list/cifar100/session_2.txt"
# # class_index = open(txt_path).read().splitlines()
# class_index = np.arange(base_class)
# trainset = CIFAR100(root=dataroot, train=True, download=True, transform=None, index=class_index,
# base_sess=True)
# cls = np.unique(trainset.targets)
# trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size_base, shuffle=True, num_workers=8,
# pin_memory=True)
|
454204
|
import click
@click.command()
def cli():
click.echo("Hello, World!")
if __name__ == '__main__':
cli()
|
454214
|
from collections import namedtuple
from gooey.gui.widgets import components
is_required = lambda widget: widget['required']
is_checkbox = lambda widget: isinstance(widget, components.CheckBox)
ComponentList = namedtuple('ComponentList', 'required_args optional_args')
def build_components(widget_list):
'''
:param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList
Converts the Json widget information into concrete wx Widget types
'''
    required_args, optional_args = partition(widget_list, is_required)
    checkbox_args, general_args = partition(list(map(build_widget, optional_args)), is_checkbox)
    required_args = list(map(build_widget, required_args))
    optional_args = general_args + checkbox_args
return ComponentList(required_args, optional_args)
def build_widget(widget_info):
widget_class = getattr(components, widget_info['type'])
return widget_class(data=widget_info['data'])
def partition(collection, condition):
    collection = list(collection)
    return ([x for x in collection if condition(x)],
            [x for x in collection if not condition(x)])
|
454238
|
from django.views.generic import TemplateView
from .constants import SCENES
class SceneView(TemplateView):
template_name = 'scenes/scene.html'
def get_context_data(self, **kwargs):
context = super(SceneView, self).get_context_data(**kwargs)
context['scenes'] = SCENES
return context
|
454249
|
import pytest
from bayesian.factor_graph import *
def pytest_funcarg__x1(request):
x1 = VariableNode('x1')
return x1
def pytest_funcarg__x2(request):
x2 = VariableNode('x2')
return x2
def pytest_funcarg__fA_node(request):
def fA(x1):
return 0.5
fA_node = FactorNode('fA', fA)
return fA_node
def pytest_funcarg__simple_valid_graph(request):
def fA(x1):
return 0.5
fA_node = FactorNode('fA', fA)
x1 = VariableNode('x1')
connect(fA_node, x1)
graph = FactorGraph([fA_node, x1])
return graph
def pytest_funcarg__graph_with_function_as_node(request):
'''
A common error is to instantiate the
graph with the function instead of
the function node wrapper.
'''
def fA(x1):
return 0.5
fA_node = FactorNode('fA', fA)
x1 = VariableNode('x1')
connect(fA_node, x1)
graph = FactorGraph([fA, x1])
return graph
def pytest_funcarg__graph_with_empty_func_domains(request):
def fA(x1):
return 0.5
fA_node = FactorNode('fA', fA)
x1 = VariableNode('x1')
connect(fA_node, x1)
graph = FactorGraph([fA_node, x1])
fA_node.func.domains = {}
return graph
def pytest_funcarg__graph_with_missing_func_domains(request):
def fA(x1):
return 0.5
fA_node = FactorNode('fA', fA)
x1 = VariableNode('x1')
connect(fA_node, x1)
graph = FactorGraph([fA_node, x1])
delattr(fA_node.func, 'domains')
return graph
def pytest_funcarg__graph_with_cycle(request):
'''
This graph looks like this BBN:
x1 x2----+
| | |
+----+----+ |
| |
x3 |
| |
+-----+----+
|
x4
'''
def fA(x1):
return 0.5
def fB(x2):
return 0.5
def fC(x1, x2, x3):
return 0.5
def fD(x2, x3, x4):
return 0.5
graph = build_graph(fA, fB, fC, fD)
return graph
class TestVerify():
def test_verify_variable_node_neighbour_type(self, x1, fA_node):
connect(fA_node, x1)
assert fA_node.verify_neighbour_types() is True
assert x1.verify_neighbour_types() is True
def test_verify_variable_node_neighbour_type_symmetry(self, x1, fA_node):
connect(x1, fA_node)
assert fA_node.verify_neighbour_types() is True
assert x1.verify_neighbour_types() is True
def test_verify_variable_node_wrong_neighbour_type(self, x1, x2):
connect(x1, x2)
assert x1.verify_neighbour_types() is False
assert x2.verify_neighbour_types() is False
def test_nodes_of_correct_type(self, simple_valid_graph):
assert simple_valid_graph.verify() is True
def test_broken_graph_bad_factor_node(self, graph_with_function_as_node):
'''
Make sure exception is raised for
broken graph.
'''
with pytest.raises(InvalidGraphException):
graph_with_function_as_node.verify()
def test_broken_graph_empty_factor_domains(
self, graph_with_empty_func_domains):
"""Ensure exception is raised for broken graph."""
with pytest.raises(InvalidGraphException):
graph_with_empty_func_domains.verify()
def test_broken_graph_missing_factor_domains(
self, graph_with_missing_func_domains):
"""Ensureexception is raised for broken graph."""
with pytest.raises(InvalidGraphException):
graph_with_missing_func_domains.verify()
def test_graph_has_no_cycles(self, simple_valid_graph):
assert simple_valid_graph.has_cycles() is False
def test_graph_has_cycles(self, graph_with_cycle):
assert graph_with_cycle.has_cycles() is True
|
454281
|
import unittest
import json
from aiohttp import web
from aiohttp_auth import auth, auth_middleware
from aiohttp_auth import acl, acl_middleware
from aiohttp_auth.permissions import Group, Permission
from aiohttp_session import session_middleware, SimpleCookieStorage
from .util import asyncio
from .util.aiohttp.test import (
make_request,
make_response,
make_auth_session)
class ACLMiddlewareTests(unittest.TestCase):
# Secret used in all the tests
SECRET = b'<KEY>'
def setUp(self):
"""Creates the storage and middlewares objects"""
self.storage = SimpleCookieStorage()
self.auth = auth.SessionTktAuthentication(
self.SECRET, 15, cookie_name='auth')
@asyncio.run_until_complete()
async def test_no_middleware_installed(self):
session_data = make_auth_session(
self.SECRET, 'some_user', self.auth.cookie_name)
request = await make_request('GET', '/', self._middleware(None), \
[(self.storage.cookie_name, json.dumps(session_data))])
with self.assertRaises(RuntimeError):
groups = await acl.get_user_groups(request)
@asyncio.run_until_complete()
async def test_correct_groups_returned_for_authenticated_user(self):
session_data = make_auth_session(
self.SECRET, 'some_user', self.auth.cookie_name)
request = await make_request('GET', '/', \
self._middleware(self._groups_callback), \
[(self.storage.cookie_name, json.dumps(session_data))])
groups = await acl.get_user_groups(request)
self.assertIn('group0', groups)
self.assertIn('group1', groups)
self.assertIn('some_user', groups)
self.assertIn(Group.Everyone, groups)
self.assertIn(Group.AuthenticatedUser, groups)
@asyncio.run_until_complete()
async def test_correct_groups_returned_for_unauthenticated_user(self):
request = await make_request('GET', '/', \
self._middleware(self._groups_callback))
groups = await acl.get_user_groups(request)
self.assertIn('group0', groups)
self.assertIn('group1', groups)
self.assertNotIn('some_user', groups)
self.assertNotIn(None, groups)
self.assertIn(Group.Everyone, groups)
self.assertNotIn(Group.AuthenticatedUser, groups)
@asyncio.run_until_complete()
async def test_no_groups_if_none_returned_from_callback(self):
request = await make_request('GET', '/', \
self._middleware(self._none_groups_callback))
groups = await acl.get_user_groups(request)
self.assertIsNone(groups)
@asyncio.run_until_complete()
async def test_acl_permissions(self):
request = await make_request('GET', '/', \
self._middleware(self._groups_callback))
context = [(Permission.Allow, 'group0', ('test0',)),
(Permission.Deny, 'group1', ('test1',)),
(Permission.Allow, Group.Everyone, ('test1',)),]
self.assertTrue(await acl.get_permitted(request, 'test0', context))
self.assertFalse(await acl.get_permitted(request, 'test1', context))
@asyncio.run_until_complete()
async def test_permission_order(self):
session_data = make_auth_session(
self.SECRET, 'some_user', self.auth.cookie_name)
request0 = await make_request('GET', '/', \
self._middleware(self._auth_groups_callback), \
[(self.storage.cookie_name, json.dumps(session_data))])
request1 = await make_request('GET', '/', \
self._middleware(self._auth_groups_callback))
context = [(Permission.Allow, Group.Everyone, ('test0',)),
(Permission.Deny, 'group1', ('test1',)),
(Permission.Allow, Group.Everyone, ('test1',)),]
self.assertTrue(await acl.get_permitted(request0, 'test0', context))
self.assertTrue(await acl.get_permitted(request1, 'test0', context))
self.assertFalse(await acl.get_permitted(request0, 'test1', context))
self.assertTrue(await acl.get_permitted(request1, 'test1', context))
async def _groups_callback(self, user_id):
"""Groups callback function that always returns two groups"""
return ('group0', 'group1')
async def _auth_groups_callback(self, user_id):
"""Groups callback function that always returns two groups"""
if user_id:
return ('group0', 'group1')
return ()
async def _none_groups_callback(self, user_id):
"""Groups callback function that always returns None"""
return None
def _middleware(self, acl_callback):
"""Returns the middlewares used in the test"""
if acl_callback:
return [
session_middleware(self.storage),
auth_middleware(self.auth),
acl_middleware(acl_callback)]
return [
session_middleware(self.storage),
auth_middleware(self.auth)]
|
454295
|
from __future__ import division, print_function
import sys
from numpy import zeros, ones, int32, hstack, newaxis, log, sqrt, arange,\
all, abs, where
from numpy.random import uniform, normal, seed, randint, choice, exponential
class HOSet(object):
"""
nstates - number of HO states to sample
max_samples - maximum number of samples to draw from a given HO
klow - minimum force constant
khi - maximum force constant
    randseed - random number generator seed (for repeatable systems)
sample_fudge - percent fudge in reduction from max_samples
"""
def __init__(self, nstates=2, max_samples=10, klow=1.0e-1, khi=1.0e1,
randseed=None, sample_fudge=0.0, unsampled_states=0):
self.max_samples = int(max_samples)
self.nstates = int(nstates)
# Randomize the HO parameters.
seed(randseed)
klow, khi = float(klow), float(khi)
#spacing = uniform(self.nstates, size=self.nstates)
#k = klow*(khi / klow)**(spacing / self.nstates)
# k = uniform(float(klow), float(khi), size=self.nstates)
k = klow + (khi - klow)*exponential(1.0, self.nstates)
sigma = sqrt(1/k)
x0 = uniform(-0.5*sigma.max(), 0.5*sigma.max(), size=self.nstates)
# Choose which states to sample from.
nsampled_states = self.nstates - int(unsampled_states)
sampled_indices = choice(arange(self.nstates), nsampled_states, False)
sampled_indices.sort()
# Generate samples up to max.
x_in = normal(0.0, 1.0, (nsampled_states, self.max_samples))
x_in *= sigma[sampled_indices, newaxis]
x_in += x0[sampled_indices, newaxis]
self.data_size = zeros(self.nstates, int32)
self.data_size[sampled_indices] += self.max_samples
# Randomly remove samples for uneven sampling. Note that at least one
# state must remain the same, otherwise max_samples is incorrect.
        # Also, we don't actually have to do anything to the actual samples,
        # because the sample size is used as a mask!
#
del_max = int(sample_fudge*self.max_samples + 0.5) + 1
if del_max > 1:
sample_shift = randint(0, del_max, nsampled_states)
if all(sample_shift > 0): # Randomly reset the shift for a state.
sample_shift[choice(arange(nsampled_states))] = 0
self.data_size[sampled_indices] -= sample_shift
self.unsampled_indices = where(self.data_size == 0)[0]
# Compute the energy in all states
u_ijn = 0.5*(k[:, newaxis]*(x_in[:, newaxis, :] - x0[:, newaxis])**2)
self.u_ijn = u_ijn
self.f_actual = 0.5*log(k / k[0])[1:]
self.x0 = x0
self.x_jn = x_in
@property
def data(self):
return self.u_ijn
@property
def unsampled_data(self):
"""Return just the energies from states with no samples"""
idx = self.unsampled_indices
u_jn_k = zeros((idx.size, self.data_size.sum()))
shift = 0
for i, n in enumerate(self.data_size):
if n == 0:
shift += 1
continue
j = self.data_size[:i].sum()
u_jn_k[:, j:(j+n)] = self.u_ijn[i-shift, idx, :n]
return u_jn_k
if __name__ == '__main__':
import sys
import copy
from timeit import Timer
from pynamd import MSMLE
try:
from pymbar import MBAR
do_pymbar = True
except ImportError:
do_pymbar = False
try:
mseed = int(sys.argv[1])
except IndexError:
mseed = None
klow = 1.0e-3
kmax = 5.0e2
dotest = True
dobench = False
if dotest:
samples_per_state = 500
nstates = 8
bootstrap_trials = 200
test = HOSet(nstates, samples_per_state, klow, kmax, mseed, 0.15, 2)
f3 = test.f_actual
msmle = MSMLE(test.data, test.data_size)
msmle.solve_uwham()
f1 = msmle.f
f1 -= f1[0]
ferr1 = sqrt(msmle.fvar[1:])
xbar1, varxbar1 = msmle.compute_expectations(test.x_jn)
ferr1_bs = zeros(f1.size)
varxbar1_bs = zeros(xbar1.size)
if bootstrap_trials > 1:
f1_bs = zeros((bootstrap_trials, f1.size))
xbar1_bs = zeros((bootstrap_trials, xbar1.size))
            for trial in range(bootstrap_trials):
msmle.resample()
msmle.solve_uwham(f1)
f1_bs[trial] = msmle.f
f1_bs[trial] -= msmle.f[0]
xbar1_bs[trial] = msmle.compute_expectations(test.x_jn, False)[0]
ferr1_bs = f1_bs.std(axis=0)[1:]
varxbar1_bs = xbar1_bs.var(axis=0)
msmle.revert_sample()
f1 = f1[1:]
if do_pymbar:
try:
mbar = MBAR(test.data, test.data_size)
f2, ferr2, t = mbar.getFreeEnergyDifferences()
f2 = f2[0][1:]
ferr2 = ferr2[0][1:]
xbar2, varxbar2 = mbar.computeExpectations(test.x_jn)
skipmbar = False
except:
print('MBAR choked!')
skipmbar = True
pass
else:
skipmbar = True
def print_float_array(msg, arr):
print('%-16s '%msg + ' '.join(('% 6.4f'%x for x in arr)))
print('samples:', test.data_size)
print_float_array('actual energies', f3)
print_float_array('uwham energies', f1)
print_float_array('uwham bst mean', f1_bs.mean(axis=0)[1:])
print_float_array('uwham act. err', abs(f1 - f3))
print_float_array('uwham est. err', ferr1)
print_float_array('uwham bst. err', ferr1_bs)
if not skipmbar:
print_float_array('mbar energies', f2)
print_float_array('mbar est. err', ferr2)
print_float_array('actual means', test.x0)
test_means = zeros(test.nstates)
test_means[msmle.mask_nonzero] += test.x_jn.mean(axis=1)
print_float_array('unweighted means', test_means)
print_float_array('uwham means', xbar1)
print_float_array('act. err', abs(xbar1 - test.x0))
print_float_array('est. err', sqrt(varxbar1))
print_float_array('bst. err', sqrt(varxbar1_bs))
if not skipmbar:
print_float_array('mbar means', xbar2)
print_float_array('act. err', abs(xbar2 - test.x0))
            print_float_array('est. err', sqrt(varxbar2))
if dobench:
for samples_per_state in (100,): #(50, 100, 400, 800):
for nstates in (1000,): #(80, 160): #(10, 20, 40, 320):
tu = Timer('test = HOSet(%d, %d, %f, %f); msmle = MSMLE(test.data, test.data_size); msmle.solve_uwham()'%(nstates, samples_per_state, klow, kmax), 'from msmle import MSMLE; from hotest import HOSet')
tm = Timer('test = HOSet(%d, %d, %f, %f); mbar = MBAR(test.data, test.data_size)'%(nstates, samples_per_state, klow, kmax), 'from pymbar import MBAR; from hotest import HOSet')
t1 = tu.timeit(1)
t2 = tm.timeit(1)
print(nstates, samples_per_state, t1, t2, t2/t1)
|
454334
|
import logging
logger = logging.getLogger(__name__)
TORRENT_CLIENTS = {}
try:
from .rtorrent import RTorrentClient
TORRENT_CLIENTS[RTorrentClient.identifier] = RTorrentClient
logger.debug('Enabled client %s' % RTorrentClient.identifier)
except ImportError:
logger.debug('Failed to enable client rtorrent')
try:
from .deluge import DelugeClient
TORRENT_CLIENTS[DelugeClient.identifier] = DelugeClient
logger.debug('Enabled client %s' % DelugeClient.identifier)
except ImportError:
logger.debug('Failed to enable client deluge')
try:
from .transmission import TransmissionClient
TORRENT_CLIENTS[TransmissionClient.identifier] = TransmissionClient
logger.debug('Enabled client %s' % TransmissionClient.identifier)
except ImportError:
logger.debug('Failed to enable client transmission')
try:
from .qbittorrent import QBittorrentClient
TORRENT_CLIENTS[QBittorrentClient.identifier] = QBittorrentClient
logger.debug('Enabled client %s' % QBittorrentClient.identifier)
except ImportError:
logger.debug('Failed to enable client qbittorrent')
|
454351
|
import os
import sys
import camoco as co
class Analysis(object):
"""
Perform an analysis based on CLI arguments:
set up, event loop, tear down
"""
    def __init__(self, args):
        # Init needs to just store args and other analysis level data
        self.args = args
        self.tag = "Analysis"
    def __call__(self):
        self.set_up()
        self.event_loop()
        self.tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
    def _generate_output_file(self, filename):
        if self.args.out != sys.stdout:
            self.args.out = "{}_Locality.csv".format(self.args.out.replace(".csv", ""))
        if os.path.dirname(self.args.out) != "":
            os.makedirs(os.path.dirname(self.args.out), exist_ok=True)
        if os.path.exists("{}_Locality.csv".format(self.args.out.replace(".csv", ""))):
            print(
                "{}_Locality.csv exists! Skipping!".format(self.args.out.replace(".csv", ""))
            )
            return None
def _build_camoco_objects(self):
pass
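# Minimal sketch of how a concrete analysis is expected to fill in the
# template methods above (hypothetical example, not part of camoco itself):
class ExampleAnalysis(Analysis):
    def set_up(self):
        # e.g. build camoco objects from self.args here
        self.results = []
    def event_loop(self):
        # iterate over whatever the analysis needs to process
        self.results.append("done")
    def tear_down(self):
        # write results / release resources
        print(self.tag, self.results)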
|
454371
|
import numpy as np
from scipy.sparse import csr_matrix
import warnings
from xclib.utils.sparse import csr_from_arrays, retain_topk
def topk(values, indices=None, k=10, sorted=False):
"""
Return topk values from a np.ndarray with support for optional
second array
Arguments:
---------
values: np.ndarray
select topk values based on this array
indices: np.ndarray or None, optional, default=None
second array; return corresponding entries for this array
as well; useful for key, value pairs
k: int, optional, default=10
k in top-k
sorted: boolean, optional, default=False
Sort the topk values or not
"""
assert values.shape[1] >= k, f"value has less than {k} values per row"
if indices is not None:
assert values.shape == indices.shape, \
f"Shape of values {values.shape} != indices {indices.shape}"
    # Don't do anything if n_cols = k or k = -1 (only meaningful when indices
    # are provided; with indices=None the top-k columns are computed below)
    if indices is not None and (k == indices.shape[1] or k == -1):
        return values, indices
if not sorted:
ind = np.argpartition(values, -k)[:, -k:]
else:
ind = np.argpartition(
values, list(range(-k, 0)))[:, -k:][:, ::-1]
val = np.take_along_axis(values, ind, axis=-1)
if indices is not None:
out = (val, np.take_along_axis(indices, ind, axis=-1))
else:
out = (val, ind)
return out
class Prediction(object):
"""
Class to store and manipulate predictions
    * This representation can be more suitable because:
    - We already know num_instances & top_k
    - space can be allocated in advance
    - updates are faster
Support for:
* OVA predictions
* Predictions with a label shortlist
    Uses num_labels as pad_ind; the padding column is removed when converting
    to a sparse matrix via data(format='sparse')
Predictions may have:
* (batch_size, num_labels+1) shape for dense predictions
* num_labels as entry in ind array
    Arguments:
    ----------
    num_instances: int
        length of the 0th dimension
    num_labels: int
        length of the 1st dimension; also used as the padding index
    k: int
        the k in top-k; store k values per instance
    pad_val: float, optional, default=-1e5
        default value of predictions
    fname: str or None, optional, default=None
        use memmap files and store on disk if a filename is provided
    """
def __init__(self, num_instances, num_labels, k, pad_val=-1e5, fname=None):
self.num_instances = num_instances
self.k = k
self.num_labels = num_labels
self.pad_ind = num_labels
self.pad_val = pad_val
self.indices = self._array(
fname + ".ind" if fname is not None else None,
fill_value=self.pad_ind,
dtype='int64')
self.values = self._array(
fname + ".val" if fname is not None else None,
fill_value=self.pad_val,
dtype='float32')
def _array(self, fname, fill_value, dtype):
if fname is None:
arr = np.full(
(self.num_instances, self.k),
fill_value=fill_value, dtype=dtype)
else:
arr = np.memmap(
fname, shape=(self.num_instances, self.k),
dtype=dtype, mode='w+')
arr[:] = fill_value
return arr
def data(self, format='sparse'):
"""Returns the predictions as a csr_matrix or indices & values arrays
"""
self.flush()
if format == 'sparse':
if not self.in_memory:
warnings.warn("Files on disk; will create copy in memory.")
return csr_from_arrays(
self.indices, self.values,
shape=(self.num_instances, self.num_labels+1))[:, :-1]
else:
return self.indices, self.values
def update_values(self, start_idx, vals, ind=None):
"""Update the entries as per given indices and values
"""
top_val, top_ind = self.topk(vals, ind)
_size = vals.shape[0]
self.values[start_idx: start_idx+_size, :] = top_val
self.indices[start_idx: start_idx+_size, :] = top_ind
def topk(self, vals, ind):
"""Assumes inputs are np.ndarrays/ Implement your own method
for some other type.
Output must be np.ndarrays
* if ind is None: will return corresponding indices of vals
typically used with OVA predictions
* otherwise: will use corresponding entries from ind
typically used with predictions with a label shortlist
"""
return topk(vals, ind, k=self.k)
@property
def in_memory(self):
return not isinstance(self.indices, np.memmap)
def flush(self):
if not self.in_memory:
self.indices.flush()
self.values.flush()
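if __name__ == '__main__':
    # Illustrative sanity check (assumed usage, not part of the library):
    # store OVA scores for 4 instances over 6 labels, keeping the top 2.
    rng = np.random.RandomState(0)
    scores = rng.rand(4, 6).astype('float32')
    pred = Prediction(num_instances=4, num_labels=6, k=2)
    pred.update_values(0, scores)              # indices inferred from scores
    ind, val = pred.data(format='dense')       # (4, 2) index/value arrays
    print(ind.shape, val.shape)
    print(pred.data(format='sparse').shape)    # (4, 6) csr_matrix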
|
454411
|
class Iterator(object):
"""Base class of all dataset iterators.
Iterator iterates over the dataset, yielding a minibatch at each
iteration. Minibatch is a list of examples. Each implementation should
implement an iterator protocol (e.g., the :meth:`__next__` method).
Note that, even if the iterator supports setting the batch size, it does
not guarantee that each batch always contains the same number of examples.
    For example, if you let the iterator stop at the end of the sweep, the
    last batch may contain fewer examples.
    The interface between the iterator and the underlying dataset is not
    fixed and is up to the implementation.
    Each implementation should provide the following attributes (they are not
    required to be writable).
- ``batch_size``: Number of examples within each minibatch.
- ``epoch``: Number of completed sweeps over the dataset.
- ``epoch_detail``: Floating point number version of the epoch. For
example, if the iterator is at the middle of the dataset at the third
epoch, then this value is 2.5.
- ``previous_epoch_detail``: The value of ``epoch_detail`` at the previous
iteration. This value is ``None`` before the first iteration.
- ``is_new_epoch``: ``True`` if the epoch count was incremented at the last
update.
Each implementation should also support serialization to resume/suspend the
iteration.
"""
def __del__(self):
self.finalize()
def __iter__(self):
"""Returns self."""
return self
def __next__(self):
"""Returns the next batch.
This is a part of the iterator protocol of Python. It may raise the
:class:`StopIteration` exception when it stops the iteration.
"""
raise NotImplementedError
def next(self):
"""Python2 alternative of ``__next__``.
It calls :meth:`__next__` by default.
"""
return self.__next__()
def finalize(self):
"""Finalizes the iterator and possibly releases the resources.
This method does nothing by default. Implementation may override it to
better handle the internal resources.
This method can be called multiple times.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.finalize()
def serialize(self, serializer):
"""Serializes the internal state of the iterator.
This is a method to support the serializer protocol of Chainer.
.. note::
It should only serialize the internal state that changes over the
iteration. It should not serialize what is set manually by
users such as the batch size.
"""
pass
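# A minimal sketch of a concrete iterator satisfying the protocol described
# above (illustrative only; real implementations also handle shuffling,
# repeat=False mode and serialization of their internal state):
class SimpleSequentialIterator(Iterator):
    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size
        self.epoch = 0
        self.is_new_epoch = False
        self.previous_epoch_detail = None
        self._offset = 0
    @property
    def epoch_detail(self):
        return self.epoch + self._offset / float(len(self.dataset))
    def __next__(self):
        self.previous_epoch_detail = self.epoch_detail
        batch = list(self.dataset[self._offset:self._offset + self.batch_size])
        self._offset += self.batch_size
        self.is_new_epoch = self._offset >= len(self.dataset)
        if self.is_new_epoch:
            self.epoch += 1
            self._offset = 0
        return batch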
|
454482
|
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name="mlagents_envs",
version="0.10.1",
description="Unity Machine Learning Agents Interface",
url="https://github.com/Unity-Technologies/ml-agents",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
packages=["mlagents.envs", "mlagents.envs.communicator_objects"], # Required
zip_safe=False,
install_requires=[
"cloudpickle",
"grpcio>=1.11.0",
"numpy>=1.13.3,<2.0",
"Pillow>=4.2.1",
"protobuf>=3.6",
],
python_requires=">=3.5",
)
|
454487
|
from __future__ import unicode_literals
import pytest # noqa
import os
import os.path
from pytest import raises
from textx import (get_location, metamodel_from_str,
metamodel_for_language,
register_language, clear_language_registrations)
import textx.scoping.providers as scoping_providers
import textx.scoping as scoping
import textx.exceptions
grammarA = """
Model: a+=A;
A:'A' name=ID;
"""
grammarB = """
reference A
Model: b+=B;
B:'B' name=ID '->' a=[A.A];
"""
grammarBWithImport = """
reference A as a
Model: imports+=Import b+=B;
B:'B' name=ID '->' a=[a.A];
Import: 'import' importURI=STRING;
"""
def register_languages():
clear_language_registrations()
global_repo = scoping.GlobalModelRepository()
global_repo_provider = scoping_providers.PlainNameGlobalRepo()
class A(object):
def __init__(self, **kwargs):
super(A, self).__init__()
for k, v in kwargs.items():
self.__dict__[k] = v
def __setattr__(self, name, value):
raise Exception("test: this is not allowed.")
def get_A_mm():
mm_A = metamodel_from_str(grammarA, global_repository=global_repo,
classes=[A])
mm_A.register_scope_providers({"*.*": global_repo_provider})
return mm_A
def get_B_mm():
mm_B = metamodel_from_str(grammarB, global_repository=global_repo)
mm_B.register_scope_providers({"*.*": global_repo_provider})
return mm_B
def get_BwithImport_mm():
mm_B = metamodel_from_str(grammarBWithImport,
global_repository=global_repo)
# define a default scope provider supporting the importURI feature
mm_B.register_scope_providers(
{"*.*": scoping_providers.FQNImportURI()})
return mm_B
register_language('A',
pattern="*.a",
metamodel=get_A_mm)
register_language('B',
pattern="*.b",
metamodel=get_B_mm)
register_language('BwithImport',
pattern="*.b",
metamodel=get_BwithImport_mm)
return global_repo_provider
def test_multi_metamodel_references1():
global_repo_provider = register_languages()
mm_A = metamodel_for_language('A')
mA = mm_A.model_from_str('''
A a1 A a2 A a3
''')
global_repo_provider.add_model(mA)
mm_B = metamodel_for_language('B')
mm_B.model_from_str('''
B b1 -> a1 B b2 -> a2 B b3 -> a3
''')
with raises(textx.exceptions.TextXSemanticError,
match=r'.*UNKNOWN.*'):
mm_B.model_from_str('''
B b1 -> a1 B b2 -> a2 B b3 -> UNKNOWN
''')
def test_multi_metamodel_references2():
mm_A = metamodel_from_str(grammarA)
mm_B = metamodel_from_str(grammarB)
global_repo_provider = scoping_providers.PlainNameGlobalRepo()
mm_B.register_scope_providers({"*.*": global_repo_provider})
mA = mm_A.model_from_str('''
A a1 A a2 A a3
''')
global_repo_provider.add_model(mA)
mm_B.model_from_str('''
B b1 -> a1 B b2 -> a2 B b3 -> a3
''')
with raises(textx.exceptions.TextXSemanticError,
match=r'.*UNKNOWN.*'):
mm_B.model_from_str('''
B b1 -> a1 B b2 -> a2 B b3 -> UNKNOWN
''')
def test_multi_metamodel_references_with_importURI():
# Use a global repo.
# This is useful, especially with circular includes or diamond shaped
# includes. Without such a repo, you might get double instantiations of
# model elements.
# However, if B includes A, but A not B, both meta models might have
# global repos on their own (global between model files of the same
# meta model --> global_repository=True). Circular dependencies
# will require shared grammars, like in test_metamodel_provider3.py,
# because it is not possible to share meta models for referencing, before
# the meta model is constructed (like in our example, mm_A cannot
    # reference mm_B, if mm_B already references mm_A, because one has to be
    # constructed first).
    # Add a custom __setattr__ for a rule used in the language which is imported
# via the importURI feature. This should test that the attr
# replacement also works for models not representing the "main outer
# model" of a load_from_xxx-call.
register_languages()
# Create two meta models with the global repo.
# The second meta model allows referencing the first one.
mm_A = metamodel_for_language('A')
mm_B = metamodel_for_language('BwithImport')
modelA = mm_A.model_from_str('''
A a1 A a2 A a3
''')
with raises(Exception,
match=r'.*test: this is not allowed.*'):
modelA.a[0].x = 1
# load a model from B which includes a model from A.
current_dir = os.path.dirname(__file__)
modelB = mm_B.model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'refs', 'b.b'))
# check that the classes from the correct meta model are used
# (and that the model was loaded).
assert modelB.b[0].__class__ == mm_B[modelB.b[0].__class__.__name__]
assert modelB.b[0].a.__class__ == mm_A[modelB.b[0].a.__class__.__name__]
with raises(Exception,
match=r'.*test: this is not allowed.*'):
modelB.b[0].a.x = 1
# -------------------------------------
class LibTypes:
""" Library for Typedefs:
type int
type string
"""
@staticmethod
def get_metamodel():
return metamodel_for_language('types')
@staticmethod
def library_init(repo_selector):
if repo_selector == "no global scope":
global_repo = False
elif repo_selector == "global repo":
global_repo = True
else:
raise Exception("unexpected parameter 'repo_selector={}'"
.format(repo_selector))
def get_metamodel():
mm = metamodel_from_str(
r'''
Model: types+=Type;
Type: 'type' name=ID;
Comment: /\/\/.*$/;
''',
global_repository=global_repo)
def check_type(t):
if t.name[0].isupper():
raise textx.exceptions.TextXSyntaxError(
"types must be lowercase",
**get_location(t)
)
mm.register_obj_processors({
'Type': check_type
})
return mm
register_language('types', pattern='*.type',
metamodel=get_metamodel)
class LibData:
""" Library for Datadefs:
data Point { x: int y: int}
data City { name: string }
data Population { count: int}
"""
@staticmethod
def get_metamodel():
return metamodel_for_language('data')
@staticmethod
def library_init(repo_selector):
if repo_selector == "no global scope":
global_repo = False
elif repo_selector == "global repo":
# get the global repo from the inherited meta model:
global_repo = LibTypes.get_metamodel()._tx_model_repository
else:
raise Exception("unexpected parameter 'repo_selector={}'"
.format(repo_selector))
def get_metamodel():
mm = metamodel_from_str(
r'''
reference types as t
Model: includes*=Include data+=Data;
Data: 'data' name=ID '{'
attributes+=Attribute
'}';
Attribute: name=ID ':' type=[t.Type];
Include: '#include' importURI=STRING;
Comment: /\/\/.*$/;
''',
global_repository=global_repo)
mm.register_scope_providers(
{"*.*": scoping_providers.FQNImportURI()})
return mm
register_language('data',
pattern='*.data',
metamodel=get_metamodel)
class LibFlow:
""" Library for DataFlows
algo A1 : Point -> City
algo A2 : City -> Population
connect A1 -> A2
"""
@staticmethod
def get_metamodel():
return metamodel_for_language('flow')
@staticmethod
def library_init(repo_selector):
if repo_selector == "no global scope":
global_repo = False
elif repo_selector == "global repo":
# get the global repo from the inherited meta model:
global_repo = LibData.get_metamodel()._tx_model_repository
else:
raise Exception("unexpected parameter 'repo_selector={}'"
.format(repo_selector))
def get_metamodel():
mm = metamodel_from_str(
r'''
reference data as d
Model: includes*=Include algos+=Algo flows+=Flow;
Algo: 'algo' name=ID ':' inp=[d.Data] '->' outp=[d.Data];
Flow: 'connect' algo1=[Algo] '->' algo2=[Algo] ;
Include: '#include' importURI=STRING;
Comment: /\/\/.*$/;
''',
global_repository=global_repo)
mm.register_scope_providers(
{"*.*": scoping_providers.FQNImportURI()})
def check_flow(f):
if f.algo1.outp != f.algo2.inp:
raise textx.exceptions.TextXSemanticError(
"algo data types must match",
**get_location(f)
)
mm.register_obj_processors({
'Flow': check_flow
})
return mm
register_language('flow',
pattern='*.flow',
metamodel=get_metamodel)
def test_multi_metamodel_types_data_flow1():
# this stuff normally happens in the python module directly of the
# third party lib
selector = "no global scope"
clear_language_registrations()
LibTypes.library_init(selector)
LibData.library_init(selector)
LibFlow.library_init(selector)
current_dir = os.path.dirname(__file__)
model1 = LibFlow.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_flow.flow')
)
    # although types.type is included twice, it is only present once
    # (scope providers share a common repo within one model and all
    # models loaded from that model)
assert 3 == len(model1._tx_model_repository.all_models)
# load the type model also used by model1
model2 = LibData.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_structures.data')
)
# load the type model also used by model1 and model2
model3 = LibTypes.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'types.type')
)
# the types (reloaded by the second model)
# are not shared with the first model
# --> no global repo
assert model1.algos[0].inp.attributes[0].type \
not in model2.includes[0]._tx_loaded_models[0].types
assert model1.algos[0].inp.attributes[0].type not in model3.types
def test_multi_metamodel_types_data_flow2():
# this stuff normally happens in the python module directly of the
# third party lib
selector = "global repo"
clear_language_registrations()
LibTypes.library_init(selector)
LibData.library_init(selector)
LibFlow.library_init(selector)
current_dir = os.path.dirname(__file__)
model1 = LibFlow.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_flow.flow')
)
    # although types.type is included twice, it is only present once
assert 3 == len(model1._tx_model_repository.all_models)
# load the type model also used by model1
model2 = LibData.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_structures.data')
)
# load the type model also used by model1 and model2
model3 = LibTypes.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'types.type')
)
# the types (reloaded by the second model)
# are shared with the first model
# --> global repo
assert model1.algos[0].inp.attributes[0].type \
in model2.includes[0]._tx_loaded_models[0].types
assert model1.algos[0].inp.attributes[0].type in model3.types
def test_multi_metamodel_types_data_flow_validation_error_in_types():
selector = "no global scope"
clear_language_registrations()
LibTypes.library_init(selector)
LibData.library_init(selector)
LibFlow.library_init(selector)
current_dir = os.path.dirname(__file__)
with raises(textx.exceptions.TextXSyntaxError,
match=r'.*lowercase.*'):
LibFlow.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_flow_including_error.flow')
)
def test_multi_metamodel_types_data_flow_validation_error_in_data_flow():
selector = "no global scope"
clear_language_registrations()
LibTypes.library_init(selector)
LibData.library_init(selector)
LibFlow.library_init(selector)
current_dir = os.path.dirname(__file__)
with raises(textx.exceptions.TextXSemanticError,
match=r'.*data types must match.*'):
LibFlow.get_metamodel().model_from_file(
os.path.join(current_dir, 'multi_metamodel', 'types_data_flow',
'data_flow_with_error.flow')
)
|
454518
|
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from gdbclientutils import *
# This test case is testing three things:
#
# 1. three register values will be provided in the ? stop packet (T11) -
# registers 0 ("rax"), 1 ("rbx"), and 3 ("rip")
# 2. ReadRegister packet will provide the value of register 2 ("rsi")
# 3. The "g" read-all-registers packet is not supported; p must be used
# to get the value of register 2 ("rsi")
#
# Forcing lldb to use the expedited registers in the stop packet and
# marking it an error to request that register value is to prevent
# performance regressions.
#
# Some gdb RSP stubs only implement p/P, they do not support g/G.
# lldb must be able to work with either.
class TestNoGPacketSupported(GDBRemoteTestBase):
@skipIfXmlSupportMissing
def test(self):
class MyResponder(MockGDBServerResponder):
def haltReason(self):
return "T02thread:1ff0d;threads:1ff0d;thread-pcs:000000010001bc00;00:7882773ce0ffffff;01:1122334455667788;03:00bc010001000000;"
def threadStopInfo(self, threadnum):
return "T02thread:1ff0d;threads:1ff0d;thread-pcs:000000010001bc00;00:7882773ce0ffffff;01:1122334455667788;03:00bc010001000000;"
def writeRegisters(self):
return "E02"
def readRegisters(self):
return "E01"
def readRegister(self, regnum):
# lldb will try sending "p0" to see if the p packet is supported,
# give a bogus value; in theory lldb could use this value in the
# register context and that would be valid behavior.
# notably, don't give values for registers 1 & 3 -- lldb should
                # get those from the ? stop packet ("T11") and it is a perf regression
# if lldb is asking for these register values.
if regnum == 0:
return "5555555555555555"
if regnum == 2:
return "c04825ebfe7f0000" # 0x00007ffeeb2548c0
return "E03"
def writeRegister(self, regnum):
return "OK"
def qXferRead(self, obj, annex, offset, length):
if annex == "target.xml":
return """<?xml version="1.0"?>
<target version="1.0">
<architecture>i386:x86-64</architecture>
<feature name="org.gnu.gdb.i386.core">
<reg name="rax" bitsize="64" regnum="0" type="code_ptr" group="general"/>
<reg name="rbx" bitsize="64" regnum="1" type="code_ptr" group="general"/>
<reg name="rsi" bitsize="64" regnum="2" type="code_ptr" group="general"/>
<reg name="rip" bitsize="64" regnum="3" type="code_ptr" group="general" altname="pc" generic="pc"/>
</feature>
</target>""", False
else:
return None, False
self.server.responder = MyResponder()
target = self.dbg.CreateTarget('')
if self.TraceOn():
self.runCmd("log enable gdb-remote packets")
self.addTearDownHook(
lambda: self.runCmd("log disable gdb-remote packets"))
process = self.connect(target)
thread = process.GetThreadAtIndex(0)
frame = thread.GetFrameAtIndex(0)
rax = frame.FindRegister("rax").GetValueAsUnsigned()
rbx = frame.FindRegister("rbx").GetValueAsUnsigned()
rsi = frame.FindRegister("rsi").GetValueAsUnsigned()
pc = frame.GetPC()
rip = frame.FindRegister("rip").GetValueAsUnsigned()
if self.TraceOn():
print("Register values: rax == 0x%x, rbx == 0x%x, rsi == 0x%x, pc == 0x%x, rip == 0x%x" % (rax, rbx, rsi, pc, rip))
self.assertEqual(rax, 0xffffffe03c778278)
self.assertEqual(rbx, 0x8877665544332211)
self.assertEqual(rsi, 0x00007ffeeb2548c0)
self.assertEqual(pc, 0x10001bc00)
self.assertEqual(rip, 0x10001bc00)
|
454620
|
import re
class Parser(object):
# json data with job parameters
__json_data = {}
def __init__(self, json_data):
self.__json_data = json_data
def get_repository_namespace(self):
repository_name = ''
if "repository" in self.__json_data and "homepage" in self.__json_data["repository"]:
match = re.match("[^:]*:([^/]*).*", self.__json_data["repository"]["url"])
repository_name = match.group(1)
return repository_name
def get_repository_url(self):
repository_url = ''
if "repository" in self.__json_data and "url" in self.__json_data["repository"]:
repository_url = self.__json_data["repository"]["url"]
return repository_url
def get_branch_name(self):
branch_name = ''
if "ref" not in self.__json_data:
return branch_name
branch_name = re.match("refs/heads/(.*)", self.__json_data["ref"])
return branch_name.group(1)
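    # Illustrative payloads (assumed webhook shapes, not taken from the source):
    #   {"ref": "refs/heads/main"}                         -> get_branch_name() == "main"
    #   {"repository": {"url": "git@host:owner/repo.git"}} -> get_repository_namespace() == "owner"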
def get_project_name(self):
project_name = ''
if "repository" in self.__json_data and "name" in self.__json_data["repository"]:
project_name = self.__json_data["repository"]["name"]
return project_name
|
454624
|
import sys
import json
import os
from os import path, environ
from .printing import *
from .compatibility import *
from .utils import safe_mkdir, strip_home
from .constants import ProjInfo
def get_xdg_config_path() -> str:
"""Returns path to $XDG_CONFIG_HOME, or ~/.config, if it doesn't exist."""
return environ.get('XDG_CONFIG_HOME') or path.join(path.expanduser('~'), '.config')
def get_config_path() -> str:
"""
Detects if in testing or prod env, and returns the right config path.
:return: Path to config.
"""
test_config_path = environ.get('SHALLOW_BACKUP_TEST_CONFIG_PATH', None)
if test_config_path:
return test_config_path
else:
return path.join(get_xdg_config_path(), "shallow-backup.conf")
def get_config() -> dict:
"""
:return Config.
"""
config_path = get_config_path()
with open(config_path) as file:
try:
config = json.load(file)
except json.decoder.JSONDecodeError:
print_red_bold(f"ERROR: Invalid syntax in {config_path}")
sys.exit(1)
return config
def write_config(config) -> None:
"""
Write to config file
"""
with open(get_config_path(), 'w') as file:
json.dump(config, file, indent=4)
def get_default_config() -> dict:
"""Returns a default, platform specific config."""
return {
"backup_path": "~/shallow-backup",
"dotfiles": {
".bash_profile": {
"backup_condition": "",
"reinstall_condition": "",
},
".bashrc": {
"backup_condition": "",
"reinstall_condition": "",
},
".config/git": {
"backup_condition": "",
"reinstall_condition": "",
},
".config/nvim/init.vim": {
"backup_condition": "",
"reinstall_condition": "",
},
".config/tmux": {
"backup_condition": "",
"reinstall_condition": "",
},
".config/zsh": {
"backup_condition": "",
"reinstall_condition": "",
},
".profile": {
"backup_condition": "",
"reinstall_condition": "",
},
".pypirc": {
"backup_condition": "",
"reinstall_condition": "",
},
".ssh": {
"backup_condition": "",
"reinstall_condition": "",
},
".zshenv": {
"backup_condition": "",
"reinstall_condition": "",
},
f"{strip_home(get_config_path())}": {
"backup_condition": "",
"reinstall_condition": "",
},
},
"root-gitignore": [
"dotfiles/.ssh",
"dotfiles/.pypirc",
".DS_Store"
],
"dotfiles-gitignore": [
".ssh",
".pypirc",
".DS_Store",
],
"config_mapping": get_config_paths(),
"lowest_supported_version": ProjInfo.VERSION
}
def safe_create_config() -> None:
"""
Creates config file if it doesn't exist already. Prompts to update
it if an outdated version is detected.
"""
backup_config_path = get_config_path()
# If it doesn't exist, create it.
if not os.path.exists(backup_config_path):
print_path_blue("Creating config file at:", backup_config_path)
backup_config = get_default_config()
safe_mkdir(os.path.split(backup_config_path)[0])
write_config(backup_config)
def delete_config_file() -> None:
"""Delete config file."""
config_path = get_config_path()
if os.path.isfile(config_path):
print_red_bold("Deleting config file.")
os.remove(config_path)
else:
print_red_bold("ERROR: No config file found.")
def add_dot_path_to_config(backup_config: dict, file_path: str) -> dict:
"""
Add dotfile to config with default reinstall and backup conditions.
Exit if the file_path parameter is invalid.
:backup_config: dict representing current config
:file_path: str relative or absolute path of file to add to config
:return new backup config
"""
abs_path = path.abspath(file_path)
if not path.exists(abs_path):
print_path_red("Invalid file path:", abs_path)
return backup_config
else:
stripped_home_path = strip_home(abs_path)
print_path_blue("Added:", stripped_home_path)
backup_config["dotfiles"][stripped_home_path] = {
"reinstall_condition": "",
"backup_condition": ""
}
return backup_config
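# Example (illustrative, assumes strip_home() drops the $HOME prefix):
# add_dot_path_to_config(config, "/home/<user>/.vimrc") adds a ".vimrc" entry under
# config["dotfiles"] with empty backup and reinstall conditions.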
def show_config():
"""
Print the config. Colorize section titles and indent contents.
"""
print_section_header("SHALLOW BACKUP CONFIG", Fore.RED)
for section, contents in get_config().items():
# Print backup path on same line
if section == "backup_path":
print_path_red("Backup Path:", contents)
elif section == "config_mapping":
print_red_bold("\nConfigs:")
for path, dest in contents.items():
print(f" {path} -> {dest}")
# Print section header and contents. (Dotfiles)
elif section == "dotfiles":
print_path_red("\nDotfiles:", "(Backup and Reinstall conditions will be shown if they exist)")
for dotfile, options in contents.items():
backup_condition = options['backup_condition']
reinstall_condition = options['reinstall_condition']
if backup_condition or reinstall_condition:
print(f" {dotfile} ->")
print(f"\t\tbackup_condition: \"{backup_condition}\"")
print(f"\t\treinstall_condition: \"{reinstall_condition}\"")
else:
print(f" {dotfile}")
elif section == "lowest_supported_version":
print_path_red(f"{section.replace('_', ' ').capitalize()}:", contents)
else:
print_red_bold(f"\n{section.replace('-', ' ').capitalize()}: ")
for item in contents:
print(f" {item}")
|
454638
|
import bge
def main():
cont = bge.logic.getCurrentController()
own = cont.owner
sens = cont.sensors['mySensor']
actu = cont.actuators['myActuator']
if sens.positive:
cont.activate(actu)
else:
cont.deactivate(actu)
main()
|
454640
|
from .trace_helper import *
from .parser import *
from .node_transformer import *
from .script_helper import *
|
454649
|
from lego.apps.permissions.constants import LIST, VIEW
from lego.apps.permissions.permissions import PermissionHandler
class EmojiPermissionHandler(PermissionHandler):
authentication_map = {LIST: False, VIEW: False}
|
454667
|
from torchvision.transforms.functional import normalize
import torch.nn as nn
import numpy as np
def denormalize(tensor, mean, std):
mean = np.array(mean)
std = np.array(std)
_mean = -mean/std
_std = 1/std
return normalize(tensor, _mean, _std)
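# Why this inverts torchvision's normalize: normalize(x, m, s) computes (x - m) / s,
# so with m = -mean/std and s = 1/std the result is (x + mean/std) * std = x*std + mean,
# which undoes the original (x - mean) / std.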
class Denormalize(object):
def __init__(self, mean, std):
mean = np.array(mean)
std = np.array(std)
self._mean = -mean/std
self._std = 1/std
def __call__(self, tensor):
if isinstance(tensor, np.ndarray):
return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
return normalize(tensor, self._mean, self._std)
def fix_bn(model):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
def color_map(dataset):
if dataset=='streethazards':
return streethazards_cmap()
def streethazards_cmap():
# Left: modified labels after -1. Right: original label
# total classes on which the model is trained: 13 (12 common classes + sky/unlabeled)
# total classes with anomaly: 14
cmap = np.zeros((256, 3))
cmap[255] = [0, 0, 0] # // padding = -1 (n.d.) # padding, to ignore
cmap[0] = [0, 0, 0] # // unlabeled = 0 (1) # sky and unlabeled (used in training)
cmap[1] = [70, 70, 70] # // building = 1 (2),
cmap[2] = [190, 153, 153] # // fence = 2 (3),
cmap[3] = [250, 170, 160] # // other = 3 (4), # background
cmap[4] = [220, 20, 60] # // pedestrian = 4 (5),
cmap[5] = [153, 153, 153] # // pole = 5 (6),
cmap[6] = [157, 234, 50] # // road line = 6 (7),
cmap[7] = [128, 64, 128] # // road = 7 (8),
cmap[8] = [244, 35, 232] # // sidewalk = 8 (9),
cmap[9] = [107, 142, 35] # // vegetation = 9 (10),
cmap[10] = [0, 0, 142] # // car = 10 (11),
cmap[11] = [102, 102, 156] # // wall = 11 (12),
cmap[12] = [220, 220, 0] # // traffic sign = 12 (13),
cmap[13] = [60, 250, 240] # // anomaly = 13 (14)
return cmap
class Label2Color(object):
def __init__(self, cmap):
self.cmap = cmap
def __call__(self, lbls):
return self.cmap[lbls]
|
454672
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import interactive, style
import numpy as np
import random
import serial
# Initialize serial port
ser = serial.Serial()
ser.port = '/dev/cu.SLAB_USBtoUART'  # Connected serial port
ser.baudrate = 115200
ser.timeout = 50
ser.open()
if ser.is_open:
print("\nAll right, serial port now open. Configuration:\n")
print(ser, "\n")
# Create figure for plotting
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = [] # Store trials here (n)
ys = [] # Store relative frequency here
rs = [] # For theoretical probability
def animate(i, xs, ys):
""" This function is called periodically from FuncAnimation """
line = ser.readline() # ascii
line_as_list = line.split(b',')
i = int(line_as_list[0])
relProb = line_as_list[1]
relProb_as_list = relProb.split(b'\n')
relProb_float = float(relProb_as_list[0])
# Add x and y to lists
xs.append(i)
ys.append(relProb_float)
rs.append(0.5)
# Limit x and y lists to 20 items
# xs = xs[-20]
# ys = ys[-20]
# Draw x and y lists
ax.clear()
ax.plot(xs, ys, label="Experimental Probability")
ax.plot(xs, rs, label="Theoretical Probability")
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('This is how I roll...')
plt.ylabel('Relative Frequency')
plt.legend()
plt.axis([1, None, 0, 1.1]) # Use for arbitrary number of trials
# plt.axis([1, 100, 0, 1.1]) # Use for 100 trial demo
# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=1000)
plt.show()
|
454691
|
SECRET_KEY = "12345"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"wagtail.users",
"wagtail.admin",
"wagtail.images",
"wagtail.core",
"tests.my_app",
]
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": 2,
},
}
]
ROOT_URLCONF = "tests.urls"
|
454706
|
import csv
data = []
with open('output_edited.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in reader:
data.append(row)
filteredData = []
firstRow = True
for row in data:
rowData = row[0].split(',')
if firstRow:
filteredData.append(row)
firstRow = False
elif (rowData[3] == '1.0') and (rowData[4] == '1.0') and (rowData[5] == '1.0') and (rowData[6] == '1.0') and (rowData[7] == '1.0') and (rowData[8] == '1.0'):
filteredData.append(row)
with open('filtered_data.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in filteredData:
writer.writerow(row)
|
454736
|
from migrate import *
def upgrade(migrate_engine):
migrate_engine.execute('''
BEGIN;
CREATE INDEX idx_revision_author ON "revision" (author);
CREATE INDEX idx_openid ON "user" (openid);
CREATE INDEX "idx_user_name_index" on "user"((CASE WHEN ("user".fullname IS NULL OR "user".fullname = '') THEN "user".name ELSE "user".fullname END));
COMMIT;
'''
)
|
454738
|
import json
import os
import swapi
edge_lookup = {
"films": "Film",
"homeworld": "Planet",
"species": "Species",
"starships": "Starship",
"vehicles": "Vehicle",
"residents": "Character",
"characters": "Character",
"planets": "Planet",
"species": "Species",
"people": "Character",
"pilots": "Character",
}
int_vars = [
"height", "mass", # character
"orbital_period", "population", "rotation_period", "surface_water" # planet
"average_height", "average_lifespan", # species
"cargo_capacity", "passengers", "crew", "cost_in_credits", "max_atmosphering_speed" # vehicle /starship
]
float_vars = [
"gravity", # planet
"hyperdrive_rating", "length" # starship / vehicle
]
str_list_vars = [
"producer", # films
"manufacturer" # starship / vehicle
"climate", "terrain", # planet
"eye_colors", "hair_colors", "skin_colors" # species
]
sys_vars = [
"created", "edited"
]
def create_vertex(label, data):
id = data["url"].replace("https://swapi.co/api/", "").strip("/").split("/")[1]
gid = "%s:%s" % (label, id)
tdata = {"system": {}}
for k, v in data.items():
if v == "n/a":
v = None
if k in int_vars:
try:
tdata[k] = int(v)
except Exception:
tdata[k] = None
elif k in float_vars:
try:
tdata[k] = float(v)
except Exception:
tdata[k] = None
elif k in str_list_vars:
try:
tdata[k] = [x.strip() for x in v.split(",")]
except Exception:
tdata[k] = []
elif k in sys_vars:
tdata["system"][k] = v
elif k in edge_lookup:
continue
else:
tdata[k] = v
return {"gid": gid, "label": label, "data": tdata}
def create_edge(label, fid, tid):
flab, fid = fid.replace("https://swapi.co/api/", "").strip("/").split("/")
tlab, tid = tid.replace("https://swapi.co/api/", "").strip("/").split("/")
fid = "%s:%s" % (edge_lookup[flab], fid)
tid = "%s:%s" % (edge_lookup[tlab], tid)
return {"gid": "(%s)-[%s]->(%s)" % (fid, label, tid),
"label": label, "from": fid, "to": tid}
def create_all_edges(doc):
edges = []
for k, v in doc.items():
if k in edge_lookup:
if isinstance(v, list):
for tid in v:
edges.append(create_edge(k, doc["url"], tid))
elif isinstance(v, str):
edges.append(create_edge(k, doc["url"], v))
elif v is None:
continue
else:
raise TypeError("unexpected type encountered for edge key %s: %s" % (k, type(v)))
return edges
films = swapi.get_all('films').items
people = swapi.get_all('people').items
planets = swapi.get_all('planets').items
species = swapi.get_all('species').items
starships = swapi.get_all('starships').items
vehicles = swapi.get_all('vehicles').items
nmap = {"Film": films, "Character": people, "Planet": planets,
"Species": species, "Starship": starships, "Vehicle": vehicles}
vert_fh = open("swapi_vertices.json", "w")
edge_fh = open("swapi_edges.json", "w")
for label, nodes in nmap.items():
for node in nodes:
node = node.__dict__
v = create_vertex(label, node)
vert_fh.write(json.dumps(v))
vert_fh.write(os.linesep)
for e in create_all_edges(node):
edge_fh.write(json.dumps(e))
edge_fh.write(os.linesep)
vert_fh.close()
edge_fh.close()
|
454748
|
import logging
from typing import Union
from discord import User, Member, Guild
from discord.ext import commands
log = logging.getLogger(__name__)
class MemberUpdates(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_ban(self, guild: Guild, user: Union[User, Member]) -> None:
"""Event Listener which is called when a user gets banned from a Guild.
Args:
guild (Guild): The guild the user got banned from.
            user (Union[User, Member]): The user that got banned.
                Can be either User or Member, depending on whether the user was in the guild at the time of removal.
Note:
This requires Intents.bans to be enabled.
For more information:
https://discordpy.readthedocs.io/en/latest/api.html#discord.on_member_ban
"""
log.info(f"{user} was banned from {guild.name}")
@commands.Cog.listener()
async def on_member_unban(self, guild: Guild, user: User) -> None:
"""Event Listener which is called when a user gets unbanned from a Guild.
Args:
guild (Guild): The guild the user got unbanned from.
user (User): The user that got unbanned.
Note:
This requires Intents.bans to be enabled.
For more information:
https://discordpy.readthedocs.io/en/latest/api.html#discord.on_member_unban
"""
log.info(f"{user} was unbanned from {guild.name}")
@commands.Cog.listener()
async def on_member_join(self, member: Member) -> None:
"""Event Listener which is called when a Member joins a Guild.
Args:
member (Member): The member who joined.
Note:
This requires Intents.members to be enabled.
For more information:
https://discordpy.readthedocs.io/en/latest/api.html#discord.on_member_join
"""
log.info(f"{member} has joined {member.guild.name}.")
@commands.Cog.listener()
async def on_member_remove(self, member: Member) -> None:
"""Event Listener which is called when a Member leaves a Guild.
Args:
member (Member): The member who left.
Note:
This requires Intents.members to be enabled.
For more information:
https://discordpy.readthedocs.io/en/latest/api.html#discord.on_member_remove
"""
log.info(f"{member} has left {member.guild.name}.")
@commands.Cog.listener()
async def on_member_update(self, before: Member, after: Member) -> None:
"""Event Listener which is called when a Member updates their profile.
Args:
before (Member): The updated member’s old info.
after (Member): The updated member’s updated info.
Note:
This requires Intents.members to be enabled.
For more information:
https://discordpy.readthedocs.io/en/latest/api.html#discord.on_member_update
"""
def setup(bot: commands.Bot) -> None:
"""Load the member_updates cog."""
bot.add_cog(MemberUpdates(bot))
log.info("Listener loaded: member_updates")
|
454756
|
import os.path
import sys
import argparse
import logging
from . import doc, blog, analysis, model
def main():
parser = argparse.ArgumentParser(
description = "Renders a Countershape documentation tree."
)
parser.add_argument(
"-o", "--option", type=str,
action="append", dest="options",
default = [],
help="Add option to document namespace."
)
parser.add_argument(
"-d", "--dummy",
action="store_true", dest="dummy", default=False,
help="Perform a dummy run - don't render any files."
)
group = parser.add_argument_group("Analysis")
group.add_argument(
"-s", "--structure",
action="store_true", dest="structure", default=False,
help="Show site structure."
)
group.add_argument(
"--blog-tags",
action="store_true", dest="blogtags", default=False,
help="Show blog tag histogram."
)
group.add_argument(
"--blog-notags",
action="store_true", dest="blognotags", default=False,
help="Show blog posts with no tags."
)
group.add_argument(
"--blog-has-option",
action="store", type=str, dest="bloghasoption", default=False,
help="Show blog posts with option."
)
group.add_argument(
"--blog-has-no-option",
action="store", type=str, dest="bloghasnooption", default=False,
help="Show blog posts without option."
)
group.add_argument(
"src",
help="Source directory"
)
group.add_argument(
"dst",
help="Destination directory",
nargs="?"
)
args = parser.parse_args()
analysis_options = [
"structure",
"blogtags",
"blognotags",
"bloghasoption",
"bloghasnooption"
]
if any(getattr(args, i) for i in analysis_options):
if args.dst:
parser.error("Analysis options don't take a destination.")
else:
if not args.dst:
parser.error("Render destination required.")
if os.path.abspath(args.dst) == os.path.abspath(args.src):
parser.error(
"Refusing to render documentation source onto itself."
)
d = doc.Doc(args.src, args.options)
if args.structure:
d.root.dump()
elif args.blogtags:
analysis.blog_tags(d)
elif args.blognotags:
analysis.blog_notags(d)
elif args.bloghasoption:
analysis.blog_has_option(d, args.bloghasoption)
elif args.bloghasnooption:
analysis.blog_has_no_option(d, args.bloghasnooption)
elif not args.dummy:
def render():
d = doc.Doc(args.src, args.options)
try:
d.render(args.dst)
except model.exceptions.ApplicationError, v:
print >> sys.stderr, "Error in %s"%v.page.src
print >> sys.stderr, "\t", v
return
lst = filter(
lambda x: isinstance(x, blog.Post), d.root.preOrder()
)
for i in lst:
if i.changed:
print >> sys.stderr, "Rewriting %s"%i.src
i.rewrite()
return d
render()
|
454758
|
import unittest, random, sys, time, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_import as h2i, h2o_util
def define_create_frame_params(SEED):
paramDict = {
# minimum of 5 rows to cover the 5 cluster case
'rows': [5, 100, 1000],
'cols': [10, 100], # Number of data columns (in addition to the first response column)
'seed': [None, 1234],
'randomize': [None, 1], # Avoid all constant (randomize=0) -> all points would be the same (except for NAs) -> can't find K clusters
'value': [None, 0, 1234567890, 1e6, -1e6], # Constant value (for randomize=false)
'real_range': [None, 0, 1234567890, 1e6, -1e6], # -range to range
'categorical_fraction': [None, 0.1, 1.0], # Fraction of integer columns (for randomize=true)
'binary_fraction': [0], # Fraction of binary columns (for randomize=true)
'factors': [None, 2, 10], # Factor levels for categorical variables
'integer_fraction': [None, 0.1, 1.0], # Fraction of integer columns (for randomize=true)
'integer_range': [None, 0, 1, 1234567890], # -range to range
'missing_fraction': [None, 0.1],
'response_factors': [None, 1, 2, 10], # Number of factor levels of the first column (1=real, 2=binomial, N=multinomial)
}
return paramDict
def define_KMeans_params(SEED):
paramDict = {
        'k': [2, 5], # seems too slow with 12 clusters if all cols
'initialization': ['None', 'PlusPlus', 'Furthest'],
'ignored_cols': [None, "0", "3", "0,2"],
'seed': [None, 12345678, SEED],
'normalize': [None, 0, 1],
'max_iter': [1,14],
# 'destination_key:': "junk",
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(3,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_create_frame_fvec(self):
for trial in range(20):
cfParamDict = define_create_frame_params(SEED)
# default
params = {
'rows': 5,
'cols': 10
}
h2o_util.pickRandParams(cfParamDict, params)
b = params.get('binary_fraction', None)
i = params.get('integer_fraction', None)
c = params.get('categorical_fraction', None)
r = params.get('randomize', None)
v = params.get('value', None)
# h2o does some strict checking on the combinations of these things
# fractions have to add up to <= 1 and only be used if randomize
# h2o default randomize=1?
if not b:
b = 0
if not i:
i = 0
if not c:
c = 0
            # force a good combo, by decreasing the two fractions a little at a time
while (i + b + c) > 1.0:
print "Trying to find a good mix of fractional", b, i, c
b = max(0, b - 0.13)
i = max(0, i - 0.17)
# what's left
c = 1.0 - (i + b)
params['binary_fraction'] = b
params['integer_fraction'] = i
params['categorical_fraction'] = c
params['value'] = None
kwargs = params.copy()
timeoutSecs = 300
hex_key = 'temp_%s.hex' % trial
cfResult = h2o.nodes[0].create_frame(key=hex_key, timeoutSecs=timeoutSecs, **kwargs)
inspect = h2o_cmd.runInspect(None, hex_key)
print "\n%s" % hex_key, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
kmeansParamDict = define_KMeans_params(SEED)
# default
params = {
'max_iter': 20,
'k': 1,
'destination_key': "KM_" + str(trial) + '.hex'
}
h2o_kmeans.pickRandKMeansParams(kmeansParamDict, params)
kwargs = params.copy()
start = time.time()
parseResult = {'destination_key': hex_key }
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "kmeans trial %s end on ", trial, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_kmeans.simpleCheckKMeans(self, kmeans, **kwargs)
### print h2o.dump_json(kmeans)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
454768
|
from typing import Dict, Optional
from requests_html import HTML, HTMLSession
from nitter_scraper.schema import Profile # noqa: I100, I202
def username_cleaner(username: str) -> str:
"""Strips @ symbol from a username.
Example:
@dgnsrekt -> dgnsrekt
Args:
username: username with @ symbol to remove.
Returns:
Username with @ symbol stripped.
"""
return username.replace("@", "")
def link_parser(element: HTML) -> str:
"""Gets the first link from an html element
Used for the profiles website, photo and banner links.
Args:
element: HTML element with a link to parse.
Returns:
First link from a collection of links.
"""
return list(element.links)[0]
def parse_user_id_from_banner(banner_url: str) -> str:
"""Parses the users id from the users banner photo url.
The user id can only be parsed from the banner photos url.
Example:
```
/pic/profile_banners%2F2474416796%2F1600567028%2F1500x500 -> 2474416796
^ ^
| |
----------
user id section in banner link
```
Args:
banner_url: URL of the profiles banner photo.
Returns:
The target profiles user id.
"""
return banner_url.split("%2F")[1]
def stat_cleaner(stat: str) -> int:
"""Cleans and converts single stat.
Used for the tweets, followers, following, and likes count sections.
Args:
stat: Stat to be cleaned.
Returns:
A stat with commas removed and converted to int.
"""
return int(stat.replace(",", ""))
def profile_parser(elements: Dict) -> Dict:
"""Converts parsed sections to text.
Cleans and processes a dictionary of gathered html elements.
Args:
elements: Elements prepared to clean and convert.
Returns:
A dictionary of element sections cleaned and converted to their finalized types.
"""
elements["username"] = username_cleaner(elements["username"].text)
elements["name"] = elements["name"].text
if elements.get("location"):
elements["location"] = elements["location"].text
elements["is_verified"] = True if elements.get("is_verified") else False
elements["is_private"] = True if elements.get("is_private") else False
if elements.get("biography"):
elements["biography"] = elements["biography"].text
if elements.get("website"):
elements["website"] = link_parser(elements["website"])
if elements.get("profile_photo"):
elements["profile_photo"] = link_parser(elements["profile_photo"])
if elements.get("banner_photo"):
elements["banner_photo"] = link_parser(elements["banner_photo"])
elements["user_id"] = parse_user_id_from_banner(elements["banner_photo"])
if elements.get("tweets_count"):
elements["tweets_count"] = stat_cleaner(elements["tweets_count"].text)
if elements.get("following_count"):
elements["following_count"] = stat_cleaner(elements["following_count"].text)
if elements.get("followers_count"):
elements["followers_count"] = stat_cleaner(elements["followers_count"].text)
if elements.get("likes_count"):
elements["likes_count"] = stat_cleaner(elements["likes_count"].text)
return elements
def html_parser(html: HTML) -> Dict:
"""Parses HTML element into individual sections
Given an html element the html_parser will search for each profile section using
CSS selectors. All parsed html elements are gathered into a dictionary and returned.
Args:
html: HTML element from a successful nitter profile scraped response.
Returns:
A dictionary of found elements from the parsed sections.
"""
elements = {}
elements["username"] = html.find(".profile-card-username", first=True)
elements["name"] = html.find(".profile-card-fullname", first=True)
elements["biography"] = html.find(".profile-bio", first=True)
elements["location"] = html.find(".profile-location", first=True)
elements["is_verified"] = html.find(
".profile-card-fullname .icon-container .verified-icon", first=True
)
elements["is_private"] = html.find(
".profile-card-fullname .icon-container .icon-lock", first=True
)
elements["profile_photo"] = html.find(".profile-card-avatar", first=True)
elements["banner_photo"] = html.find(".profile-banner a", first=True)
elements["website"] = html.find(".profile-website", first=True)
profile_statlist = html.find(".profile-statlist", first=True)
elements["tweets_count"] = profile_statlist.find(".posts .profile-stat-num", first=True)
elements["following_count"] = profile_statlist.find(".following .profile-stat-num", first=True)
elements["followers_count"] = profile_statlist.find(".followers .profile-stat-num", first=True)
elements["likes_count"] = profile_statlist.find(".likes .profile-stat-num", first=True)
elements = {k: v for k, v in elements.items() if v is not None}
return elements
def get_profile(
username: str, not_found_ok: bool = False, address: str = "https://nitter.net"
) -> Optional[Profile]:
"""Scrapes nitter for the target users profile information.
Args:
username: The target profiles username.
not_found_ok: If not_found_ok is false (the default), a ValueError is raised if the target
profile doesn't exist. If not_found_ok is true, None will be returned instead.
address: The address to scrape profile data from. The default scrape location is
'https://nitter.net' which should be used as a backup. This value will normally be
replaced by the address of a local docker container instance of nitter.
Returns:
Profile object if successfully scraped, otherwise None.
Raises:
ValueError: If the target profile does not exist and the not_found_ok argument is false.
"""
url = f"{address}/{username}"
session = HTMLSession()
response = session.get(url)
if response.status_code == 200: # user exists
elements = html_parser(response.html)
parsed_elements = profile_parser(elements)
return Profile.from_dict(parsed_elements)
if not_found_ok:
return None
else:
raise ValueError(f'Oops! Either "{username}" does not exist or is private.')
|
454772
|
from .tool.func import *
def login_login_2(conn):
curs = conn.cursor()
ip = ip_check()
if ip_or_user(ip) == 0:
return redirect('/user')
if ban_check(None, 'login') == 1:
return re_error('/ban')
if flask.request.method == 'POST':
if captcha_post(flask.request.form.get('g-recaptcha-response', flask.request.form.get('g-recaptcha', ''))) == 1:
return re_error('/error/13')
else:
captcha_post('', 0)
user_agent = flask.request.headers.get('User-Agent', '')
user_id = flask.request.form.get('id', '')
user_data = {}
curs.execute(db_change(
            'select name, data from user_set where id = ? and (name = "pw" or name = "encode")'
), [user_id])
sql_data = curs.fetchall()
if not sql_data:
return re_error('/error/2')
else:
for i in sql_data:
user_data[i[0]] = i[1]
if pw_check(
flask.request.form.get('pw', ''),
user_data['pw'],
user_data['encode'],
user_id
) != 1:
return re_error('/error/10')
curs.execute(db_change('select data from user_set where name = "2fa" and id = ?'), [user_id])
fa_data = curs.fetchall()
if fa_data and fa_data[0][0] != '':
flask.session['login_id'] = user_id
return redirect('/login/2fa')
else:
flask.session['id'] = user_id
ua_plus(user_id, ip, user_agent, get_time())
conn.commit()
return redirect('/user')
else:
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('login'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<form method="post">
<input placeholder="''' + load_lang('id') + '''" name="id" type="text">
<hr class=\"main_hr\">
<input placeholder="''' + load_lang('password') + '''" name="pw" type="password">
<hr class=\"main_hr\">
''' + captcha_get() + '''
<button type="submit">''' + load_lang('login') + '''</button>
''' + http_warning() + '''
</form>
''',
menu = [['user', load_lang('return')]]
))
|
454796
|
from test.integration.base import DBTIntegrationTest, use_profile
import yaml
class TestAlterColumnTypes(DBTIntegrationTest):
@property
def schema(self):
return '056_alter_column_types'
def run_and_alter_and_test(self, alter_column_type_args):
self.assertEqual(len(self.run_dbt(['run'])), 1)
self.run_dbt(['run-operation', 'test_alter_column_type', '--args', alter_column_type_args])
self.assertEqual(len(self.run_dbt(['test'])), 1)
|
454803
|
from .optimizer import Optimizer
from .entmoot_minimize import entmoot_minimize
from .entmootopti import EntmootOpti
__all__ = [
"Optimizer","entmoot_minimize"
]
|
454807
|
import sys
import click
@click.command('me', short_help='Update current user')
@click.option('--name', help='Name of user')
@click.option('--email', help='Email address (login username)')
@click.option('--password', help='Password')
@click.option('--status', help='Status eg. active, inactive')
@click.option('--text', help='Description of user')
@click.pass_obj
def cli(obj, name, email, password, status, text):
"""Update current user details, including password reset."""
if not any([name, email, password, status, text]):
click.echo('Nothing to update.')
sys.exit(1)
client = obj['client']
try:
user = client.update_me(name=name, email=email, password=password, status=status, attributes=None, text=text)
except Exception as e:
click.echo(f'ERROR: {e}', err=True)
sys.exit(1)
click.echo(user.id)
|
454825
|
import subprocess
import unittest
import sys
import httplib
import os
import sys
import itertools
import time
from urlparse import urljoin
exe = os.environ['EXECUTABLE']
class BlackboxTestCase(unittest.TestCase):
"""Test suite that runs every test case on a single
instance of server
"""
@classmethod
def _create_http_connection(cls):
return httplib.HTTPConnection(host='127.0.0.1', port=5000)
def setUp(self):
self.proc = subprocess.Popen([exe])
self.conn = self._create_http_connection()
def request(self, *args, **kwargs):
for retry in xrange(10):
try:
self.conn.request(*args, **kwargs)
res = self.conn.getresponse()
return res
except Exception as e:
sys.stderr.write('Cannot send request ({0}).. retry {1}\n'.format(e, retry))
time.sleep(0.1)
self.conn = self._create_http_connection()
continue
raise httplib.CannotSendRequest()
def tearDown(self):
# The process should stop after receiving GET /cancel/
res = self.request('GET', '/cancel/')
self.assertEqual(res.read(), 'success=0\n')
self.conn.close()
exit_code = self.proc.wait()
self.assertEqual(exit_code, 0)
def test_get(self):
res = self.request('GET', '/get/')
self.assertEqual(res.status, 200)
lines = res.read().splitlines()
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], 'url=/get/')
self.assertEqual(lines[1], 'Host=127.0.0.1:5000')
self.assertEqual(lines[2], 'Accept-Encoding=identity')
self.assertEqual(lines[3], 'total_headers=2')
self.assertEqual(res.getheader('Transfer-Encoding'), 'chunked')
def test_invalid_method(self):
res = self.request('POST', '/get/')
self.assertEqual(res.status, 405)
self.assertEqual(res.read(), '')
self.assertEqual(res.getheader('Transfer-Encoding'), 'chunked')
def test_post(self):
res = self.request('POST', '/post/', 'hello world!')
self.assertEqual(res.status, 200)
self.assertEqual(res.read(), 'body=hello world!\n')
self.assertEqual(res.getheader('Transfer-Encoding'), 'chunked')
def test_not_found(self):
res = self.request('GET', '/page/not/found/')
self.assertEqual(res.status, 404)
self.assertEqual(res.read(), '')
def test_set_headers(self):
res = self.request('GET', '/set-headers/')
self.assertEqual(res.status, 200)
self.assertEqual(res.read(), 'url=/set-headers/\n')
self.assertEqual(res.getheader('Key0'), 'Value0')
self.assertEqual(res.getheader('Key1'), 'Value1')
self.assertEqual(res.getheader('Key2'), 'Value2')
self.assertEqual(res.getheader('Key3'), 'Value3')
self.assertEqual(res.getheader('Key4'), 'Value4')
self.assertEqual(res.getheader('Key5'), 'Value5')
self.assertEqual(res.getheader('Key6'), 'Value6')
self.assertEqual(res.getheader('Key7'), 'Value7')
self.assertEqual(res.getheader('Key8'), 'Value8')
self.assertEqual(res.getheader('Key9'), 'Value9')
def test_get_multiple(self):
res = self.request('GET', '/get/')
self.assertEqual(res.status, 200)
lines = res.read().splitlines()
self.assertEqual(lines[0], 'url=/get/')
self.assertEqual(lines[1], 'Host=127.0.0.1:5000')
self.assertEqual(lines[2], 'Accept-Encoding=identity')
self.assertEqual(lines[3], 'total_headers=2')
self.assertEqual(res.getheader('Transfer-Encoding'), 'chunked')
del lines
del res
res = self.request('GET', '/get/')
self.assertEqual(res.status, 200)
lines = res.read().splitlines()
self.assertEqual(lines[0], 'url=/get/')
self.assertEqual(lines[1], 'Host=127.0.0.1:5000')
self.assertEqual(lines[2], 'Accept-Encoding=identity')
self.assertEqual(lines[3], 'total_headers=2')
self.assertEqual(res.getheader('Transfer-Encoding'), 'chunked')
if __name__ == '__main__':
unittest.main()
|
454924
|
import os
import sys
sys.path.append("..")
sys.path.append(".")
import json
import csv
import numpy as np
import tensorflow as tf
from model.checkpoint_loader import build_model_from_config
from utils.Adam_mult import AdamWarmup, calc_train_steps
import unittest
class Test_PowerBERTModels(unittest.TestCase):
def test_finetuned_model(self):
"""Test code for finetuning task."""
fine_tuned_model, config = build_model_from_config(
'./bert_config.json',
output_dim=2,
seq_len=64,
FLAG_EXTRACT_LAYER=0,
TASK='cola')
decay_steps, warmup_steps = calc_train_steps(
8550,
batch_size=128,
epochs=3,
)
fine_tuned_model.compile(
AdamWarmup(decay_steps=decay_steps,
warmup_steps=warmup_steps,
lr=3e-5,
lr_mult=None),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
print("Fine-tuned model summary: ", fine_tuned_model.summary())
del fine_tuned_model
def test_search_model(self):
"""Test code for configurtion search."""
configuration_search_model, config = build_model_from_config(
'./bert_config.json',
output_dim=2,
seq_len=64,
LAMBDA=3e-3,
FLAG_EXTRACT_LAYER=1,
TASK='cola')
decay_steps, warmup_steps = calc_train_steps(
8550,
batch_size=128,
epochs=3,
)
configuration_search_model.compile(
AdamWarmup(decay_steps=decay_steps,
warmup_steps=warmup_steps,
lr=3e-5,
lr_mult=None),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
print("Configuration search model summary: ", configuration_search_model.summary())
del configuration_search_model
def test_retrained_model(self):
"""Test code for retrained model."""
retrained_model, config = build_model_from_config(
'./bert_config.json',
output_dim=2,
seq_len=64,
retention_configuration=[64, 64, 64, 32, 32, 32, 16, 16, 16, 8, 8, 8],
FLAG_EXTRACT_LAYER=2,
TASK='cola')
decay_steps, warmup_steps = calc_train_steps(
8550,
batch_size=128,
epochs=3,
)
retrained_model.compile(
AdamWarmup(decay_steps=decay_steps,
warmup_steps=warmup_steps,
lr=3e-5,
lr_mult=None),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
print("Retrained model summary: ", retrained_model.summary())
del retrained_model
if __name__ == '__main__':
unittest.main()
|
454940
|
from sqlalchemy.exc import IntegrityError
from grouper.fe.forms import PermissionCreateForm
from grouper.fe.util import GrouperHandler, test_reserved_names
from grouper.models.audit_log import AuditLog
from grouper.permissions import create_permission
from grouper.user_permissions import user_creatable_permissions
from grouper.util import matches_glob
class PermissionsCreate(GrouperHandler):
def get(self):
can_create = user_creatable_permissions(self.session, self.current_user)
if not can_create:
return self.forbidden()
return self.render(
"permission-create.html", form=PermissionCreateForm(), can_create=sorted(can_create)
)
def post(self):
can_create = user_creatable_permissions(self.session, self.current_user)
if not can_create:
return self.forbidden()
form = PermissionCreateForm(self.request.arguments)
if not form.validate():
return self.render(
"permission-create.html", form=form, alerts=self.get_form_alerts(form.errors)
)
# A user is allowed to create a permission if the name matches any of the globs that they
# are given access to via PERMISSION_CREATE, as long as the permission does not match a
# reserved name. (Unless specifically granted.)
allowed = False
for creatable in can_create:
if matches_glob(creatable, form.data["name"]):
allowed = True
for failure_message in test_reserved_names(form.data["name"]):
form.name.errors.append(failure_message)
if not allowed:
form.name.errors.append("Permission name does not match any of your allowed patterns.")
if form.name.errors:
return self.render(
"permission-create.html", form=form, alerts=self.get_form_alerts(form.errors)
)
try:
permission = create_permission(
self.session, form.data["name"], form.data["description"]
)
self.session.flush()
except IntegrityError:
self.session.rollback()
form.name.errors.append("Name already in use. Permissions must be unique.")
return self.render(
"permission-create.html",
form=form,
can_create=sorted(can_create),
alerts=self.get_form_alerts(form.errors),
)
self.session.commit()
AuditLog.log(
self.session,
self.current_user.id,
"create_permission",
"Created permission.",
on_permission_id=permission.id,
)
# No explicit refresh because handler queries SQL.
return self.redirect("/permissions/{}".format(permission.name))
|
454964
|
import struct
from typing import List
from bitcoin_client.hwi.serialization import CTransaction, hash256, ser_string
def bip143_digest(tx: CTransaction,
amount: int,
input_index: int,
sig_hash: int = 0x01) -> bytes:
hash_prev_outs: bytes = b"".join([
txin.prevout.serialize() for txin in tx.vin
])
hash_sequence: bytes = b"".join([
struct.pack("<I", txin.nSequence) for txin in tx.vin
])
hash_outputs: bytes = b"".join([
txout.serialize() for txout in tx.vout
])
digest: bytes = hash256(
b"".join([
struct.pack("<i", tx.nVersion),
hash256(hash_prev_outs),
hash256(hash_sequence),
tx.vin[input_index].prevout.serialize(), # outpoint
ser_string(tx.vin[input_index].scriptSig),
struct.pack("<q", amount),
struct.pack("<I", tx.vin[input_index].nSequence),
hash256(hash_outputs),
struct.pack("<I", tx.nLockTime),
sig_hash.to_bytes(4, byteorder="little")
])
)
# print(f"version: {struct.pack('<i', tx.nVersion).hex()}")
# print(f"hash_prev_outs: {hash256(hash_prev_outs).hex()}")
# print(f"hash_sequence: {hash256(hash_sequence).hex()}")
# print(f"outpoint: {tx.vin[input_index].prevout.serialize().hex()}")
# print(f"scriptSig: {ser_string(tx.vin[input_index].scriptSig).hex()}")
# print(f"amount: {struct.pack('<q', amount).hex()}")
# print(f"sequence: {struct.pack('<I', tx.vin[input_index].nSequence).hex()}")
# print(f"hash_outputs: {hash256(hash_outputs).hex()}")
# print(f"lock_time: {struct.pack('<I', tx.nLockTime).hex()}")
# print(f"digest: {digest.hex()}")
return digest
def bip32_path_from_string(path: str) -> List[bytes]:
"""Convert BIP32 path string to list of bytes."""
splitted_path: List[str] = path.split("/")
if "m" in splitted_path and splitted_path[0] == "m":
splitted_path = splitted_path[1:]
return [int(p).to_bytes(4, byteorder="big") if "'" not in p
else (0x80000000 | int(p[:-1])).to_bytes(4, byteorder="big")
for p in splitted_path]
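# Example (illustrative): bip32_path_from_string("m/44'/0'/0'/0/0") returns
# [b'\x80\x00\x00\x2c', b'\x80\x00\x00\x00', b'\x80\x00\x00\x00',
#  b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'] -- hardened indices have the top bit set.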
def compress_pub_key(pub_key: bytes) -> bytes:
"""Convert uncompressed to compressed public key."""
if pub_key[-1] & 1:
return b"\x03" + pub_key[1:33]
return b"\x02" + pub_key[1:33]
|
454965
|
import os
from typing import List, Dict, Tuple, Optional
import yaml
from app.clients.jenkins_client import JenkinsInstanceConfig
class TriggearConfig:
def __init__(self) -> None:
self.__jenkins_instances: Dict[str, JenkinsInstanceConfig] = {}
self.__github_token: Optional[str] = None
self.__triggear_token: Optional[str] = None
@property
def jenkins_instances(self) -> Dict[str, JenkinsInstanceConfig]:
if self.__jenkins_instances == {}:
self.__github_token, self.__triggear_token, self.__jenkins_instances = self.read_credentials_file()
return self.__jenkins_instances
@property
def github_token(self) -> str:
if self.__github_token is None:
self.__github_token, self.__triggear_token, self.__jenkins_instances = self.read_credentials_file()
return self.__github_token
@property
def triggear_token(self) -> str:
if self.__triggear_token is None:
self.__github_token, self.__triggear_token, self.__jenkins_instances = self.read_credentials_file()
return self.__triggear_token
@staticmethod
def read_credentials_file() -> Tuple[str, str, Dict[str, JenkinsInstanceConfig]]:
with open(os.getenv('CREDS_PATH', 'creds.yml'), 'r') as stream:
            config = yaml.safe_load(stream)
instances_config: List[Dict[str, str]] = config['jenkins_instances']
jenkins_instances: Dict[str, JenkinsInstanceConfig] = {}
for instance_config in instances_config:
url = instance_config['url']
user = instance_config['user']
token = instance_config['token']
jenkins_instances[url] = JenkinsInstanceConfig(url, user, token)
return config['github_token'], config['triggear_token'], jenkins_instances
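# Illustrative creds.yml layout this loader expects (all values are placeholders):
#   github_token: "<github token>"
#   triggear_token: "<triggear token>"
#   jenkins_instances:
#     - url: "https://jenkins.example.com"
#       user: "bot"
#       token: "<api token>"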
|
454977
|
import numpy as np
import os
multivariate_dims = [2, 4, 8]
N_BY_CLUS = 10
BASE_PATH = os.path.join("resources", "benchmarks", "datasets")
BASE_CHAIN_PATH = os.path.join("resources", "benchmarks", "chains")
if __name__ == '__main__':
os.makedirs(BASE_PATH, exist_ok=True)
os.makedirs(BASE_CHAIN_PATH, exist_ok=True)
np.random.seed(2021)
univ_y = np.concatenate(
[np.random.normal(loc=-5, size=N_BY_CLUS),
np.random.normal(loc=5, size=N_BY_CLUS)])
fname = os.path.join(BASE_PATH, "univariate_gaussian.csv")
np.savetxt(fname, univ_y)
for d in multivariate_dims:
multiv_y = np.vstack(
[np.random.normal(loc=-5, size=(N_BY_CLUS, d)),
np.random.normal(loc=5, size=(N_BY_CLUS, d))])
fname = os.path.join(
BASE_PATH, "multi_gaussian_dim_{0}.csv".format(d))
np.savetxt(fname, multiv_y)
|
455030
|
import datetime
import json
import os
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, get_next_unused_name
def get_browser_name(file_name):
if 'microsoft' in file_name.lower():
return 'Edge'
elif 'chrome' in file_name.lower():
return 'Chrome'
elif 'opera' in file_name.lower():
return 'Opera'
else:
return 'Unknown'
def get_chromeBookmarks(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if not os.path.basename(file_found) == 'Bookmarks': # skip -journal and other files
continue
elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
continue # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??
browser_name = get_browser_name(file_found)
if file_found.find('app_sbrowser') >= 0:
browser_name = 'Browser'
with open(file_found, "r") as f:
dataa = json.load(f)
data_list = []
for x, y in dataa.items():
flag = 0
if isinstance(y,dict):
for key, value in y.items():
if isinstance(value,dict):
for keyb, valueb in value.items():
if keyb == 'children':
if len(valueb) > 0:
url = valueb[0]['url']
dateadd = valueb[0]['date_added']
dateaddconv = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=int(dateadd))
name = valueb[0]['name']
typed = valueb[0]['type']
flag = 1
if keyb == 'name' and flag == 1:
flag = 0
parent = valueb
data_list.append((dateaddconv, url, name, parent, typed))
num_entries = len(data_list)
if num_entries > 0:
report = ArtifactHtmlReport(f'{browser_name} Bookmarks')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} Bookmarks.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path))
report.add_script()
data_headers = ('Added Date', 'URL', 'Name', 'Parent', 'Type')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} Bookmarks'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} Bookmarks'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Browser Bookmarks data available')
|
455047
|
# Imports added so this file is self-contained (the original relied on them being
# provided elsewhere, e.g. in a surrounding notebook cell).
import math
from typing import Union

import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister, transpile
from qiskit.circuit import Gate
from qiskit.circuit.library import QFT


def solver_function(L1: list, L2: list, C1: list, C2: list,
                    C_max: int) -> QuantumCircuit:
# the number of qubits representing answers
index_qubits = len(L1)
# the maximum possible total cost
max_c = sum([max(l0, l1) for l0, l1 in zip(C1, C2)])
min_c = sum([min(l0, l1) for l0, l1 in zip(C1, C2)])
max_c -= min_c #reduce number of qubits used
# the number of qubits representing data values can be defined using the maximum possible total cost as follows:
data_qubits = math.ceil(math.log(max_c, 2)) + 1 if not max_c & (
max_c - 1) == 0 else math.ceil(math.log(max_c, 2)) + 2
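    # Worked example of the sizing above (illustrative): with the reduced max_c = 7,
    # 7 is not a power of two, so data_qubits = ceil(log2(7)) + 1 = 4; with max_c = 8
    # (a power of two) the second branch gives ceil(log2(8)) + 2 = 5, keeping one
    # extra headroom bit for costs that overflow past C_max.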
### Phase Operator ###
# return part
def phase_return(index_qubits: int,
gamma: float,
L1: list,
L2: list,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
##############################
### U_1(gamma * (lambda2 - lambda1)) for each qubit ###
# Provide your code here
for ii in range(index_qubits):
# if problem statement is correct,
# L2 > L1 cannot give zero here
qc.p(-gamma * (L2[ii] - L1[ii]), qr_index[ii])
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
##############################
return qc.to_gate(label=" phase return ") if to_gate else qc
# penalty part
def subroutine_add_const(data_qubits: int,
const: int,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qc = QuantumCircuit(data_qubits)
##############################
### Phase Rotation ###
# Provide your code here
const_str = bin(const)[2:].zfill(data_qubits)
const_str = const_str[::-1]
rotation_angles = [0] * data_qubits
for ii in range(data_qubits):
for jj in range(ii, data_qubits):
if const_str[ii] == '1':
                    # seems simplest
rotation_angles[jj] += np.pi / (2**(jj - ii))
for jj in range(data_qubits):
qc.p(rotation_angles[jj], jj)
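        # Draper-style addition in the Fourier basis (illustrative note): bit i of the
        # constant contributes a phase of pi / 2**(j - i) to qubit j >= i, so these
        # rotations add `const` to the register when sandwiched between the QFT and
        # inverse QFT applied in cost_calculation.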
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
##############################
return qc.to_gate(label=" [+" + str(const) + "] ") if to_gate else qc
# penalty part
def const_adder(data_qubits: int,
const: int,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_data)
##############################
### Phase Rotation ###
# Use `subroutine_add_const`
qc.append(subroutine_add_const(data_qubits, const, to_gate), qr_data)
##############################
return qc.to_gate(label=" [ +" + str(const) + "] ") if to_gate else qc
# penalty part
def cost_calculation(index_qubits: int,
data_qubits: int,
list1: list,
list2: list,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
# just to make sure
num_bit = data_qubits - 1
diff = 2**num_bit - (C_max - min_c) - 2 # 2 works, 1 not sure
# should I subtract from cost(z) any ways?
        # this one won't subtract in some cases
if diff < 0:
diff = 0
diff_str_inv = bin(diff)[2:].zfill(data_qubits)[::-1]
for ii in range(data_qubits):
if diff_str_inv[ii] == '1':
qc.x(qr_data[ii])
qc.h(qr_index[0])
if list2[0] > list1[0]: #just to make sure
first_val_diff = list2[0] - list1[0] + diff
else:
first_val_diff = list1[0] - list2[0] + diff
first_val_diff_str = bin(first_val_diff)[2:].zfill(data_qubits)[::-1]
for ii in range(data_qubits):
if diff_str_inv[ii] != first_val_diff_str[ii]:
qc.cx(qr_index[0], qr_data[ii])
# no transpile a_d = 1
qft = QFT(data_qubits,
approximation_degree=1,
do_swaps=False,
inverse=False,
insert_barriers=False,
name=None).decompose()
qft = transpile(qft,
basis_gates=['sx', 'rz', 'cx'],
optimization_level=2,
seed_transpiler=42)
qc.append(qft, qr_data)
for i, (val1, val2) in enumerate(zip(list1[1:], list2[1:])):
##############################
### Add val2 using const_adder controlled by i-th index register (set to 1) ###
# Provide your code here
if val2 > val1:
adder_gate2 = const_adder(data_qubits, val2 - val1, True)
ctrl_adder_gate2 = adder_gate2.control()
qc.append(ctrl_adder_gate2, [qr_index[i + 1]] + qr_data[:])
##############################
else:
qc.x(qr_index[i + 1])
##############################
### Add val1 using const_adder controlled by i-th index register (set to 0) ###
# Provide your code here
adder_gate1 = const_adder(data_qubits, val1 - val2, True)
ctrl_adder_gate1 = adder_gate1.control()
qc.append(ctrl_adder_gate1, [qr_index[i + 1]] + qr_data[:])
qc = transpile(qc,
basis_gates=['sx', 'rz', 'cx'],
optimization_level=2,
seed_transpiler=42)
qfti = QFT(data_qubits,
approximation_degree=1,
do_swaps=False,
inverse=True,
insert_barriers=False,
name=None).decompose()
qfti = transpile(qfti,
basis_gates=['sx', 'rz', 'cx'],
optimization_level=2,
seed_transpiler=42)
qc.append(qfti, qr_data)
qc = qc.decompose()
##############################
# qc.x(qr_index[i]) #we invert this any ways
return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
# penalty part
def constraint_testing(data_qubits: int,
C_max: int,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
##############################
### Set the flag register for indices with costs larger than C_max ###
# Provide your code here
# do what the paper suggested
num_bit = data_qubits - 1
# I think I can simplify to just 1 cx
qc.cx(qr_data[num_bit], qr_f)
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
#print('constraint')
#print(transpile(qc,backend=sim,optimization_level=0,basis_gates=['rz','sx','cx']).size())
##############################
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
# penalty part
def penalty_dephasing(data_qubits: int,
alpha: float,
gamma: float,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
##############################
### Phase Rotation ###
# Provide your code here
for ii in range(data_qubits):
qc.cp(alpha * gamma * 2**(ii), qr_f, qr_data[ii])
num_bit = data_qubits - 1
qc.p(-alpha * gamma * (2**num_bit),
qr_f) # -2 the same ammount did not help
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
##############################
#print('dephasing')
#print(transpile(qc,backend=sim,optimization_level=0,basis_gates=['rz','sx','cx']).size())
return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
# penalty part
def reinitialization(index_qubits: int,
data_qubits: int,
C1: list,
C2: list,
C_max: int,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_index, qr_data, qr_f)
##############################
### Reinitialization Circuit ###
# Provide your code here
ctt_inv = constraint_testing(data_qubits, C_max,
to_gate=True).inverse()
qc.append(ctt_inv, qr_data[:] + qr_f[:])
cost_inv = cost_calculation(index_qubits,
data_qubits,
C1,
C2,
to_gate=True).inverse()
qc.append(cost_inv, qr_index[:] + qr_data[:])
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
#print('reinit')
#print(transpile(qc,backend=sim,optimization_level=0,basis_gates=['rz','sx','cx']).size())
##############################
return qc.to_gate(label=" Reinitialization ") if to_gate else qc
### Mixing Operator ###
def mixing_operator(index_qubits: int,
beta: float,
to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
##############################
### Mixing Operator ###
# Provide your code here
for ii in range(index_qubits):
qc.rx(2 * beta, qr_index[ii])
qc = transpile(qc,
optimization_level=2,
basis_gates=['rz', 'sx', 'cx'],
seed_transpiler=42)
##############################
#print('mixing')
#print(transpile(qc,backend=sim,optimization_level=0,basis_gates=['rz','sx','cx']).size())
return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
qr_index = QuantumRegister(index_qubits, "index") # index register
qr_data = QuantumRegister(data_qubits, "data") # data register
qr_f = QuantumRegister(1, "flag") # flag register
cr_index = ClassicalRegister(
index_qubits, "c_index"
) # classical register storing the measurement result of index register
qc = QuantumCircuit(qr_index, qr_data, qr_f, cr_index)
### initialize the index register with uniform superposition state ###
qc.h(qr_index)
### DO NOT CHANGE THE CODE BELOW
p = 5
alpha = 1
for i in range(p):
### set fixed parameters for each round ###
beta = 1 - (i + 1) / p
gamma = (i + 1) / p
### return part ###
qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index)
### step 1: cost calculation ###
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2),
qr_index[:] + qr_data[:])
### step 2: Constraint testing ###
qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:])
### step 3: penalty dephasing ###
qc.append(penalty_dephasing(data_qubits, alpha, gamma),
qr_data[:] + qr_f[:])
### step 4: reinitialization ###
qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max),
qr_index[:] + qr_data[:] + qr_f[:])
### mixing operator ###
qc.append(mixing_operator(index_qubits, beta), qr_index)
### measure the index ###
### since the default measurement outcome is shown in big endian, it is necessary to reverse the classical bits in order to unify the endian ###
qc.measure(qr_index, cr_index[::-1])
return qc
|
455055
|
from __future__ import print_function
import sys
import pyexcel as pe
book = pe.get_book(file_name=sys.argv[1])
bfh = open('samocha.benign.vcf', 'w')
pfh = open('samocha.pathogenic.vcf', 'w')
header = """##fileformat=VCFv4.1
##source=pathoscore
##reference=GRCh37
##INFO=<ID=MPC,Number=1,Type=Float,Description="MPC score">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO"""
print(header, file=bfh)
print(header, file=pfh)
sheet = book['Table_S8']
for i, record in enumerate(sheet):
if i == 0:
        keys = list(map(str, record))
continue
record = dict(zip(keys, record))
fh = bfh if (record['dataset'] == 'control') else pfh
if record['MPC'] == "NA":
print("{chrom}\t{pos}\t.\t{ref}\t{alt}\t10\tPASS\t.".format(chrom=record['chrom'], pos=record['pos'],
ref=record['ref'], alt=record['alt']), file=fh)
else:
print("{chrom}\t{pos}\t.\t{ref}\t{alt}\t10\tPASS\tMPC={MPC}".format(chrom=record['chrom'], pos=record['pos'],
ref=record['ref'], alt=record['alt'], MPC="%.4f" % float(record['MPC'])), file=fh)
bfh.close()
pfh.close()
print("DONE")
|
455105
|
import hy
# For the side-effect of allowing import of Hy programs.
import pytest
def pytest_collect_file(parent, path):
if path.basename.startswith('test_') and path.ext == ".hy":
return pytest.Module.from_parent(parent, fspath=path)
|
455123
|
from django.urls import path
from .views import GoogleSocialAuthView
urlpatterns = [
path('google/', GoogleSocialAuthView.as_view()),
]
|
455131
|
import time
import re
from threading import Lock, Thread
from src.utility.exceptions import OperationError
class Connection:
def __init__(self, terminal=None):
self._terminal = terminal
self._reader_running = False
self._auto_read_enabled = True
self._auto_reader_lock = Lock()
def is_connected(self):
raise NotImplementedError()
def disconnect(self):
raise NotImplementedError()
def read_line(self):
raise NotImplementedError()
def read_all(self):
raise NotImplementedError()
def read_junk(self):
self.read_all()
def read_one_byte(self):
raise NotImplementedError()
def read_to_next_prompt(self, timeout=5.0):
ret = b""
t_start = time.time()
while len(ret) < 4 or ret[-4:] != b">>> ":
if (time.time() - t_start) >= timeout:
raise TimeoutError()
ret += self.read_one_byte()
return ret.decode("utf-8", errors="replace")
def send_line(self, line_text, ending="\r\n"):
raise NotImplementedError()
def send_character(self, char):
raise NotImplementedError()
def send_bytes(self, binary):
raise NotImplementedError()
def send_block(self, text):
lines = text.split("\n")
if len(lines) == 1:
self.send_line(lines[0])
elif len(lines) > 1:
self.send_start_paste()
for line in lines:
self.send_line(line)
self.send_end_paste()
def run_file(self, file_name, globals_init=""):
self.send_start_paste()
if globals_init:
self.send_line(globals_init, "\r")
self.send_line("with open(\"{}\") as f:".format(file_name))
self.send_line(" exec(f.read(), globals())")
self.send_end_paste()
def remove_file(self, file_name):
success = True
# Prevent echo
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.send_line("import os; os.remove(\"{}\")".format(file_name))
try:
self.read_to_next_prompt()
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if not success:
raise OperationError()
def get_file_size(self, file_name):
success = True
file_size = 0
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.send_line("import os; os.stat(\"{}\")".format(file_name))
try:
res = self.read_to_next_prompt()
# Skip first line which is command echo
res = res[res.find("\n"):]
# Strip parentheses and split to items
items = res.strip("()\r\n ").split(", ")
            # st_size is the seventh field (index 6) of the os.stat result
            file_size = int(items[6])
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if not success:
raise OperationError()
return file_size
def send_start_paste(self):
self.send_character("\5")
def send_end_paste(self):
self.send_character("\4")
def send_kill(self):
self.send_character("\3")
def _reader_thread_routine(self):
self._reader_running = True
while self._reader_running:
self._auto_reader_lock.acquire()
x = ""
if self._auto_read_enabled:
x = self.read_line()
self._auto_reader_lock.release()
time.sleep(0.1 if not x else 0)
@staticmethod
def _get_remote_file_name(local_file_path):
return local_file_path.rsplit("/", 1)[1]
def list_files(self):
success = True
# Pause autoreader so we can receive response
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
# Stop any running script
self.send_kill()
# Read any leftovers
self.read_junk()
# Mark the start of file listing communication
self.send_line("print('#fs#')")
# Now we either wait for any running program to finish
# or read output that it might be producing until it finally
# closes and our command gets executed.
ret = ""
while "#fs#" not in ret:
try:
ret = self.read_to_next_prompt()
            except TimeoutError:
                success = False
                break
# Now we can be sure that we are ready for listing files
# Send command for listing files
if success:
self.send_line("import os; os.listdir()")
# Wait for reply
try:
ret = self.read_to_next_prompt()
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if success and ret:
return re.findall("'([^']+)'", ret)
else:
raise OperationError()
def _write_file_job(self, remote_name, content, transfer):
raise NotImplementedError()
def write_file(self, file_name, text, transfer):
job_thread = Thread(target=self._write_file_job,
args=(file_name, text, transfer))
job_thread.setDaemon(True)
job_thread.start()
def _write_files_job(self, local_file_paths, transfer):
for local_path in local_file_paths:
remote_name = self._get_remote_file_name(local_path)
with open(local_path, "rb") as f:
content = f.read()
self._write_file_job(remote_name, content, transfer)
if transfer.cancel_scheduled:
transfer.confirm_cancel()
if transfer.error or transfer.cancelled:
break
def write_files(self, local_file_paths, transfer):
job_thread = Thread(target=self._write_files_job,
args=(local_file_paths, transfer))
job_thread.setDaemon(True)
job_thread.start()
def _read_file_job(self, file_name, transfer):
raise NotImplementedError()
def read_file(self, file_name, transfer):
job_thread = Thread(target=self._read_file_job, args=(file_name, transfer))
job_thread.setDaemon(True)
job_thread.start()
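# --- Hypothetical concrete implementation sketch (not part of this module) ---
# Shows how the abstract read/send primitives above could be backed by pyserial;
# the class name and constructor defaults are illustrative assumptions.
import serial


class SerialConnection(Connection):
    def __init__(self, port, baudrate=115200, terminal=None):
        super().__init__(terminal)
        self._serial = serial.Serial(port, baudrate, timeout=0.1)

    def is_connected(self):
        return self._serial.is_open

    def disconnect(self):
        self._reader_running = False
        self._serial.close()

    def read_line(self):
        return self._serial.readline().decode("utf-8", errors="replace")

    def read_all(self):
        return self._serial.read(self._serial.in_waiting or 0).decode("utf-8", errors="replace")

    def read_one_byte(self):
        # read_to_next_prompt() concatenates these raw bytes, so return bytes, not str
        return self._serial.read(1)

    def send_line(self, line_text, ending="\r\n"):
        self._serial.write((line_text + ending).encode("utf-8"))

    def send_character(self, char):
        self._serial.write(char.encode("utf-8"))

    def send_bytes(self, binary):
        self._serial.write(binary)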
|
455137
|
import torch
from pinot import Net
from pinot.regressors import (
ExactGaussianProcessRegressor,
NeuralNetworkRegressor,
)
class MultiOutputNet(Net):
""" An object that combines the representation and parameter
learning, puts into a predicted distribution and calculates the
corresponding divergence.
Attributes
----------
representation: a `pinot.representation` module
the model that translates graphs to latent representations
"""
def __init__(
self,
representation,
output_regressor=NeuralNetworkRegressor,
**kwargs
):
super(MultiOutputNet, self).__init__(
representation, output_regressor, **kwargs
)
self.has_exact_gp = output_regressor == ExactGaussianProcessRegressor
self.output_regressors = torch.nn.ModuleDict()
self.kwargs = kwargs
def condition(self, g, task):
""" Compute the output distribution with sampled weights.
"""
kwargs = {}
if self.has_exact_gp:
# adjust the representations
h_last = self.representation(self.g_last)
mask_last = self._generate_mask(self.y_last)[:, task]
# mask input
h_last = self._mask_tensor(h_last, mask_last)
y_last = self._mask_tensor(self.y_last, mask_last, task)
kwargs = {"x_tr": h_last, "y_tr": y_last}
# moduledicts like string keys
task = str(task)
# get representation
h = self.representation(g)
# get output output_regressor for a particular task
self.output_regressor = self._get_output_regressor(task)
# get distribution for input
distribution = self.output_regressor.condition(h, **kwargs)
return distribution
def loss(self, g, y):
""" Compute the loss from input graph and corresponding y.
"""
loss = 0.0
if self.has_exact_gp:
self.g_last = g
self.y_last = y
# for each task in the data, split up data
h = self.representation(g)
l = self._generate_mask(y)
for task, mask in enumerate(l.T):
if mask.any():
# switch to output_regressor for that task
self.output_regressor = self._get_output_regressor(task)
if self.has_exact_gp:
# mask input if ExactGP
h_task = self._mask_tensor(h, mask)
y_task = self._mask_tensor(y, mask, task)
loss += self.output_regressor.loss(h_task, y_task).mean()
else:
# mask output if VariationalGP
distribution = self.output_regressor.condition(h)
y_dummy = self._generate_y_dummy(y, task)
loss += -distribution.log_prob(y_dummy)[mask].mean()
return loss
def _get_output_regressor(self, task):
""" Returns output_regressor for a task.
"""
# ModuleDict needs str
task = str(task)
        # instantiate the output_regressor for this task if it does not exist yet
        if task not in self.output_regressors:
# get the type of self.output_regressor, and instantiate it
self.output_regressors[task] = self.output_regressor_cls(
self.representation_out_features, **self.kwargs
)
# move to cuda if the parent net is
if next(self.parameters()).is_cuda:
self.output_regressors[task].cuda()
return self.output_regressors[task]
def _generate_y_dummy(self, y, task):
""" Generates y dummy - fill nans with zeros.
"""
y_dummy = y[:, task]
y_dummy[torch.isnan(y_dummy)] = 0
return y_dummy.view(-1, 1)
def _mask_tensor(self, x, mask, task=None):
""" Subsets data given mask for particular task.
"""
        if task is not None:
ret = x[mask, task].unsqueeze(-1)
else:
ret = x[mask]
return ret
def _generate_mask(self, y):
""" Creates a boolean mask where y is nan.
"""
return ~torch.isnan(y)
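# --- Minimal masking sketch (plain torch, independent of pinot) ---
# Illustrates what _generate_mask / _mask_tensor compute for a two-task label
# matrix in which missing measurements are encoded as NaN.
import torch

y = torch.tensor([[0.3, float("nan")],
                  [float("nan"), 1.2],
                  [0.7, 0.5]])
mask = ~torch.isnan(y)              # same logic as _generate_mask
task0_labels = y[mask[:, 0], 0]     # rows observed for task 0 -> tensor([0.3000, 0.7000])
print(mask, task0_labels)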
|
455151
|
from django.http import HttpRequest
from django.http import HttpResponse
from django.shortcuts import render
def lab_open_source(request: HttpRequest) -> HttpResponse:
return render(
request,
'about/lab-open-source.html',
{
'title': 'Open Source in our Computer Lab',
},
)
def lab_vote(request: HttpRequest) -> HttpResponse:
return render(
request,
'about/lab-vote.html',
{
'title': 'OCF: Register to vote',
},
)
def lab_survey(request: HttpRequest) -> HttpResponse:
return render(
request,
'about/lab-survey.html',
{
'title': 'STF Renewal Survey',
},
)
|
455154
|
import os
from mfr import settings
config = settings.child('UNOCONV_EXTENSION_CONFIG')
UNOCONV_BIN = config.get('UNOCONV_BIN', '/usr/local/bin/unoconv')
UNOCONV_TIMEOUT = int(config.get('UNOCONV_TIMEOUT', 60))
ADDRESS = config.get('SERVER', os.environ.get('UNOCONV_PORT_2002_TCP_ADDR', '127.0.0.1'))
PORT = config.get('PORT', os.environ.get('UNOCONV_PORT_2002_TCP_PORT', '2002'))
DEFAULT_RENDER = {'renderer': '.pdf', 'format': 'pdf'}
RENDER_MAP = config.get_object('RENDER_MAP', {
# 'csv': {'renderer': '.xlsx', 'format': 'xlsx'},
# 'ppt': {'renderer': '.pdf', 'format': 'pdf'},
# 'pptx': {'renderer': '.pdf', 'format': 'pdf'},
})
|
455166
|
import FWCore.ParameterSet.Config as cms
ecal2006TBWeightUncalibRecHit = cms.EDProducer("EcalTBWeightUncalibRecHitProducer",
use2004OffsetConvention = cms.untracked.bool(False),
EBdigiCollection = cms.InputTag('ecalTBunpack',''),
EEdigiCollection = cms.InputTag('',''),
tdcRecInfoCollection = cms.InputTag('ecal2006TBTDCReconstructor','EcalTBTDCRecInfo'),
EBhitCollection = cms.string('EcalUncalibRecHitsEB'),
nbTimeBin = cms.int32(25),
)
|
455167
|
from typing import Set, List, Tuple, Optional, Dict
import numpy as np
from algorithms.algorithm import Algorithm
from algorithms.basic_testing import BasicTesting
from algorithms.configuration.entities.goal import Goal
from algorithms.configuration.maps.map import Map
from algorithms.configuration.maps.ros_map import RosMap
from simulator.services.services import Services
from simulator.views.map.display.gradient_list_map_display import GradientListMapDisplay
from simulator.views.map.display.map_display import MapDisplay
from simulator.views.map.display.solid_iterable_map_display import SolidIterableMapDisplay
from structures import Point, Colour, BLUE, DynamicColour
from structures.factory import gen_set, gen_list
"""
Child-Generator-Deque-Search
"""
class CGDS(Algorithm):
class InternalMemory:
deque: List[Tuple[Point, float, Point, List[Tuple[Point, float]]]]
dequeVisual: List[Tuple[int, Point]]
visited: Set[Point]
back_pointer: Dict[Point, Optional[Point]]
costs: Dict[Point, float]
h: Dict[Point, float]
def __init__(self, services: Services):
self.deque = gen_list(services)
self.dequeVisual = gen_list(services)
self.visited = gen_set(services)
self.back_pointer = {}
self.costs = {}
self.h = {}
mem: InternalMemory
pq_colour_max: DynamicColour
pq_colour_min: DynamicColour
visited_colour: DynamicColour
__map_displays: List[MapDisplay]
def __init__(self, services: Services, testing: BasicTesting = None):
super().__init__(services, testing)
self.mem = CGDS.InternalMemory(self._services)
self.pq_colour_max = self._services.state.views.add_colour("explored max", BLUE)
self.pq_colour_min = self._services.state.views.add_colour("explored min", Colour(0.27, 0.33, 0.35, 0.2))
self.visited_colour = self._services.state.views.add_colour("visited", Colour(0.19, 0.19, 0.2, 0.8))
self.__map_displays = [SolidIterableMapDisplay(self._services, self.mem.visited, self.visited_colour, z_index=50),
GradientListMapDisplay(self._services, self.mem.dequeVisual, min_colour=self.pq_colour_min,
max_colour=self.pq_colour_max, z_index=49, inverted=True)
]
def set_display_info(self) -> List[MapDisplay]:
"""
Read super description
"""
return super().set_display_info() + self.__map_displays
# noinspection PyUnusedLocal
# @profile
def _find_path_internal(self) -> None:
self._init_mem()
if self._expand():
self._follow_back_trace()
def _init_mem(self) -> None:
grid: Map = self._get_grid()
# push agent
self.mem.deque.insert(0, (grid.agent.position, 0.0, None, None))
self.mem.back_pointer[grid.agent.position] = None
def _expand(self) -> bool:
grid: Map = self._get_grid()
while len(self.mem.deque) > 0:
parentNode: Point
childrenNodes: List[Tuple[Point, float]]
parentNode, cost, prev, childrenNodes = self.mem.deque.pop(0)
if childrenNodes is None:
# New parent Node whose children haven't been generated yet
# Check to see if cost is lower or equal to what has been seen before
if parentNode in self.mem.costs and self.mem.costs[parentNode] <= cost:
continue # skip since cost isn't lower than previously seen
self.mem.costs[parentNode] = cost
self.mem.back_pointer[parentNode] = prev
self.mem.visited.add(parentNode)
# Check if we've reached the goal
if grid.is_goal_reached(parentNode):
return True
# Generate children:
children = []
for child, idx in grid.get_next_positions_with_move_index(parentNode):
dist = grid.get_movement_cost_from_index(idx)
children.append((self.get_heuristic(child), dist, child))
childrenNodes = [(child, dist) for c, dist, child in sorted(children)]
# Visualisation:
listOrder = [node for node, _, _, _ in self.mem.deque]
self.mem.dequeVisual.clear()
self.mem.dequeVisual.extend([(i, n) for i, n in enumerate(listOrder)])
self.key_frame()
if len(childrenNodes) == 0:
# No children Nodes so go back and pop a new parent node
continue
else:
# Get the next child and add them to the front of the deque:
nextNode, dist = childrenNodes.pop(0)
self.mem.deque.insert(0, (nextNode, cost+dist, parentNode, None))
# Add parent Node to the back of the queue
self.mem.deque.append((parentNode, cost, prev, childrenNodes))
# Reaching this means the deque is empty but no solutions have been found:
return False
def _follow_back_trace(self):
grid: Map = self._get_grid()
trace: List[Point] = self.get_back_trace(grid.goal)
trace.reverse()
for t in trace:
self.move_agent(t)
if isinstance(grid, RosMap):
grid.publish_wp(grid.agent.position)
self.key_frame(ignore_key_frame_skip=True)
def get_back_trace(self, goal: Goal) -> List[Point]:
"""
Follows the back pointer until it gets to the agent position
:return: The trace
"""
trace = []
pos = goal.position
while self.mem.back_pointer[pos] is not None:
trace.append(pos)
pos = self.mem.back_pointer[pos]
return trace
def get_heuristic(self, pos: Point) -> float:
"""
Returns the euclidean distance from the given position to the goal
It does memoization as well
:param goal: The goal
:param pos: The initial position
:return:
"""
self.mem.h.setdefault(pos, np.linalg.norm(np.array(pos) - np.array(self._get_grid().goal.position)))
return self.mem.h[pos]
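# --- Toy illustration of the deque discipline used above (plain Python, no simulator) ---
# Children are generated lazily: the most promising child goes to the FRONT of the
# deque while the parent, carrying its remaining children, is re-queued at the BACK.
# Edge cost stands in for the heuristic here purely to keep the example small.
if __name__ == "__main__":
    from collections import deque

    graph = {"A": [("B", 1), ("C", 4)], "B": [("C", 1)], "C": []}
    goal = "C"

    dq = deque([("A", 0.0, None, None)])
    costs, back = {}, {}
    while dq:
        node, cost, prev, children = dq.popleft()
        if children is None:
            if node in costs and costs[node] <= cost:
                continue
            costs[node], back[node] = cost, prev
            if node == goal:
                break
            children = sorted(graph[node], key=lambda c: c[1])
        if children:
            child, dist = children.pop(0)
            dq.appendleft((child, cost + dist, node, None))
            dq.append((node, cost, prev, children))

    print(costs[goal], back)  # 2.0 {'A': None, 'B': 'A', 'C': 'B'}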
|
455259
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.cracker import cracker
def test_cracker():
"""Test module cracker.py by downloading
cracker.csv and testing shape of
extracted data has 3292 rows and 14 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = cracker(test_path)
try:
assert x_train.shape == (3292, 14)
  except Exception:
    shutil.rmtree(test_path)
    raise
|
455288
|
import os, sys
import glob
from sh import tar
import numpy as np
def main():
tar_files = sys.argv[1]
sample_ratio = float(sys.argv[2])
np.random.seed(1234)
tar_files = glob.glob(tar_files)
print(tar_files)
work_dir = os.path.dirname(tar_files[0])
os.system(f'mkdir -p {work_dir}/binary_files/')
for tar_file in tar_files:
print(f'extracting {tar_file}')
tar('-C', f'{work_dir}/binary_files/', '-xvf', tar_file)
all_files = glob.glob(f'{work_dir}/binary_files/*.jsonl')
all_files.sort()
print(f'{len(all_files)} in total')
sampled_files = np.random.choice(all_files, replace=False, size=int(sample_ratio * len(all_files)))
print(f'{len(sampled_files)} sampled files')
os.chdir(work_dir)
with open(f'sampled_binaries.txt', 'w') as f:
for fname in sampled_files:
fname = os.path.basename(fname)
f.write(fname + '\n')
print('creating tar file')
os.chdir('binary_files/')
tar('-cf', f'../sampled_binaries_{sample_ratio}.tar', '-T', '../sampled_binaries.txt')
if __name__ == '__main__':
main()
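# Usage sketch (script name, paths and the sampling ratio are illustrative):
#     python sample_binaries.py '/data/binaries-*.tar' 0.1
# extracts the archives into <dir>/binary_files/, writes the sampled names to
# sampled_binaries.txt, and re-packs them into sampled_binaries_0.1.tar.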
|
455311
|
S=int(input('Enter the Size: '))
TSUM=int(input('Enter Target Sum: '))
a=[]
for i in range(S):
    a.append(int(input('Enter the array Elements: ')))
def Quadruple(a,TSUM):
a.sort()
for i in range(S-3):
for j in range(i+1,S-2):
k=TSUM-(a[i]+a[j])
l=j+1
h=S-1
while l<h:
if a[l]+a[h]<k:
l+=1
elif a[l]+a[h]>k:
h-=1
else:
print("".join([str(a[i]),str(a[j]),str(a[l]),str(a[h])])+'$',end='')
l,h=l+1,h-1
Quadruple(a,TSUM)
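# Worked example (illustrative input): with S=6, a=[1,2,3,4,5,6] and TSUM=10 the
# only quadruple is 1+2+3+4, so the program prints "1234$".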
|
455346
|
import json
class CustomError(Exception):
pass
def lambda_handler(event, context):
num_of_winners = event['input']
# Trigger the Failed process
if 'exception' in event:
raise CustomError("An error occurred!!")
return {
"body": {
"num_of_winners": num_of_winners
}
}
|
455347
|
import itertools
import threading
import unittest
from luigi.contrib.hdfs import get_autoconfig_client
class HdfsClientTest(unittest.TestCase):
def test_get_autoconfig_client_cached(self):
original_client = get_autoconfig_client()
for _ in range(100):
self.assertIs(original_client, get_autoconfig_client())
def test_threaded_clients_different(self):
clients = []
def add_client():
clients.append(get_autoconfig_client())
# run a bunch of threads to get new clients in them
threads = [threading.Thread(target=add_client) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for client1, client2 in itertools.combinations(clients, 2):
self.assertIsNot(client1, client2)
|
455350
|
import copy
import confu.schema
import vaping
import vaping.config
import vaping.io
from vaping.plugins import PluginConfigSchema
try:
import vodka
import vodka.data
except ImportError:
pass
try:
import graphsrv
import graphsrv.group
except ImportError:
graphsrv = None
def probe_to_graphsrv(probe):
"""
takes a probe instance and generates
a graphsrv data group for it using the
probe's config
"""
config = probe.config
# manual group set up via `group` config key
if "group" in config:
source, group = config["group"].split(".")
group_field = config.get("group_field", "host")
group_value = config[group_field]
graphsrv.group.add(
source, group, {group_value: {group_field: group_value}}, **config
)
return
# automatic group setup for fping
for group_name, group_config in list(probe.groups.items()):
if "hosts" not in group_config:
continue
r = {}
for host in group_config.get("hosts"):
if isinstance(host, dict):
r[host["host"]] = host
else:
r[host] = {"host": host}
graphsrv.group.add(probe.name, group_name, r, **group_config)
class VodkaSchema(PluginConfigSchema):
"""
Define a schema for FPing and also define defaults.
"""
data = confu.schema.List(item=vaping.config.MixedDict())
apps = confu.schema.Dict(item=vaping.config.MixedDict())
plugins = confu.schema.List(item=vaping.config.MixedDict())
@vaping.plugin.register("vodka")
class VodkaPlugin(vaping.plugins.EmitBase):
"""
Plugin that emits to vodka data
"""
# starting vodka automatically when vaping is spinning
# up all the plugins causes some inconsistent behaviour
# in daemon mode, so we allow it to lazy start for now
#
# TODO: might need to revisit later
lazy_start = True
# Define config schema
ConfigSchema = VodkaSchema
def init(self):
self._is_started = False
def start(self):
if self._is_started:
return
# deep copy vodka plugin config and prepare to pass
# to vodka as it's own copy with type and name keys
# removed
vodka_config = copy.deepcopy(self.config)
if "name" in vodka_config:
del vodka_config["name"]
if "type" in vodka_config:
del vodka_config["type"]
self._is_started = True
vodka.run(vodka_config, self.vaping.config)
if graphsrv:
# if graphsrv is installed proceed to generate
# target configurations for it from probe config
for node in self.vaping.config.get("probes", []):
probe = vaping.plugin.get_probe(node, self.vaping)
probe_to_graphsrv(probe)
def emit(self, message):
if not self._is_started:
self.start()
vodka.data.handle(
message.get("type"), message, data_id=message.get("source"), caller=self
)
|
455351
|
from ..broker import Broker
class DeviceGroupMemberBroker(Broker):
controller = "device_group_members"
def show(self, **kwargs):
"""Shows the details for the specified device group member.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record.
:type DeviceGroupMemberID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_member: The device group member identified by the specified DeviceGroupMemberID.
:rtype device_group_member: DeviceGroupMember
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device group members. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record.
:type DeviceGroupMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record.
:type DeviceGroupMemberID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this membership record.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this membership record.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the group associated with this membership record.
:type GroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the group associated with this membership record.
:type GroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device group members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceGroupMemberID
:param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupMember. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria.
:rtype device_group_members: Array of DeviceGroupMember
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available device group members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DeviceGroupMemberChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DeviceGroupMemberChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect.
:type DeviceGroupMemberEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect.
:type DeviceGroupMemberEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record.
:type DeviceGroupMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record.
:type DeviceGroupMemberID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberStartTime: The starting effective time of this record.
:type DeviceGroupMemberStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberStartTime: The starting effective time of this record.
:type DeviceGroupMemberStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberTimestamp: The date and time this record was collected or calculated.
:type DeviceGroupMemberTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupMemberTimestamp: The date and time this record was collected or calculated.
:type DeviceGroupMemberTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this membership record.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this membership record.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the group associated with this membership record.
:type GroupID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the group associated with this membership record.
:type GroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device group members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceGroupMemberID
:param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupMember. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device group members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceGroupMemberChangedCols, DeviceGroupMemberEndTime, DeviceGroupMemberID, DeviceGroupMemberStartTime, DeviceGroupMemberTimestamp, DeviceID, GroupID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria.
:rtype device_group_members: Array of DeviceGroupMember
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device group members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceGroupMemberChangedCols, DeviceGroupMemberEndTime, DeviceGroupMemberID, DeviceGroupMemberStartTime, DeviceGroupMemberTimestamp, DeviceID, GroupID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceGroupMemberChangedCols: The operator to apply to the field DeviceGroupMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceGroupMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceGroupMemberChangedCols: If op_DeviceGroupMemberChangedCols is specified, the field named in this input will be compared to the value in DeviceGroupMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberChangedCols must be specified if op_DeviceGroupMemberChangedCols is specified.
:type val_f_DeviceGroupMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceGroupMemberChangedCols: If op_DeviceGroupMemberChangedCols is specified, this value will be compared to the value in DeviceGroupMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberChangedCols must be specified if op_DeviceGroupMemberChangedCols is specified.
:type val_c_DeviceGroupMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceGroupMemberEndTime: The operator to apply to the field DeviceGroupMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceGroupMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceGroupMemberEndTime: If op_DeviceGroupMemberEndTime is specified, the field named in this input will be compared to the value in DeviceGroupMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberEndTime must be specified if op_DeviceGroupMemberEndTime is specified.
:type val_f_DeviceGroupMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceGroupMemberEndTime: If op_DeviceGroupMemberEndTime is specified, this value will be compared to the value in DeviceGroupMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberEndTime must be specified if op_DeviceGroupMemberEndTime is specified.
:type val_c_DeviceGroupMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceGroupMemberID: The operator to apply to the field DeviceGroupMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceGroupMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceGroupMemberID: If op_DeviceGroupMemberID is specified, the field named in this input will be compared to the value in DeviceGroupMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberID must be specified if op_DeviceGroupMemberID is specified.
:type val_f_DeviceGroupMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceGroupMemberID: If op_DeviceGroupMemberID is specified, this value will be compared to the value in DeviceGroupMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberID must be specified if op_DeviceGroupMemberID is specified.
:type val_c_DeviceGroupMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceGroupMemberStartTime: The operator to apply to the field DeviceGroupMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceGroupMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceGroupMemberStartTime: If op_DeviceGroupMemberStartTime is specified, the field named in this input will be compared to the value in DeviceGroupMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberStartTime must be specified if op_DeviceGroupMemberStartTime is specified.
:type val_f_DeviceGroupMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceGroupMemberStartTime: If op_DeviceGroupMemberStartTime is specified, this value will be compared to the value in DeviceGroupMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberStartTime must be specified if op_DeviceGroupMemberStartTime is specified.
:type val_c_DeviceGroupMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceGroupMemberTimestamp: The operator to apply to the field DeviceGroupMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceGroupMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceGroupMemberTimestamp: If op_DeviceGroupMemberTimestamp is specified, the field named in this input will be compared to the value in DeviceGroupMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberTimestamp must be specified if op_DeviceGroupMemberTimestamp is specified.
:type val_f_DeviceGroupMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceGroupMemberTimestamp: If op_DeviceGroupMemberTimestamp is specified, this value will be compared to the value in DeviceGroupMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberTimestamp must be specified if op_DeviceGroupMemberTimestamp is specified.
:type val_c_DeviceGroupMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device associated with this membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_GroupID: The operator to apply to the field GroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GroupID: The internal NetMRI identifier for the group associated with this membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_GroupID: If op_GroupID is specified, the field named in this input will be compared to the value in GroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GroupID must be specified if op_GroupID is specified.
:type val_f_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_GroupID: If op_GroupID is specified, this value will be compared to the value in GroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GroupID must be specified if op_GroupID is specified.
:type val_c_GroupID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device group members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceGroupMemberID
:param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceGroupMember. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria.
:rtype device_group_members: Array of DeviceGroupMember
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
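# --- Hypothetical usage sketch (assumes the standard infoblox_netmri client) ---
#
#     from infoblox_netmri.client import InfobloxNetMRI
#
#     client = InfobloxNetMRI(host="netmri.example.com",
#                             username="admin", password="***")
#     broker = client.get_broker("DeviceGroupMember")
#     members = broker.index(GroupID=[12], limit=100,
#                            sort=["DeviceGroupMemberID"])
#     historical = broker.find(op_DeviceGroupMemberEndTime="is not null")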
|
455372
|
import os
import json
import argparse
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
parser = argparse.ArgumentParser(description='The script to generate data for harbor v1.4.0')
parser.add_argument('--endpoint', '-e', dest='endpoint', required=True, help='The endpoint to harbor')
parser.add_argument('--version', '-v', dest='version', required=False, help='The version to harbor')
args = parser.parse_args()
url = "https://"+args.endpoint+"/api/"
endpoint_url = "https://"+args.endpoint
print url
class HarborAPI:
def create_project(self, project_name):
body=dict(body={"project_name": ""+project_name+"", "metadata": {"public": "true"}})
request(url+"projects", 'post', **body)
def create_user(self, username):
payload = {"username":username, "email":<EMAIL>", "password":"<PASSWORD>", "realname":username, "comment":"string"}
body=dict(body=payload)
request(url+"users", 'post', **body)
def set_user_admin(self, user):
r = request(url+"users?username="+user+"", 'get')
userid = str(r.json()[0]['user_id'])
if args.version == "1.6":
body=dict(body={"has_admin_role": True})
else:
body=dict(body={"has_admin_role": 1})
request(url+"users/"+userid+"/sysadmin", 'put', **body)
def add_member(self, project, user, role):
r = request(url+"projects?name="+project+"", 'get')
projectid = str(r.json()[0]['project_id'])
if args.version == "1.6":
payload = {"member_user":{ "username": ""+user+""},"role_id": role}
else:
payload = {"roles": [role], "username":""+user+""}
body=dict(body=payload)
request(url+"projects/"+projectid+"/members", 'post', **body)
def add_endpoint(self, endpointurl, endpointname, username, password, insecure):
payload = {
"credential":{
"access_key":""+username+"",
"access_secret":""+password+"",
"type":"basic"
},
"insecure":insecure,
"name":""+endpointname+"",
"type":"harbor",
"url":""+endpoint_url+""
}
body=dict(body=payload)
print body
request(url+"/registries", 'post', **body)
def add_replication_rule(self, project, target, trigger, rulename):
r = request(url+"registries?name="+target+"", 'get')
targetid = r.json()[0]['id']
payload = {"name": ""+rulename+"", "deletion": False, "enabled": True, "description": "string", "dest_registry": {"id": targetid},"trigger": {"type": "manual"}}
body=dict(body=payload)
request(url+"replication/policies", 'post', **body)
def update_project_setting(self, project, contenttrust, preventrunning, preventseverity, scanonpush):
r = request(url+"projects?name="+project+"", 'get')
projectid = str(r.json()[0]['project_id'])
payload = {
"project_name": ""+project+"",
"metadata": {
"public": "True",
"enable_content_trust": contenttrust,
"prevent_vulnerable_images_from_running": preventrunning,
"prevent_vulnerable_images_from_running_severity": preventseverity,
"automatically_scan_images_on_push": scanonpush
}
}
body=dict(body=payload)
request(url+"projects/"+projectid+"", 'put', **body)
def update_systemsetting(self, emailfrom, emailhost, emailport, emailuser, creation, selfreg, token):
payload = {
"auth_mode": "db_auth",
"email_from": emailfrom,
"email_host": emailhost,
"email_port": emailport,
"email_identity": "string",
"email_username": emailuser,
"email_ssl": True,
"email_insecure": True,
"project_creation_restriction": creation,
"read_only": False,
"self_registration": selfreg,
"token_expiration": token,
"scan_all_policy": {
"type": "none",
"parameter": {
"daily_time": 0
}
}
}
body=dict(body=payload)
request(url+"configurations", 'put', **body)
def update_repoinfo(self, reponame):
payload = {"description": "testdescription"}
body=dict(body=payload)
request(url+"repositories/"+reponame+"", 'put', **body)
def get_ca(self, target='/harbor/ca/ca.crt'):
url = "https://" + args.endpoint + "/api/systeminfo/getcert"
resp = request(url, 'get')
try:
ca_content = json.loads(resp.text)
except ValueError:
ca_content = resp.text
ca_path = '/harbor/ca'
if not os.path.exists(ca_path):
try:
os.makedirs(ca_path)
            except Exception:
pass
        open(target, 'w').write(ca_content)
def request(url, method, user = None, userp = None, **kwargs):
if user is None:
user = "admin"
if userp is None:
userp = "Harbor12345"
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
resp = requests.request(method, url, verify=False, auth=(user, userp), **kwargs)
if resp.status_code >= 400:
raise Exception("[Exception Message] - {}".format(resp.text))
return resp
with open("data.json") as f:
data = json.load(f)
def pull_image(*image):
for i in image:
os.system("docker pull "+i)
def push_image(image, project):
os.system("docker tag "+image+" "+args.endpoint+"/"+project+"/"+image)
os.system("docker login "+args.endpoint+" -u Admin"+" -p Harbor12345")
os.system("docker push "+args.endpoint+"/"+project+"/"+image)
def push_signed_image(image, project, tag):
os.system("./sign_image.sh" + " " + args.endpoint + " " + project + " " + image + " " + tag)
def do_data_creation():
harborAPI = HarborAPI()
harborAPI.get_ca()
for user in data["users"]:
harborAPI.create_user(user["name"])
for user in data["admin"]:
harborAPI.set_user_admin(user["name"])
for project in data["projects"]:
harborAPI.create_project(project["name"])
for member in project["member"]:
harborAPI.add_member(project["name"], member["name"], member["role"])
pull_image("busybox", "redis", "haproxy", "alpine", "httpd:2")
push_image("busybox", data["projects"][0]["name"])
push_signed_image("alpine", data["projects"][0]["name"], "latest")
for endpoint in data["endpoint"]:
harborAPI.add_endpoint(endpoint["url"], endpoint["name"], endpoint["user"], endpoint["pass"], True)
for replicationrule in data["replicationrule"]:
harborAPI.add_replication_rule(replicationrule["project"],
replicationrule["endpoint"], replicationrule["trigger"],
replicationrule["rulename"])
for project in data["projects"]:
harborAPI.update_project_setting(project["name"],
project["configuration"]["enable_content_trust"],
project["configuration"]["prevent_vulnerable_images_from_running"],
project["configuration"]["prevent_vlunerable_images_from_running_severity"],
project["configuration"]["automatically_scan_images_on_push"])
harborAPI.update_systemsetting(data["configuration"]["emailsetting"]["emailfrom"],
data["configuration"]["emailsetting"]["emailserver"],
float(data["configuration"]["emailsetting"]["emailport"]),
data["configuration"]["emailsetting"]["emailuser"],
data["configuration"]["projectcreation"],
data["configuration"]["selfreg"],
float(data["configuration"]["token"]))
do_data_creation()
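
# Illustrative shape of the data.json file read above, inferred from the keys the
# script accesses (all values are made-up examples, not the file shipped with
# Harbor). Note that the misspelled key
# "prevent_vlunerable_images_from_running_severity" must match the spelling used
# by the update_project_setting call above.
#
#     {
#       "users": [{"name": "user001"}],
#       "admin": [{"name": "user001"}],
#       "projects": [{
#         "name": "project001",
#         "member": [{"name": "user001", "role": 1}],
#         "configuration": {
#           "enable_content_trust": "true",
#           "prevent_vulnerable_images_from_running": "true",
#           "prevent_vlunerable_images_from_running_severity": "high",
#           "automatically_scan_images_on_push": "true"
#         }
#       }],
#       "endpoint": [{"url": "https://harbor.example.com", "name": "endpoint001",
#                     "user": "admin", "pass": "Harbor12345"}],
#       "replicationrule": [{"project": "project001", "endpoint": "endpoint001",
#                            "trigger": "manual", "rulename": "rule001"}],
#       "configuration": {
#         "emailsetting": {"emailfrom": "admin@example.com",
#                          "emailserver": "smtp.example.com",
#                          "emailport": "25", "emailuser": "admin"},
#         "projectcreation": "everyone",
#         "selfreg": true,
#         "token": "30"
#       }
#     }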
|
455382
|
import base64, os
from io import BytesIO
from PIL import ImageFont, ImageDraw, Image
from .. import static
path = os.path.join(static, 'high_eq_image.png')
fontpath = os.path.join(static, 'msyh.ttc')
def draw_text(img_pil: Image.Image, text: str, offset_x: float):
draw = ImageDraw.Draw(img_pil)
font = ImageFont.truetype(fontpath, 48)
width, height = draw.textsize(text, font)
x = 5
if width > 390:
font = ImageFont.truetype(fontpath, int(390 * 48 / width))
width, height = draw.textsize(text, font)
else:
x = int((400 - width) / 2)
draw.rectangle((x + offset_x - 2, 360, x + 2 + width + offset_x, 360 + height * 1.2), fill=(0, 0, 0, 255))
draw.text((x + offset_x, 360), text, font=font, fill=(255, 255, 255, 255))
def text_to_image(text: str) -> Image.Image:
font = ImageFont.truetype(fontpath, 24)
padding = 10
margin = 4
text_list = text.split('\n')
max_width = 0
for text in text_list:
w, h = font.getsize(text)
max_width = max(max_width, w)
wa = max_width + padding * 2
ha = h * len(text_list) + margin * (len(text_list) - 1) + padding * 2
i = Image.new('RGB', (wa, ha), color=(255, 255, 255))
draw = ImageDraw.Draw(i)
for j in range(len(text_list)):
text = text_list[j]
draw.text((padding, padding + j * (margin + h)), text, font=font, fill=(0, 0, 0))
return i
def image_to_base64(img: Image.Image, format='PNG') -> str:
output_buffer = BytesIO()
img.save(output_buffer, format)
byte_data = output_buffer.getvalue()
base64_str = base64.b64encode(byte_data).decode()
return 'base64://' + base64_str
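
# Usage sketch for the helpers above (the file name and text are made up):
# render multi-line text to an image, then encode it as a "base64://" segment.
#
#     img = text_to_image("first line\nsecond line")
#     img.save("demo.png")
#     segment = image_to_base64(img)  # -> "base64://iVBORw0K..."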
|
455506
|
import ST7735
from PIL import Image, ImageDraw
from enviroplus.noise import Noise
print("""noise-amps-at-freqs.py - Measure amplitude from specific frequency bins
This example retrieves the median amplitude from 3 user-specified frequency ranges and plots them in Blue, Green and Red on the Enviro+ display.
As you play a continuous rising tone on your phone, you should notice peaks that correspond to the frequency entering each range.
Press Ctrl+C to exit!
""")
noise = Noise()
disp = ST7735.ST7735(
port=0,
cs=ST7735.BG_SPI_CS_FRONT,
dc=9,
backlight=12,
rotation=90)
disp.begin()
img = Image.new('RGB', (disp.width, disp.height), color=(0, 0, 0))
draw = ImageDraw.Draw(img)
while True:
amps = noise.get_amplitudes_at_frequency_ranges([
(100, 200),
(500, 600),
(1000, 1200)
])
amps = [n * 32 for n in amps]
img2 = img.copy()
draw.rectangle((0, 0, disp.width, disp.height), (0, 0, 0))
img.paste(img2, (1, 0))
draw.line((0, 0, 0, amps[0]), fill=(0, 0, 255))
draw.line((0, 0, 0, amps[1]), fill=(0, 255, 0))
draw.line((0, 0, 0, amps[2]), fill=(255, 0, 0))
disp.display(img)
|
455507
|
from __future__ import absolute_import
import re
import bisect
import sys
from graphite.tags.base import BaseTagDB, TaggedSeries
class RedisTagDB(BaseTagDB):
"""
Stores tag information in a Redis database.
Keys used are:
.. code-block:: none
series # Set of all paths
series:<path>:tags # Hash of all tag:value pairs for path
tags # Set of all tags
tags:<tag>:series # Set of paths with entry for tag
tags:<tag>:values # Set of values for tag
tags:<tag>:values:<value> # Set of paths matching tag/value
"""
def __init__(self, settings, *args, **kwargs):
super(RedisTagDB, self).__init__(settings, *args, **kwargs)
from redis import Redis
self.r = Redis(
host=settings.TAGDB_REDIS_HOST,
port=settings.TAGDB_REDIS_PORT,
db=settings.TAGDB_REDIS_DB,
password=settings.TAGDB_REDIS_PASSWORD,
decode_responses=(sys.version_info[0] >= 3),
)
def _find_series(self, tags, requestContext=None):
selector = None
selector_cnt = None
filters = []
# loop through tagspecs, look for best spec to use as selector
for tagspec in tags:
(tag, operator, spec) = self.parse_tagspec(tagspec)
if operator == '=':
matches_empty = spec == ''
if not matches_empty:
cnt = self.r.scard('tags:' + tag + ':values:' + spec)
if not selector or selector[1] != '=' or selector_cnt > cnt:
if selector:
filters.append(selector)
selector = (tag, operator, spec)
selector_cnt = cnt
continue
filters.append((tag, operator, spec))
elif operator == '=~':
pattern = re.compile(spec)
matches_empty = bool(pattern.match(''))
if not matches_empty and (not selector or selector[1] != '='):
cnt = self.r.scard('tags:' + tag + ':values')
if not selector or selector_cnt > cnt:
if selector:
filters.append(selector)
selector = (tag, operator, pattern)
selector_cnt = cnt
continue
filters.append((tag, operator, pattern))
elif operator == '!=':
matches_empty = spec != ''
if not matches_empty and (not selector or selector[1] != '='):
cnt = self.r.scard('tags:' + tag + ':values')
if not selector or selector_cnt > cnt:
if selector:
filters.append(selector)
selector = (tag, operator, spec)
selector_cnt = cnt
continue
filters.append((tag, operator, spec))
elif operator == '!=~':
pattern = re.compile(spec)
matches_empty = not pattern.match('')
if not matches_empty and (not selector or selector[1] != '='):
cnt = self.r.scard('tags:' + tag + ':values')
if not selector or selector_cnt > cnt:
if selector:
filters.append(selector)
selector = (tag, operator, pattern)
selector_cnt = cnt
continue
filters.append((tag, operator, pattern))
else:
raise ValueError("Invalid operator %s" % operator)
if not selector:
raise ValueError("At least one tagspec must not match the empty string")
# get initial list of series
(tag, operator, spec) = selector
# find list of values that match the tagspec
values = None
if operator == '=':
values = [spec]
elif operator == '=~':
# see if we can identify a literal prefix to filter by in redis
match = None
m = re.match('([a-z0-9]+)([^*?|][^|]*)?$', spec.pattern)
if m:
match = m.group(1) + '*'
values = [value for value in self.r.sscan_iter('tags:' + tag + ':values', match=match) if spec.match(value) is not None]
elif operator == '!=':
values = [value for value in self.r.sscan_iter('tags:' + tag + ':values') if value != spec]
elif operator == '!=~':
values = [value for value in self.r.sscan_iter('tags:' + tag + ':values') if spec.match(value) is None]
# if this query matched no values, just short-circuit since the result of the final intersect will be empty
if not values:
return []
results = []
# apply filters
operators = ['=','!=','=~','!=~']
filters.sort(key=lambda a: operators.index(a[1]))
for series in self.r.sunion(*['tags:' + tag + ':values:' + value for value in values]):
try:
parsed = self.parse(series)
except Exception:
continue
matched = True
for (tag, operator, spec) in filters:
value = parsed.tags.get(tag, '')
if (
(operator == '=' and value != spec) or
(operator == '=~' and spec.match(value) is None) or
(operator == '!=' and value == spec) or
(operator == '!=~' and spec.match(value) is not None)
):
matched = False
break
if matched:
bisect.insort_left(results, series)
return results
def get_series(self, path, requestContext=None):
tags = {}
tags = self.r.hgetall('series:' + path + ':tags')
if not tags:
return None
return TaggedSeries(tags['name'], tags)
def list_tags(self, tagFilter=None, limit=None, requestContext=None):
result = []
if tagFilter:
tagFilter = re.compile(tagFilter)
for tag in self.r.sscan_iter('tags'):
if tagFilter and tagFilter.match(tag) is None:
continue
if len(result) == 0 or tag >= result[-1]:
if limit and len(result) >= limit:
continue
result.append(tag)
else:
bisect.insort_left(result, tag)
if limit and len(result) > limit:
del result[-1]
return [
{'tag': tag}
for tag in result
]
def get_tag(self, tag, valueFilter=None, limit=None, requestContext=None):
if not self.r.sismember('tags', tag):
return None
return {
'tag': tag,
'values': self.list_values(
tag,
valueFilter=valueFilter,
limit=limit,
requestContext=requestContext
),
}
def list_values(self, tag, valueFilter=None, limit=None, requestContext=None):
result = []
if valueFilter:
valueFilter = re.compile(valueFilter)
for value in self.r.sscan_iter('tags:' + tag + ':values'):
if valueFilter and valueFilter.match(value) is None:
continue
if len(result) == 0 or value >= result[-1]:
if limit and len(result) >= limit:
continue
result.append(value)
else:
bisect.insort_left(result, value)
if limit and len(result) > limit:
del result[-1]
return [
{'value': value, 'count': self.r.scard('tags:' + tag + ':values:' + value)}
for value in result
]
def tag_series(self, series, requestContext=None):
# extract tags and normalize path
parsed = self.parse(series)
path = parsed.path
with self.r.pipeline() as pipe:
pipe.sadd('series', path)
for tag, value in parsed.tags.items():
pipe.hset('series:' + path + ':tags', tag, value)
pipe.sadd('tags', tag)
pipe.sadd('tags:' + tag + ':series', path)
pipe.sadd('tags:' + tag + ':values', value)
pipe.sadd('tags:' + tag + ':values:' + value, path)
pipe.execute()
return path
def del_series(self, series, requestContext=None):
# extract tags and normalize path
parsed = self.parse(series)
path = parsed.path
with self.r.pipeline() as pipe:
pipe.srem('series', path)
pipe.delete('series:' + path + ':tags')
for tag, value in parsed.tags.items():
pipe.srem('tags:' + tag + ':series', path)
pipe.srem('tags:' + tag + ':values:' + value, path)
pipe.execute()
return True
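
# Worked example of the key layout described in the class docstring. Tagging the
# (made-up) series "disk.used;datacenter=dc1;server=web01" results, after path
# normalisation, in roughly the following Redis writes:
#
#   SADD series "disk.used;datacenter=dc1;server=web01"
#   HSET series:disk.used;datacenter=dc1;server=web01:tags name disk.used
#   HSET series:disk.used;datacenter=dc1;server=web01:tags datacenter dc1
#   HSET series:disk.used;datacenter=dc1;server=web01:tags server web01
#   SADD tags name
#   SADD tags:datacenter:series "disk.used;datacenter=dc1;server=web01"
#   SADD tags:datacenter:values dc1
#   SADD tags:datacenter:values:dc1 "disk.used;datacenter=dc1;server=web01"
#   ... and the analogous tags:* writes for the "name" and "server" tags.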
|
455536
|
from django.apps import AppConfig
class GithubIntegrationConfig(AppConfig):
name = 'integrations.github'
def ready(self):
import integrations.github.handlers
|
455549
|
import json
from pantomime.types import JSON
from opensanctions.core import Context
from opensanctions import helpers as h
def format_number(value):
if value is not None:
return "%.2f" % float(value)
def crawl(context: Context):
path = context.fetch_resource("source.json", context.dataset.data.url)
context.export_resource(path, JSON, title=context.SOURCE_TITLE)
with open(path, "r") as fh:
data = json.load(fh)
for entry in data.get("result", []):
wallet = context.make("CryptoWallet", target=True)
wallet.id = context.make_slug(entry.get("address"))
wallet.add("publicKey", entry.pop("address"))
wallet.add("topics", "crime.theft")
wallet.add("createdAt", entry.pop("createdAt"))
wallet.add("modifiedAt", entry.pop("updatedAt"))
wallet.add("alias", entry.pop("family"))
wallet.add("balance", format_number(entry.pop("balance")))
wallet.add("amountUsd", format_number(entry.pop("balanceUSD")))
wallet.add("currency", entry.pop("blockchain"))
h.audit_data(entry, ignore=["transactions"])
context.emit(wallet)
|
455550
|
import functools
import math
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple
import geometry_msgs.msg
import rospy
import std_msgs
from simulation_brain_link.msg import State as StateEstimationMsg
from simulation_groundtruth.srv import LaneSrv, SectionSrv
from tf2_msgs.msg import TFMessage
from simulation.utils.geometry import Line, Pose, Transform, Vector
from simulation.utils.ros_base.node_base import NodeBase
@dataclass
class DrivingState:
distance_driven: float
time: float
class AutomaticDriveNode(NodeBase):
"""ROS node to drive the car along the right side of the road.
    Instead of directly modifying the car's position and speed,
    the vehicle_simulation's output is emulated.
    I.e. the transform from the vehicle to its world coordinate system is published
    and a state_estimation message is published.
    This makes it possible to use the vehicle_simulation_link_node to move the car
    and only replace KITcar_brain + vehicle_simulation!
Attributes:
pub_tf (rospy.publisher): Publishes the new vehicle/world transform.
state_estimation_publisher (rospy.Publisher): Publishes state estimation messages.
section_proxy (rospy.ServiceProxy): Connection to groundtruth section service.
lane_proxy (rospy.ServiceProxy): Connection to groundtruth lane service.
_driving_state (DrivingState): Keep track of time and distance along the road.
"""
def __init__(self):
super().__init__(
name="automatic_drive_node", log_level=rospy.INFO
) # Name can be overwritten in launch file
self.run(function=self.update, rate=float(self.param.rate))
def start(self):
"""Start node."""
self.pub_tf = rospy.Publisher(
"/tf", TFMessage, queue_size=100
) # See: https://github.com/ros/geometry2/blob/melodic-devel
# /tf2_ros/src/tf2_ros/transform_broadcaster.py
self.state_estimation_publisher = rospy.Publisher(
self.param.topics.vehicle_simulation_link.state_estimation,
StateEstimationMsg,
queue_size=1,
)
groundtruth_topics = self.param.topics.groundtruth
rospy.wait_for_service(groundtruth_topics.section, timeout=30)
# Create groundtruth service proxies
self.section_proxy = rospy.ServiceProxy(groundtruth_topics.section, SectionSrv)
self.lane_proxy = rospy.ServiceProxy(groundtruth_topics.lane, LaneSrv)
# Calculate the driving line once, so that it is cached!!
self.driving_line
# Read initial position from vehicle simulation link parameters
try:
initial = self.param.vehicle_simulation_link.initial_pose
if len(initial) > 3:
angle = initial[3]
del initial[3]
else:
angle = 0
pos = Vector(initial)
self.initial_tf = Transform(pos, angle)
except KeyError:
self.initial_tf = None
self._driving_state = DrivingState(0, rospy.Time.now().to_sec())
super().start()
def stop(self):
self.state_estimation_publisher.unregister()
self.pub_tf.unregister()
super().stop()
@functools.cached_property
def middle_line(self) -> Line:
"""Line: Line in the middle of the road."""
# Get all sections
sections: List[int] = self.section_proxy().sections
assert len(sections) > 0, (
"There must be atleast one road section. "
"(The groundtruth node might not be working correctly.)"
)
# Concatenate the middle line of all sections
return sum(
(Line(self.lane_proxy(sec.id).lane_msg.middle_line) for sec in sections), Line()
)
@functools.cached_property
def driving_line(self) -> Tuple[Line, List[List[float]]]:
"""Tuple[Line, List[List[float]]]: Line where car drives.
And points to stop at.
"""
path = Line()
stops = []
def append(offset, segment, stop):
nonlocal path
if offset > 0:
segment = segment.parallel_offset(offset, "left")
elif offset < 0:
segment = segment.parallel_offset(-offset, "right")
path += segment
if stop > 0:
stops.append([path.length, stop])
if self.param.randomize_path:
# Stitch a line together from varied offsets along the middle line
length = self.middle_line.length
x = 0
offset = 0
max_step = 4
road_width = 0.4
while x < length:
offset = max(
min((0.5 - random.random()) * 2 * road_width, road_width),
-road_width,
)
p = self.middle_line.interpolate_pose(x)
orth = Vector(1, 0, 0).rotated(p.get_angle() + math.pi / 2)
path += Line([p.position + offset * orth, p.position + offset * orth])
x += max_step * random.random()
else:
param_path: List[Dict[str, float]] = self.param.path
param_path = [obj for obj in param_path if "offset" in obj]
current_start = param_path[0]["start"]
current_offset = param_path[0]["offset"]
current_stop = 0
param_path.remove(param_path[0])
# Read the path from the parameters
for obj in param_path:
end_arc_length = obj["start"]
before_end_line = Line.cut(self.middle_line, end_arc_length)[0]
current_segment = Line.cut(before_end_line, current_start)[1]
append(current_offset, current_segment, current_stop)
current_offset = obj["offset"]
current_start = obj["start"]
current_stop = obj.get("stop", 0)
current_segment = Line.cut(self.middle_line, current_start)[1]
append(current_offset, current_segment, 0)
return path.simplify(0.05).smooth(0.1), stops
@functools.cached_property
def speeds(self):
out = []
speed_params = [obj for obj in self.param.path if "speed" in obj]
for param in speed_params:
path_length = self.middle_line.project(
self.driving_line[0].interpolate(param["start"])
)
out.append([path_length, int(param["speed"])])
return out
def update(self):
"""Calculate and publish new car state information."""
# Update the driving state
current_time = rospy.Time.now().to_sec()
current_speed = self.param.speed
d_time = current_time - self._driving_state.time
if (
len(self.speeds) > 1
and self.speeds[1][0] <= self._driving_state.distance_driven
):
# If we reach a new speed zone we delete the old one
del self.speeds[0]
# Set the current speed
current_speed = self.speeds[0][1] if len(self.speeds) > 0 else self.param.speed
current_speed /= 36 # km/h to m/s and model car scale of 1/10
# Check if the car needs to stop
remaining_stops = self.driving_line[1]
if len(remaining_stops) > 0:
if remaining_stops[0][0] < self._driving_state.distance_driven:
remaining_stops[0][1] -= d_time
if remaining_stops[0][1] > 0:
current_speed = 0
else:
del remaining_stops[0]
self._driving_state.distance_driven += d_time * current_speed
if (
not self.param.loop
and self._driving_state.distance_driven > self.driving_line[0].length
):
rospy.signal_shutdown("Finished driving along the road.")
return
self._driving_state.distance_driven %= self.driving_line[0].length
self._driving_state.time = current_time
rospy.logdebug(f"Current driving state: {self._driving_state}")
# Calculate position, speed, and yaw
position = self.driving_line[0].interpolate(self._driving_state.distance_driven)
# Depending on the align_with_middle_line parameter, the car is always parallel
# to the middle line or to the driving line.
alignment_line = (
self.middle_line if self.param.align_with_middle_line else self.driving_line[0]
)
# Always let the car face into the direction of the middle line.
pose = Pose(
position,
alignment_line.interpolate_direction(alignment_line.project(position)),
)
speed = Vector(current_speed, 0) # Ignore y component of speed
# Yaw rate = curvature * speed
yaw_rate = (
alignment_line.interpolate_curvature(
min(self._driving_state.distance_driven, alignment_line.length)
)
* current_speed
)
# Publish up to date messages!
self.update_world_vehicle_tf(
self.initial_tf.inverse * Transform(pose, pose.get_angle())
)
self.update_state_estimation(speed, yaw_rate)
def update_state_estimation(self, speed: Vector, yaw_rate: float):
"""Publish new state estimation message.
Args:
speed: Current speed in vehicle coordinates.
yaw_rate: Yaw rate of the car.
"""
msg = StateEstimationMsg()
msg.speed_x = speed.x
msg.speed_y = speed.y
msg.yaw_rate = yaw_rate
self.state_estimation_publisher.publish(msg)
def update_world_vehicle_tf(self, vehicle_world_tf: Transform):
"""Publish up to date world to vehicle transformation to /tf.
Args:
vehicle_world_tf(Transform): Transformation between vehicle and world frames.
"""
tf_stamped = geometry_msgs.msg.TransformStamped()
tf_stamped.header = std_msgs.msg.Header()
tf_stamped.header.stamp = rospy.Time.now()
# Transform from world to vehicle
tf_stamped.header.frame_id = self.param.vehicle_simulation_link.frame.world
tf_stamped.child_frame_id = self.param.vehicle_simulation_link.frame.vehicle
# Transformation from world to vehicle
tf_stamped.transform = (vehicle_world_tf).to_geometry_msg()
self.pub_tf.publish(TFMessage([tf_stamped]))
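
# Illustrative value for the `path` parameter read by `driving_line` and `speeds`
# (numbers are made up): each entry applies from its `start` arc length onwards.
# Entries with an `offset` shift the car sideways from the middle line (negative
# values move it to the right), an optional `stop` makes the car pause for
# roughly that many seconds, and entries with a `speed` start a new speed zone
# in km/h.
#
#   path:
#     - {start: 0.0, offset: -0.2}
#     - {start: 5.0, offset: 0.0, stop: 2.0}
#     - {start: 8.0, speed: 30}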
|
455569
|
from .data_structures import Dependency
from .layer_topology import LayerTopology
from .topology_manager import TopologyManager
from ..layers import GatherLayer
class GatherTopology(LayerTopology):
"""docstring for PaddingTopology"""
def __init__(self, layer):
super(GatherTopology, self).__init__(layer)
self.axis = self.layer.axis
if self.axis < 0:
            self.axis = len(self.layer.input_shape) + self.axis
self.features_gathering = self.axis == len(self.layer.input_shape) - 1
def apply_layer_for_single_spatial_location(self, spatial_location, dependencies_values, output_index=0):
if self.features_gathering:
return self.layer(dependencies_values[0])
return dependencies_values[0]
def get_spatial_dependency(self, spatial_location, output_index=0):
if self.features_gathering:
input_spatial_location = spatial_location
else:
input_spatial_location = list(spatial_location)
input_spatial_location[self.axis - 1] = self.layer.indices[spatial_location[self.axis - 1]]
input_spatial_location = tuple(input_spatial_location)
return [Dependency(input_index=0, spatial_location=input_spatial_location)]
TopologyManager().register_layer_topology(GatherLayer, GatherTopology)
|
455583
|
import MDAnalysis as mda
import numpy as np
from swarmcg import config
def get_AA_bonds_distrib(ns, beads_ids, grp_type, grp_nb):
"""Calculate bonds distribution from AA trajectory.
ns requires:
aa2cg_universe
mda_backend
bw_constraints
bw_bonds
bonds_scaling
bonds_scaling_specific
bins_constraints
bins_bonds
ns creates:
bonds_rescaling_performed
"""
bond_values = np.empty(len(ns.aa2cg_universe.trajectory) * len(beads_ids))
frame_values = np.empty(len(beads_ids))
bead_pos_1 = np.empty((len(beads_ids), 3), dtype=np.float32)
bead_pos_2 = np.empty((len(beads_ids), 3), dtype=np.float32)
for ts in ns.aa2cg_universe.trajectory:
for i in range(len(beads_ids)):
bead_id_1, bead_id_2 = beads_ids[i]
bead_pos_1[i] = ns.aa2cg_universe.atoms[bead_id_1].position
bead_pos_2[i] = ns.aa2cg_universe.atoms[bead_id_2].position
mda.lib.distances.calc_bonds(bead_pos_1, bead_pos_2, backend=ns.mda_backend, box=None, result=frame_values)
bond_values[len(beads_ids) * ts.frame:len(beads_ids) * (ts.frame + 1)] = frame_values / 10 # retrieved nm
bond_avg_init = round(np.average(bond_values), 3)
# NOTE: for rescaling we first take the average of the group, then we rescale
# this means if a bond group has a bimodal distribution, the rescale distribution is still bimodal
# rescale all bonds length if argument -bonds_scaling is provided
if ns.bonds_scaling != config.bonds_scaling:
bond_values = [bond_length * ns.bonds_scaling for bond_length in bond_values]
bond_avg_final = round(np.average(bond_values), 3)
ns.bonds_rescaling_performed = True
print(" Ref. AA-mapped distrib. rescaled to avg", bond_avg_final, "nm for", grp_type, grp_nb + 1, "(initially",
bond_avg_init, "nm)")
# or shift distributions for bonds that are too short for direct CG mapping (according to argument -min_bonds_length)
elif bond_avg_init < ns.min_bonds_length:
bond_rescale_factor = ns.min_bonds_length / bond_avg_init
bond_values = [bond_length * bond_rescale_factor for bond_length in bond_values]
bond_avg_final = round(np.average(bond_values), 3)
ns.bonds_rescaling_performed = True
print(" Ref. AA-mapped distrib. rescaled to avg", bond_avg_final, "nm for", grp_type, grp_nb + 1, "(initially",
bond_avg_init, "nm)")
# or if specific lengths were provided for constraints and/or bonds
elif ns.bonds_scaling_specific is not None:
if grp_type.startswith("constraint"):
geom_id_full = f"C{grp_nb + 1}"
elif grp_type.startswith("bond"):
geom_id_full = f"B{grp_nb + 1}"
else:
# TODO: what should the code do here?
pass
if (geom_id_full.startswith("C") and geom_id_full in ns.bonds_scaling_specific) or (
geom_id_full.startswith("B") and geom_id_full in ns.bonds_scaling_specific):
bond_rescale_factor = ns.bonds_scaling_specific[geom_id_full] / bond_avg_init
bond_values = [bond_length * bond_rescale_factor for bond_length in bond_values]
bond_avg_final = round(np.average(bond_values), 3)
ns.bonds_rescaling_performed = True
print(" Ref. AA-mapped distrib. rescaled to avg", bond_avg_final, "nm for", grp_type, grp_nb + 1,
"(initially", bond_avg_init, "nm)")
else:
bond_avg_final = bond_avg_init
else:
bond_avg_final = bond_avg_init
# or alternatively, do not rescale these bonds but add specific exclusion rules, OR JUST SUGGEST USER TO CHECK THIS
# exclusions storage format: ns.cg_itp["exclusion"].append([int(bead_id)-1 for bead_id in sp_itp_line[0:2]])
if grp_type.startswith("constraint"):
bond_hist = np.histogram(bond_values, ns.bins_constraints, density=True)[
0] * ns.bw_constraints # retrieve 1-sum densities
elif grp_type.startswith("bond"):
bond_hist = np.histogram(bond_values, ns.bins_bonds, density=True)[0] * ns.bw_bonds # retrieve 1-sum densities
else:
# TODO: what should the code do here?
pass
return bond_avg_final, bond_hist, bond_values
def get_CG_bonds_distrib(ns, beads_ids, grp_type):
""""Calculate bonds distribution from CG trajectory.
ns requires:
cg_universe
bw_bonds
bw_constraints
bins_constraints
bins_bonds
"""
bond_values = np.empty(len(ns.cg_universe.trajectory) * len(beads_ids))
frame_values = np.empty(len(beads_ids))
bead_pos_1 = np.empty((len(beads_ids), 3), dtype=np.float32)
bead_pos_2 = np.empty((len(beads_ids), 3), dtype=np.float32)
for ts in ns.cg_universe.trajectory: # no need for PBC handling, trajectories were made wholes for the molecule
for i in range(len(beads_ids)):
bead_id_1, bead_id_2 = beads_ids[i]
bead_pos_1[i] = ns.cg_universe.atoms[bead_id_1].position
bead_pos_2[i] = ns.cg_universe.atoms[bead_id_2].position
mda.lib.distances.calc_bonds(bead_pos_1, bead_pos_2, backend=ns.mda_backend, box=None, result=frame_values)
bond_values[len(beads_ids) * ts.frame:len(beads_ids) * (ts.frame + 1)] = frame_values / 10 # retrieved nm
bond_avg = round(np.mean(bond_values), 3)
if grp_type == "constraint":
bond_hist = np.histogram(bond_values, ns.bins_constraints, density=True)[
0] * ns.bw_constraints # retrieve 1-sum densities
elif grp_type == "bond":
bond_hist = np.histogram(bond_values, ns.bins_bonds, density=True)[0] * ns.bw_bonds # retrieve 1-sum densities
else:
# TODO: what should the code do here?
pass
return bond_avg, bond_hist, bond_values
|
455588
|
from ..remote import RemoteModel
class ScriptModuleRemote(RemoteModel):
"""
Script module library information.
| ``id:`` The internal NetMRI identifier of the Script Module.
| ``attribute type:`` number
| ``name:`` The unique name of the Script Module.
| ``attribute type:`` string
| ``category:`` User defined category of the Script Module.
| ``attribute type:`` string
| ``description:`` A description for the Script Module.
| ``attribute type:`` string
| ``created_by:`` Indicates by whom Script Module was created.
| ``attribute type:`` string
| ``updated_by:`` Indicates by whom Script Module was updated.
| ``attribute type:`` string
| ``created_at:`` The date and time the Script Module was created.
| ``attribute type:`` datetime
| ``updated_at:`` The date and time the Script Module was updated.
| ``attribute type:`` datetime
| ``language:`` The language of the script module
| ``attribute type:`` string
"""
properties = ("id",
"name",
"category",
"description",
"created_by",
"updated_by",
"created_at",
"updated_at",
"language",
)
|
455610
|
class SelectedCellsChangedEventArgs(EventArgs):
"""
Provides data for the System.Windows.Controls.DataGrid.SelectedCellsChanged event.
SelectedCellsChangedEventArgs(addedCells: List[DataGridCellInfo],removedCells: List[DataGridCellInfo])
SelectedCellsChangedEventArgs(addedCells: ReadOnlyCollection[DataGridCellInfo],removedCells: ReadOnlyCollection[DataGridCellInfo])
"""
@staticmethod
def __new__(self,addedCells,removedCells):
"""
__new__(cls: type,addedCells: List[DataGridCellInfo],removedCells: List[DataGridCellInfo])
__new__(cls: type,addedCells: ReadOnlyCollection[DataGridCellInfo],removedCells: ReadOnlyCollection[DataGridCellInfo])
"""
pass
AddedCells=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the cells that were added to the selection.
Get: AddedCells(self: SelectedCellsChangedEventArgs) -> IList[DataGridCellInfo]
"""
RemovedCells=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the list of cells removed from the selection.
Get: RemovedCells(self: SelectedCellsChangedEventArgs) -> IList[DataGridCellInfo]
"""
|