hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71bbea834d6298b1b749394c01ad48a8ce0536c | 76,454 | py | Python | tests/lax_numpy_test.py | zhongwen/jax | 76d2a87915863d3a32732837cc7bf61b7b2f9e5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/lax_numpy_test.py | zhongwen/jax | 76d2a87915863d3a32732837cc7bf61b7b2f9e5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/lax_numpy_test.py | zhongwen/jax | 76d2a87915863d3a32732837cc7bf61b7b2f9e5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
import operator
import unittest
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import six
import numpy as onp
import jax.ops
from jax import api
from jax import lax
from jax import numpy as lnp
from jax import test_util as jtu
from jax.lib import xla_bridge
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# Shape families used to parameterize test cases.  jtu.NUMPY_SCALAR_SHAPE and
# jtu.PYTHON_SCALAR_SHAPE are sentinel values (presumably making the arg
# makers produce bare numpy/Python scalars rather than arrays — see jtu).
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
# Dtype families used to parameterize test cases.
float_dtypes = [onp.float32, onp.float64]
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
# Describes one op-test configuration: the numpy/jax.numpy op name, its arity,
# the dtype/shape families to sample arguments from, the random generator to
# use, the differentiation modes (e.g. "rev"), an optional distinct test name,
# and whether result dtypes should be compared.
OpRecord = collections.namedtuple(
    "OpRecord",
    ["name", "nargs", "dtypes", "shapes", "rng", "diff_modes", "test_name",
     "check_dtypes"])

def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None,
              check_dtypes=True):
  """Builds an OpRecord, defaulting `test_name` to the op name."""
  return OpRecord(name, nargs, dtypes, shapes, rng, diff_modes,
                  test_name if test_name else name, check_dtypes)
# Ops whose jax.numpy implementation is expected to match the numpy op of the
# same name directly; consumed by testOp below.  The "rev" entries populate
# the diff_modes field (presumably reverse-mode AD — not consumed in the
# tests visible here).
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("add", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("greater", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("greater_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("less", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("less_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("maximum", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("minimum", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("multiply", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("not_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    # Restricted domain keeps tan's arguments away from its poles at +/-pi/2.
    op_record("tan", 1, number_dtypes, all_shapes, jtu.rand_uniform(-1.5, 1.5),
              ["rev"]),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
]
# Ops whose jax.numpy implementation is built from compositions of primitives
# rather than a direct one-to-one translation; also consumed by testOp.
JAX_COMPOUND_OP_RECORDS = [
    # angle has inconsistent 32/64-bit return types across numpy versions.
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default(), [],
              check_dtypes=False),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    # expm1 and log1p are each tested on both large and small inputs; the
    # small-input case is where these ops are numerically interesting.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="expm1_large"),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("floor_divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="log1p_large"),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("logaddexp", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("logaddexp2", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), []),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("sinc", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("where", 3, (onp.float32, onp.int64), all_shapes, jtu.rand_some_zero(), []),
    op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default(), ["rev"]),
]
# Bitwise ops, only meaningful on integer dtypes; consumed by testBitwiseOp,
# with dtype pairs filtered by _dtypes_are_compatible_for_bitwise_ops.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
]
# Reductions that accept an output `dtype` argument; consumed by testReducer.
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("prod", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("sum", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("var", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("std", 1, inexact_dtypes, nonempty_shapes, jtu.rand_default(), []),
]
# Reductions without a `dtype` argument; consumed by testReducerNoDtype.
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),
]
# Index-returning reductions; consumed by testArgMinMax.  rand_some_equal
# exercises tie-breaking between equal elements.
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
    op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
]
# Dunder-method overloads on JAX arrays, exercised via the `operator` module
# in testOperatorOverload.
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default(), []),
    # TODO(mattjj): investigate these failures
    # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),
    # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),
    # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
    # TODO(mattjj): lshift, rshift
]
# Reflected (right-hand) dunder overloads; consumed by testRightOperatorOverload.
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
    # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),
    # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),
    # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
]
# Some ops only exist in newer numpy releases, so gate those records on the
# installed numpy version.
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
  JAX_COMPOUND_OP_RECORDS += [
      op_record("isclose", 2, all_dtypes, all_shapes, jtu.rand_small_positive(), []),
      op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default(), []),
      op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default(), []),
  ]
  JAX_REDUCER_NO_DTYPE_RECORDS += [
      op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
  ]
# Python 2 additionally defines the classic-division __div__/__rdiv__ slots.
if six.PY2:
  JAX_OPERATOR_OVERLOADS += [
      op_record("__div__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
  ]
  JAX_RIGHT_OPERATOR_OVERLOADS += [
      op_record("__rdiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
  ]
# Short alias used throughout to enumerate dtype/shape combinations.
CombosWithReplacement = itertools.combinations_with_replacement
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: onp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: onp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
class LaxBackedNumpyTests(jtu.JaxTestCase):
  """Tests for LAX-backed Numpy implementation."""
  def _GetArgsMaker(self, rng, shapes, dtypes):
    # Returns a thunk producing one fresh random argument per
    # (shape, dtype) pair, suitable for _CheckAgainstNumpy/_CompileAndCheck.
    return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng": rec.rng, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"check_dtypes": rec.check_dtypes}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes):
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
py_scalar_arg = jtu.PYTHON_SCALAR_SHAPE in shapes
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=check_dtypes and not py_scalar_arg)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "name": rec.name}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng, shapes, dtypes):
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
self._CompileAndCheck(fun, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "name": rec.name}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng, shapes, dtypes):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest() # TODO(mattjj): clean up
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
fun = lambda fst, snd: getattr(snd, name)(fst)
self._CompileAndCheck(fun, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng": rec.rng, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):
if not FLAGS.jax_enable_x64 and any(
onp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng": rec.rng, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims}
for rec in JAX_REDUCER_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducer(self, onp_op, lnp_op, rng, shape, dtype, out_dtype, axis, keepdims):
onp_fun = lambda x: onp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng": rec.rng, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
lnp_fun = lambda x: lnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng": rec.rng, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, lnp_op, rng, shape, dtype, axis):
if (dtype == onp.complex128 and FLAGS.jax_test_dut and
FLAGS.jax_test_dut.startswith("gpu")):
raise unittest.SkipTest("complex128 reductions not supported on GPU")
def onp_fun(array_to_reduce):
return onp_op(array_to_reduce, axis)
def lnp_fun(array_to_reduce):
return lnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng": rng}
for rng in [jtu.rand_default()]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)
onp_fun = lambda a, b: onp.cross(a, b, axisa, axisb, axisc, axis)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": rng}
for rng in [jtu.rand_default()]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
self._CheckAgainstNumpy(onp.dot, lnp.dot, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": rng}
for rng in [jtu.rand_default()]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
self._CheckAgainstNumpy(onp.matmul, lnp.matmul, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.matmul, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng": rng}
for rng in [jtu.rand_default()]
for lhs_shape, rhs_shape, axes in [
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
onp_fun = lambda a, b: onp.tensordot(a, b, axes)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": jtu.rand_default()}
# TODO(phawkins): support integer dtypes too.
for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2)
for lhs_shape, rhs_shape in [
(l, r) for l, r in CombosWithReplacement(all_shapes, 2)
if len(jtu._dims_of_shape(l)) == 0
or len(jtu._dims_of_shape(r)) == 0
or l[-1] == r[-1]]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
onp_fun = lambda lhs, rhs: onp.inner(lhs, rhs)
lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng": jtu.rand_default()}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-1, 1)]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng):
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng": jtu.rand_default()}
for shape in all_shapes for dtype in number_dtypes
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng):
if onp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
lnp_fun = lambda x: lnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
          constant_values_rank),
       "shape": shape, "dtype": dtype, "mode": mode,
       "pad_width_rank": pad_width_rank,
       "constant_values_rank": constant_values_rank, "rng": jtu.rand_default(),
       "irng": jtu.rand_int(3)}
      for mode, constant_values_rank, shapes in [
          ('constant', 0, all_shapes),
          ('constant', 1, all_shapes),
          ('constant', 2, all_shapes),
          ('symmetric', None, nonempty_shapes),
          ('reflect', None, nonempty_shapes),
          ('wrap', None, nonempty_shapes),
      ]
      for shape in shapes for dtype in all_dtypes
      for pad_width_rank in range(3)))
  def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
              rng, irng):
    # Compares lnp.pad with onp.pad across pad modes and across the rank of
    # the pad_width / constant_values arguments.  `irng` (jtu.rand_int(3))
    # presumably draws small non-negative integer pad widths — confirm in jtu.
    # The slice [len(shape), 2][2 - rank:] produces shape [] (scalar), [2],
    # or [len(shape), 2] for rank 0, 1, 2 respectively.
    pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
    def onp_fun(x, kwargs):
      if pad_width.size == 0:
        # pad_width is empty only for rank-0 inputs with pad_width_rank=2
        # (shape [0, 2]); treat that as a no-op on the numpy side.
        # NOTE(review): presumably lnp.pad accepts this case — confirm.
        return x
      return onp.pad(x, pad_width, mode=mode, **kwargs)
    def lnp_fun(x, kwargs):
      return lnp.pad(x, pad_width, mode=mode, **kwargs)
    def args_maker():
      kwargs = {}
      if constant_values_rank:
        # constant_values mirrors pad_width's rank-dependent shape.
        kwargs["constant_values"] = rng(
            [len(shape), 2][2 - constant_values_rank:], dtype)
      return rng(shape, dtype), kwargs
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng": jtu.rand_default()}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for dtype in default_dtypes
for shape in all_shapes
))
def testTile(self, shape, dtype, reps, rng):
onp_fun = lambda arg: onp.tile(arg, reps)
lnp_fun = lambda arg: lnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in dtypes)),
"axis": axis, "base_shape": base_shape, "dtypes": dtypes,
"rng": jtu.rand_default()}
for num_arrs in [3]
for dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, dtypes, rng):
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]
onp_fun = lambda *args: onp.concatenate(args, axis=axis)
lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in dtypes)),
"axis": axis, "base_shape": base_shape, "dtypes": dtypes,
"rng": jtu.rand_default()}
for dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, dtypes, rng):
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]
onp_fun = lambda arr, values: onp.append(arr, values, axis=axis)
lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng": jtu.rand_default()}
for repeats in [0, 1, 2]
for dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng):
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng": jtu.rand_default(), "lnp_op": getattr(lnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
# TODO(phawkins): replace both type lists with default_dtypes after a
# Jaxlib update includes
# https://github.com/google/jax/commit/86f5d189cf563b027c3cd00eea38072c003905c8
for dtype in [onp.float32, onp.int32]
for out_dtype in [onp.float32, onp.int32]
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng):
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng": jtu.rand_default()}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng):
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng": jtu.rand_default()}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng):
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng": jtu.rand_default()}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng):
onp_fun = lambda arg: onp.diag(arg, k)
lnp_fun = lambda arg: lnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng": jtu.rand_default()}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng):
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
lnp_fun = lambda: lnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng": jtu.rand_default()}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng):
onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)
lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng": rng}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng in [jtu.rand_default()]))
def testStack(self, shape, axis, dtypes, rng):
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = partial(onp.stack, axis=axis)
lnp_fun = partial(lnp.stack, axis=axis)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng": rng}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng in [jtu.rand_default()]))
def testHVDStack(self, shape, op, dtypes, rng):
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = getattr(onp, op)
lnp_fun = getattr(lnp, op)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng": jtu.rand_default()}
for shape in array_shapes
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng):
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng": jtu.rand_default()}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng):
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng": jtu.rand_default()}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng):
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng": jtu.rand_default()}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng):
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng": jtu.rand_default()}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng):
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng": jtu.rand_default()}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng):
onp_fun = lambda x: onp.reshape(x, out_shape)
lnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng": jtu.rand_default()}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng):
onp_fun = lambda x: onp.expand_dims(x, dim)
lnp_fun = lambda x: lnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng": jtu.rand_default()}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng):
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng": jtu.rand_default()}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng):
onp_fun = lambda x: onp.squeeze(x, ax)
lnp_fun = lambda x: lnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng": jtu.rand_default(), "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape in nonempty_shapes
for dtype in number_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng):
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype),
None if weights_shape is None else rng(weights_shape, dtype)]
try:
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}".format(i), "arg": arg}
for i, arg in enumerate([
3., [1, 2, 3], [1., 2., 3.],
[[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]],
[[3, onp.array(2), 1], onp.arange(3.)],
])))
def testArray(self, arg):
args_maker = lambda: [arg]
self._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(lnp.array(3))
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = lnp.array(a)
assert ans == 3.
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return lnp.all(lnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
  @jtu.skip_on_devices("tpu")  # TODO(mattjj): investigate this failure
  def testOnesBroadcastingConstantHandler(self):
    """Checks stride-zero onp constants are lowered via Broadcast.

    Currently skipped: the monkey-patching below targets pre-jax3 client
    internals (x._node.c) and needs updating.
    """
    # TODO(mattjj): update this test for jax3
    self.skipTest("test needs jax3 update")
    def fun(x):
      ones = lnp.ones((3, 4))
      assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
      # To check that the constant handler generates a Broadcast for stride-zero
      # arrays, we monkey-patch the client instance.
      # TODO(mattjj): once we have better HLO dumping and inspecting facilities,
      # we can check the HLO more directly.
      c = x._node.c
      Broadcast = c.Broadcast  # pylint: disable=invalid-name
      was_called = []
      c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
      out = x + ones  # the ndarray constant handler should call Broadcast here
      assert was_called, "Broadcast was not called."
      return out
    fun = api.jit(fun)
    out_val = fun(lnp.ones(4))
    self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, lnp.ndarray)
return lnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return lnp.add(x, y)
def f(x, y):
return lnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
  def testAbstractionErrorMessage(self):
    """Python control flow on traced values raises TypeError under jit."""
    @api.jit
    def f(x, n):
      # range() needs a concrete int, but n is a tracer under jit.
      for _ in range(n):
        x = x * x
      return x
    self.assertRaises(TypeError, lambda: f(3., 3))
    @api.jit
    def g(x):
      # `if` on a traced value needs a concrete bool under jit.
      if x > 0.:
        return x * 2
      else:
        return x + 2
    self.assertRaises(TypeError, lambda: g(3.))
  def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    """A primitive with no XLA translation fails only when traced/jitted."""
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    # The remainder is unreachable while the skip above is in place; kept to
    # document the intended check.
    foo = lnp._not_implemented(lambda x: x)
    # No error if there's no tracing.
    foo(onp.arange(3))
    cfoo = api.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng": rng, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in range(-len(shape), len(shape)) # Test negative axes
for rng in [jtu.rand_default()]))
def testFlip(self, shape, dtype, axis, rng):
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng": rng, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng in [jtu.rand_default()]))
def testFlipud(self, shape, dtype, rng):
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng": rng, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng in [jtu.rand_default()]))
def testFliplr(self, shape, dtype, rng):
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng": rng, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng in [jtu.rand_default()]))
def testRot90(self, shape, dtype, k, axes, rng):
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(lnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(lnp.eye(3), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1)
ans = lnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# manual tests for sort are nice because we don't have to worry about ties.
# lax.sort is tested combinatorially.
ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a) # last axis
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng": rng, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1))
]
for rng in [jtu.rand_default()]))
def testRoll(self, shape, dtype, shifts, axis, rng):
args_maker = lambda: [rng(shape, dtype)]
lnp_op = lambda x: lnp.roll(x, shifts, axis=axis)
onp_op = lambda x: onp.roll(x, shifts, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng": rng, "rng_indices": rng_indices, "shape": shape,
"index_shape": index_shape, "dtype": dtype, "index_dtype": index_dtype,
"axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)), [None])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng in [jtu.rand_default()]
for rng_indices in [jtu.rand_int(-5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode, rng,
rng_indices):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng": rng, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (3, 4), (3, 4, 5)]
for axis in itertools.chain(range(len(shape)), [-1], [None])
for dtype in default_dtypes
for rng in [jtu.rand_default()]))
def testTakeAlongAxis(self, shape, dtype, axis, rng):
def args_maker():
x = rng(shape, dtype)
i = onp.argsort(x, axis=axis)
return x, i
lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng": jtu.rand_default()}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng):
onp_fun = lambda arg: onp.vander(arg, N=n, increasing=increasing)
lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng": jtu.rand_some_inf_and_nan(), "shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng, shape, dtype):
dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp.nan_to_num, lnp.nan_to_num, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.nan_to_num, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng": jtu.rand_default(), "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.float32,)),
(((3,), (4,)), (onp.float32, onp.int32)),
(((3,), (0,), (4,)), (onp.int32, onp.float32, onp.int32)),
)))
def testIx_(self, rng, shapes, dtypes):
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True)
def testIssue330(self):
x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
  def testScalarDtypePromotion(self):
    """Disabled: documents the numpy-vs-jax Python-scalar promotion mismatch."""
    # disabled this test after https://github.com/google/jax/issues/732
    msg = ("jax.numpy differs from numpy in promotion rules for Python scalars."
           " See https://github.com/google/jax/issues/732.")
    raise SkipTest(msg)
    # The code below is unreachable; it is kept to document the intended check.
    orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
    jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype
    self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = lnp.eye(3, dtype=lnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testIssue347(self):
# https://github.com/google/jax/issues/347
def test_fail(x):
x = lnp.sqrt(lnp.sum(x ** 2, axis=1))
ones = lnp.ones_like(x)
x = lnp.where(x > 0.5, x, ones)
return lnp.sum(x)
x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)
result = api.grad(test_fail)(x)
assert not onp.any(onp.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = onp.arange(6) + 1
ans = lnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(
op, {bool: "bool", int: "int", float: "float"}[dtype]),
"dtype": dtype, "op": op}
for dtype in [int, float, bool]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
onp_fun = lambda arg: getattr(onp, op)(arg)
lnp_fun = lambda arg: getattr(lnp, op)(arg)
args_maker = lambda: [dtype(2)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
  def testArange(self):
    """Exercises lnp.arange call signatures against onp.arange."""
    # test cases inspired by dask tests at
    # https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92
    self.assertAllClose(lnp.arange(77),
                        onp.arange(77), check_dtypes=True)
    self.assertAllClose(lnp.arange(2, 13),
                        onp.arange(2, 13), check_dtypes=True)
    self.assertAllClose(lnp.arange(4, 21, 9),
                        onp.arange(4, 21, 9), check_dtypes=True)
    self.assertAllClose(lnp.arange(53, 5, -3),
                        onp.arange(53, 5, -3), check_dtypes=True)
    # TODO(mattjj): make these tests work when jax_enable_x64=True
    # self.assertAllClose(lnp.arange(77, dtype=float),
    #                     onp.arange(77, dtype=float), check_dtypes=True)
    # self.assertAllClose(lnp.arange(2, 13, dtype=int),
    #                     onp.arange(2, 13, dtype=int), check_dtypes=True)
    # Negative step with start < stop yields an empty range, like numpy.
    self.assertAllClose(lnp.arange(0, 1, -0.5),
                        onp.arange(0, 1, -0.5), check_dtypes=True)
    self.assertRaises(TypeError, lambda: lnp.arange())
    # test that lnp.arange(N) doesn't instantiate an ndarray
    self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))
    self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))
def testIssue830(self):
a = lnp.arange(4, dtype=lnp.complex64)
self.assertEqual(a.dtype, lnp.complex64)
  def testIssue728(self):
    """Large identity matrices must match numpy exactly (issue #728)."""
    assert lnp.allclose(lnp.eye(5000), onp.eye(5000))
    self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))
  def testIssue746(self):
    """reshape on the result of lnp.arange must not crash (issue #746)."""
    lnp.arange(12).reshape(3, 4)  # doesn't crash
  def testIssue764(self):
    """Gradient of sum(tanh) deep in the saturated tail (issue #764)."""
    x = lnp.linspace(190, 200, 4)
    f = api.grad(lambda x: lnp.sum(lnp.tanh(x)))
    # Expected values computed with autograd in float64 precision.
    expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
                          7.66067839e-174], onp.float64)
    # check_dtypes=False: the expected values are float64 regardless of the
    # precision jax runs at.
    self.assertAllClose(f(x), expected, check_dtypes=False)
  def testIssue776(self):
    """Tests that the scatter-add transpose rule instantiates symbolic zeros."""
    def f(u):
      y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
      # The transpose rule for lax.tie_in returns a symbolic zero for its first
      # argument.
      return lax.tie_in(y, 7.)
    # f's output is the constant 7., so its gradient w.r.t. u is all zeros.
    self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
                        check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
lnp_op = getattr(lnp, op)
dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2. -1., 0., 1., 2., 100., onp.inf,
onp.finfo(dtype).max, onp.sqrt(onp.finfo(dtype).max),
onp.sqrt(onp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("cosh", "expm1", "exp"):
# TODO(b/133842876, b/133842870): these return wrong outputs on CPU for
# NaN inputs.
continue
if (op in ("sin", "cos", "tan", "arctan") and FLAGS.jax_test_dut and
FLAGS.jax_test_dut.startswith("tpu")):
continue # TODO(b/132196789, b/134175194): fix and reenable.
x = dtype(x)
expected = onp_op(x)
actual = lnp_op(x)
self.assertAllClose(expected, actual, check_dtypes=True)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = lnp.ones((10, 10))
v = lnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = lnp.ones((3, 4))
self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2))
if __name__ == "__main__":
absltest.main()
| 46.589884 | 100 | 0.651895 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
import operator
import unittest
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import six
import numpy as onp
import jax.ops
from jax import api
from jax import lax
from jax import numpy as lnp
from jax import test_util as jtu
from jax.lib import xla_bridge
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# Shape taxonomies used to parameterize the tests below. The scalar "shapes"
# are jtu sentinels, not tuples.
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
# Dtype taxonomies; 64-bit variants are only effective under jax_enable_x64.
float_dtypes = [onp.float32, onp.float64]
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
# Describes one numpy-vs-jax.numpy operator test-case family.
OpRecord = collections.namedtuple(
    "OpRecord",
    ["name", "nargs", "dtypes", "shapes", "rng", "diff_modes", "test_name",
     "check_dtypes"])


def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None,
              check_dtypes=True):
  """Builds an OpRecord; a falsy test_name defaults to the op name."""
  return OpRecord(name, nargs, dtypes, shapes, rng, diff_modes,
                  test_name or name, check_dtypes)
# Ops with a direct one-to-one numpy counterpart; checked elementwise.
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("add", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("greater", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("greater_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("less", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("less_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),
    op_record("maximum", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("minimum", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("multiply", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("not_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    # tan is sampled away from the poles at +-pi/2 to keep gradients stable.
    op_record("tan", 1, number_dtypes, all_shapes, jtu.rand_uniform(-1.5, 1.5),
              ["rev"]),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small(), ["rev"]),
]
# Ops whose jax implementation is composed from several primitives; still
# expected to match numpy numerically.
JAX_COMPOUND_OP_RECORDS = [
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default(), [],
              check_dtypes=False),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    # expm1 is tested twice: once with large and once with small positive
    # inputs, since its accuracy matters most near zero.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="expm1_large"),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("floor_divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    # log1p, like expm1, is checked on both large and small positive inputs.
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="log1p_large"),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("logaddexp", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("logaddexp2", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), []),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("sinc", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
    op_record("where", 3, (onp.float32, onp.int64), all_shapes, jtu.rand_some_zero(), []),
    op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default(), ["rev"]),
]
# Bitwise ops; dtype combinations are later filtered through
# _dtypes_are_compatible_for_bitwise_ops.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
]
# Reductions that accept an output-dtype argument (tested via testReducer).
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("prod", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("sum", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("var", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("std", 1, inexact_dtypes, nonempty_shapes, jtu.rand_default(), []),
]
# Reductions without a dtype argument (tested via testReducerNoDtype).
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),
]
# Index-returning reductions (tested via testArgMinMax).
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
    op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
]
# Dunder methods exercised through the `operator` module (testOperatorOverload).
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default(), []),
]
# Reflected (right-hand) dunder methods (testRightOperatorOverload).
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
]
numpy_version = tuple(map(int, onp.version.version.split('.')))
# These ops only exist in numpy >= 1.15, so only test them there.
if numpy_version >= (1, 15):
  JAX_COMPOUND_OP_RECORDS += [
    op_record("isclose", 2, all_dtypes, all_shapes, jtu.rand_small_positive(), []),
    op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default(), []),
    op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default(), []),
  ]
  JAX_REDUCER_NO_DTYPE_RECORDS += [
    op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),
  ]
# __div__/__rdiv__ only exist on Python 2.
if six.PY2:
  JAX_OPERATOR_OVERLOADS += [
    op_record("__div__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
  ]
  JAX_RIGHT_OPERATOR_OVERLOADS += [
    op_record("__rdiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),
  ]
# Short alias used pervasively when generating dtype/shape combinations.
CombosWithReplacement = itertools.combinations_with_replacement
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: onp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: onp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
class LaxBackedNumpyTests(jtu.JaxTestCase):
def _GetArgsMaker(self, rng, shapes, dtypes):
return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng": rec.rng, "shapes": shapes, "dtypes": dtypes,
         "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
         "check_dtypes": rec.check_dtypes}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          CombosWithReplacement(rec.shapes, rec.nargs))
        for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
      for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                                 JAX_COMPOUND_OP_RECORDS)))
  def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes):
    """Compares one lnp op against its onp counterpart on random args."""
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    # Python scalar arguments follow different promotion rules, so dtype
    # agreement with numpy is not enforced for them.
    py_scalar_arg = jtu.PYTHON_SCALAR_SHAPE in shapes
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
                            check_dtypes=check_dtypes and not py_scalar_arg)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "name": rec.name}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng, shapes, dtypes):
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
self._CompileAndCheck(fun, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "name": rec.name}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          CombosWithReplacement(rec.shapes, rec.nargs))
        for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
      for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
  def testRightOperatorOverload(self, name, rng, shapes, dtypes):
    """Checks reflected (__r*__) operators called on the second operand."""
    if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
      raise SkipTest()
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    # Look up the reflected method on the second argument and pass the first.
    fun = lambda fst, snd: getattr(snd, name)(fst)
    self._CompileAndCheck(fun, args_maker,
                          check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
            rec.test_name, shapes, dtypes),
         "rng": rec.rng, "shapes": shapes, "dtypes": dtypes,
         "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          CombosWithReplacement(rec.shapes, rec.nargs))
        for dtypes in filter(
          _dtypes_are_compatible_for_bitwise_ops,
          CombosWithReplacement(rec.dtypes, rec.nargs)))
      for rec in JAX_BITWISE_OP_RECORDS))
  def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):
    """Compares bitwise ops with numpy; 64-bit dtypes need jax_enable_x64."""
    if not FLAGS.jax_enable_x64 and any(
        onp.iinfo(dtype).bits == 64 for dtype in dtypes):
      self.skipTest("x64 types are disabled by jax_enable_x64")
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
                            check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng": rec.rng, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims}
for rec in JAX_REDUCER_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducer(self, onp_op, lnp_op, rng, shape, dtype, out_dtype, axis, keepdims):
onp_fun = lambda x: onp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng": rec.rng, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
lnp_fun = lambda x: lnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "rng": rec.rng, "shape": shape, "dtype": dtype,
       "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
       "axis": axis}
      for rec in JAX_ARGMINMAX_RECORDS
      for shape in rec.shapes for dtype in rec.dtypes
      for axis in range(-len(shape), len(shape))))
  def testArgMinMax(self, onp_op, lnp_op, rng, shape, dtype, axis):
    """argmin/argmax must match numpy over every axis of each shape."""
    if (dtype == onp.complex128 and FLAGS.jax_test_dut and
        FLAGS.jax_test_dut.startswith("gpu")):
      raise unittest.SkipTest("complex128 reductions not supported on GPU")
    def onp_fun(array_to_reduce):
      return onp_op(array_to_reduce, axis)
    def lnp_fun(array_to_reduce):
      return lnp_op(array_to_reduce, axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
          axes),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
       "axes": axes, "rng": rng}
      for rng in [jtu.rand_default()]
      # axes is a tuple (axisa, axisb, axisc, axis) as taken by numpy.cross.
      for lhs_shape, rhs_shape, axes in [
          [(2,), (2,), (-1, -1, -1, None)],
          [(2, 4), (2, 4), (-1, -1, -1, 0)],
          [(3, 4), (3, 4), (-1, -1, -1, 0)],
          [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)],
          [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)],
          [(6, 1, 3), (5, 3), (-1, -1, -1, None)],
          [(6, 1, 2), (5, 3), (-1, -1, -1, None)],
          [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)],
          [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)],
          [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)]
      ]
      for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
  def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):
    """lnp.cross vs onp.cross across axis placements and broadcasting."""
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    axisa, axisb, axisc, axis = axes
    lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)
    onp_fun = lambda a, b: onp.cross(a, b, axisa, axisb, axisc, axis)
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": rng}
for rng in [jtu.rand_default()]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
self._CheckAgainstNumpy(onp.dot, lnp.dot, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": rng}
for rng in [jtu.rand_default()]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
self._CheckAgainstNumpy(onp.matmul, lnp.matmul, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.matmul, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng": rng}
for rng in [jtu.rand_default()]
for lhs_shape, rhs_shape, axes in [
[(2, 3, 4), (5, 6, 7), 0], [(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
onp_fun = lambda a, b: onp.tensordot(a, b, axes)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng": jtu.rand_default()}
for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2)
for lhs_shape, rhs_shape in [
(l, r) for l, r in CombosWithReplacement(all_shapes, 2)
if len(jtu._dims_of_shape(l)) == 0
or len(jtu._dims_of_shape(r)) == 0
or l[-1] == r[-1]]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
onp_fun = lambda lhs, rhs: onp.inner(lhs, rhs)
lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_amin={}_amax={}".format(
          jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
       "shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
       "rng": jtu.rand_default()}
      for shape in all_shapes for dtype in number_dtypes
      # Covers lower-only, upper-only, and two-sided clipping.
      for a_min, a_max in [(-1, None), (None, 1), (-1, 1)]))
  def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng):
    """Checks lnp.clip against onp.clip with Python-level (static) bounds."""
    onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
    lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_decimals={}".format(
          jtu.format_shape_dtype_string(shape, dtype), decimals),
       "shape": shape, "dtype": dtype, "decimals": decimals,
       "rng": jtu.rand_default()}
      for shape in all_shapes for dtype in number_dtypes
      for decimals in [0, 1, -2]))
  def testRoundStaticDecimals(self, shape, dtype, decimals, rng):
    """Checks lnp.round against onp.round with a static `decimals` argument."""
    if onp.issubdtype(dtype, onp.integer) and decimals < 0:
      # Rounding integers to a negative number of decimals isn't supported.
      self.skipTest("Integer rounding with decimals < 0 not implemented")
    onp_fun = lambda x: onp.round(x, decimals=decimals)
    lnp_fun = lambda x: lnp.round(x, decimals=decimals)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
          constant_values_rank),
       "shape": shape, "dtype": dtype, "mode": mode,
       "pad_width_rank": pad_width_rank,
       "constant_values_rank": constant_values_rank, "rng": jtu.rand_default(),
       "irng": jtu.rand_int(3)}
      # constant_values only applies to mode='constant'; the other modes need
      # nonempty inputs to have something to mirror/wrap.
      for mode, constant_values_rank, shapes in [
          ('constant', 0, all_shapes),
          ('constant', 1, all_shapes),
          ('constant', 2, all_shapes),
          ('symmetric', None, nonempty_shapes),
          ('reflect', None, nonempty_shapes),
          ('wrap', None, nonempty_shapes),
      ]
      for shape in shapes for dtype in all_dtypes
      for pad_width_rank in range(3)))
  def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
              rng, irng):
    """Checks lnp.pad against onp.pad for several modes and broadcast ranks."""
    # pad_width may be scalar, (2,) or (ndim, 2) depending on pad_width_rank.
    pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
    def onp_fun(x, kwargs):
      if pad_width.size == 0:
        # onp.pad chokes on an empty pad_width for rank-0 inputs; padding by
        # nothing is the identity.
        return x
      return onp.pad(x, pad_width, mode=mode, **kwargs)
    def lnp_fun(x, kwargs):
      return lnp.pad(x, pad_width, mode=mode, **kwargs)
    def args_maker():
      kwargs = {}
      if constant_values_rank:
        kwargs["constant_values"] = rng(
            [len(shape), 2][2 - constant_values_rank:], dtype)
      return rng(shape, dtype), kwargs
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape=[{}]_reps={}".format(
          jtu.format_shape_dtype_string(shape, dtype), reps),
       "shape": shape, "dtype": dtype, "reps": reps,
       "rng": jtu.rand_default()}
      for reps in [(), (2,), (3, 4), (2, 3, 4)]
      for dtype in default_dtypes
      for shape in all_shapes
  ))
  def testTile(self, shape, dtype, reps, rng):
    """Checks lnp.tile against onp.tile for various repetition tuples."""
    onp_fun = lambda arg: onp.tile(arg, reps)
    lnp_fun = lambda arg: lnp.tile(arg, reps)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(onp.dtype(dtype).name for dtype in dtypes)),
       "axis": axis, "base_shape": base_shape, "dtypes": dtypes,
       "rng": jtu.rand_default()}
      for num_arrs in [3]
      for dtypes in CombosWithReplacement(default_dtypes, num_arrs)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testConcatenate(self, axis, base_shape, dtypes, rng):
    """Checks lnp.concatenate against onp.concatenate with mixed dtypes."""
    wrapped_axis = axis % len(base_shape)
    # Give each operand a different size along the concatenation axis.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]
    onp_fun = lambda *args: onp.concatenate(args, axis=axis)
    lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)
    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(onp.dtype(dtype).name for dtype in dtypes)),
       "axis": axis, "base_shape": base_shape, "dtypes": dtypes,
       "rng": jtu.rand_default()}
      for dtypes in CombosWithReplacement(default_dtypes, 2)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testAppend(self, axis, base_shape, dtypes, rng):
    """Checks lnp.append against onp.append along each valid axis."""
    wrapped_axis = axis % len(base_shape)
    # The two operands differ in size along the append axis.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]
    onp_fun = lambda arr, values: onp.append(arr, values, axis=axis)
    lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)
    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
       "axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
       "rng": jtu.rand_default()}
      for repeats in [0, 1, 2]
      for dtype in default_dtypes
      for shape in all_shapes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testRepeat(self, axis, shape, dtype, repeats, rng):
    """Checks lnp.repeat against onp.repeat, including repeats=0 and axis=None."""
    onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
    lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
          op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),
       "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
       "rng": jtu.rand_default(), "lnp_op": getattr(lnp, op),
       "onp_op": getattr(onp, op)}
      for op in ["cumsum", "cumprod"]
      for dtype in [onp.float32, onp.int32]
      for out_dtype in [onp.float32, onp.int32]
      for shape in all_shapes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng):
    """Checks lnp.cumsum/cumprod against numpy, including dtype promotion."""
    onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
    lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
          onp.dtype(dtype).name, m, n, k),
       "m": m, "n": n, "k": k, "dtype": dtype, "rng": jtu.rand_default()}
      for dtype in default_dtypes
      for n in [0, 4]
      for m in [None, 0, 1, 3, 4]
      for k in list(range(-4, 4))))
  def testTri(self, m, n, k, dtype, rng):
    """Checks lnp.tri against onp.tri (no array inputs; all-static args)."""
    onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
    lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)
    args_maker = lambda: []
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_shape={}_k={}".format(
          op, jtu.format_shape_dtype_string(shape, dtype), k),
       "dtype": dtype, "shape": shape, "op": op, "k": k,
       "rng": jtu.rand_default()}
      for dtype in default_dtypes
      # tril/triu require at least a 2D input.
      for shape in [shape for shape in all_shapes if len(shape) >= 2]
      for op in ["tril", "triu"]
      for k in list(range(-3, 3))))
  def testTriLU(self, dtype, shape, op, k, rng):
    """Checks lnp.tril/lnp.triu against their numpy counterparts."""
    onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
    lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_k={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k),
       "dtype": dtype, "shape": shape, "k": k, "rng": jtu.rand_default()}
      for dtype in default_dtypes
      # onp.diag is only defined for 1D (construct) and 2D (extract) inputs.
      for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
      for k in list(range(-4, 4))))
  def testDiag(self, shape, dtype, k, rng):
    """Checks lnp.diag against onp.diag for 1D and 2D inputs and offsets k."""
    onp_fun = lambda arg: onp.diag(arg, k)
    lnp_fun = lambda arg: lnp.diag(arg, k)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
          jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
       "dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
       "axis2": axis2, "rng": jtu.rand_default()}
      for dtype in default_dtypes
      for shape in [shape for shape in all_shapes if len(shape) >= 2]
      for axis1 in range(-len(shape), len(shape))
      # axis2 must name a different dimension than axis1.
      for axis2 in [a for a in range(-len(shape), len(shape))
                    if a % len(shape) != axis1 % len(shape)]
      for offset in list(range(-4, 4))))
  def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng):
    """Checks lnp.diagonal against onp.diagonal over axis pairs and offsets."""
    onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
    lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
lnp_fun = lambda: lnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          out_dtype, offset, axis1, axis2),
       "dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
       "axis1": axis1, "axis2": axis2, "rng": jtu.rand_default()}
      for dtype in default_dtypes
      for out_dtype in [None] + number_dtypes
      for shape in [shape for shape in all_shapes if len(shape) >= 2]
      for axis1 in range(-len(shape), len(shape))
      for axis2 in range(-len(shape), len(shape))
      # axis1 and axis2 must refer to distinct dimensions.
      if (axis1 % len(shape)) != (axis2 % len(shape))
      for offset in list(range(-4, 4))))
  def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng):
    """Checks lnp.trace against onp.trace, including explicit output dtypes."""
    onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)
    lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng": rng}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng in [jtu.rand_default()]))
def testStack(self, shape, axis, dtypes, rng):
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = partial(onp.stack, axis=axis)
lnp_fun = partial(lnp.stack, axis=axis)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng": rng}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng in [jtu.rand_default()]))
def testHVDStack(self, shape, op, dtypes, rng):
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = getattr(onp, op)
lnp_fun = getattr(lnp, op)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outdtype={}".format(
          jtu.format_shape_dtype_string(shape, fill_value_dtype),
          onp.dtype(out_dtype).name if out_dtype else "None"),
       "shape": shape, "fill_value_dtype": fill_value_dtype,
       "out_dtype": out_dtype, "rng": jtu.rand_default()}
      for shape in array_shapes
      for fill_value_dtype in default_dtypes
      for out_dtype in [None] + default_dtypes))
  def testFull(self, shape, fill_value_dtype, out_dtype, rng):
    """Checks lnp.full against onp.full with a traced scalar fill value."""
    onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
    lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)
    args_maker = lambda: [rng((), fill_value_dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
          jtu.format_shape_dtype_string(shape, in_dtype),
          onp.dtype(fill_value_dtype).name,
          onp.dtype(out_dtype).name),
       "shape": shape, "in_dtype": in_dtype,
       "fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
       "rng": jtu.rand_default()}
      for shape in array_shapes
      for in_dtype in default_dtypes
      for fill_value_dtype in default_dtypes
      for out_dtype in default_dtypes))
  def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng):
    """Checks lnp.full_like against onp.full_like across dtype combinations."""
    onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
    lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)
    args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis,
       "dtype": dtype, "rng": jtu.rand_default()}
      # Each case uses a num_sections that divides the split axis evenly.
      for shape, axis, num_sections in [
          ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
          ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
      for dtype in default_dtypes))
  def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng):
    """Checks lnp.split against onp.split with a static section count."""
    onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
    lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis,
       "dtype": dtype, "rng": jtu.rand_default()}
      for shape, axis, num_sections in [
          ((12, 4), 0, 4), ((12, 4), 1, 2),
          ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
      for dtype in default_dtypes))
  def testHVDSplit(self, shape, num_sections, axis, dtype, rng):
    """Checks lnp.vsplit/hsplit/dsplit against their numpy counterparts."""
    def fn(module, axis):
      # vsplit/hsplit/dsplit split along axes 0/1/2 respectively; pick the
      # function whose implicit axis matches the parameterized axis.
      if axis == 0:
        return module.vsplit
      elif axis == 1:
        return module.hsplit
      else:
        assert axis == 2
        return module.dsplit

    onp_fun = lambda x: fn(onp, axis)(x, num_sections)
    lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}_order={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype),
          order),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
       "order": order, "rng": jtu.rand_default()}
      for dtype in default_dtypes
      for order in ["C", "F"]
      # Covers scalar inputs, empty arrays, int targets, and -1 inference.
      for arg_shape, out_shape in [
          (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
          ((), (1, 1, 1)),
          ((7, 0), (0, 42, 101)),
          ((3, 4), 12),
          ((3, 4), (12,)),
          ((3, 4), -1),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshape(self, arg_shape, out_shape, dtype, order, rng):
    """Checks lnp.reshape against onp.reshape for both C and F order."""
    onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
    lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype)),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
       "rng": jtu.rand_default()}
      for dtype in default_dtypes
      for arg_shape, out_shape in [
          ((7, 0), (0, 42, 101)),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshapeMethod(self, arg_shape, out_shape, dtype, rng):
    """Checks the x.reshape(*dims) method form against onp.reshape."""
    onp_fun = lambda x: onp.reshape(x, out_shape)
    # Unpacked-dims calling convention, e.g. x.reshape(2, 8).
    lnp_fun = lambda x: x.reshape(*out_shape)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_expanddim={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), dim),
       "arg_shape": arg_shape, "dtype": dtype, "dim": dim,
       "rng": jtu.rand_default()}
      for arg_shape in [(), (3,), (3, 4)]
      for dtype in default_dtypes
      for dim in range(-len(arg_shape)+1, len(arg_shape))))
  def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng):
    """Checks lnp.expand_dims against onp.expand_dims with a static axis."""
    onp_fun = lambda x: onp.expand_dims(x, dim)
    lnp_fun = lambda x: lnp.expand_dims(x, dim)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_axes=({},{})".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
       "arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
       "rng": jtu.rand_default()}
      for arg_shape, ax1, ax2 in [
          ((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
          ((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
      for dtype in default_dtypes))
  def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng):
    """Checks lnp.swapaxes against onp.swapaxes with static axis arguments."""
    onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
    lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_axis={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), ax),
       "arg_shape": arg_shape, "dtype": dtype, "ax": ax,
       "rng": jtu.rand_default()}
      # axis may be None (squeeze all size-1 dims), an int, or a tuple.
      for arg_shape, ax in [
          ((3, 1), None),
          ((3, 1), 1),
          ((1, 3, 1), (0, 2)),
          ((1, 4, 1), (0,))]
      for dtype in default_dtypes))
  def testSqueeze(self, arg_shape, dtype, ax, rng):
    """Checks lnp.squeeze against onp.squeeze for scalar/tuple/None axes."""
    onp_fun = lambda x: onp.squeeze(x, ax)
    lnp_fun = lambda x: lnp.squeeze(x, ax)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng": jtu.rand_default(), "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape in nonempty_shapes
for dtype in number_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for weights_shape in ([None, shape] if axis is None else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng):
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype),
None if weights_shape is None else rng(weights_shape, dtype)]
try:
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_arg{}".format(i), "arg": arg}
      # Scalars, flat/nested lists, and lists mixing Python numbers with
      # numpy arrays — all things onp.array accepts.
      for i, arg in enumerate([
          3., [1, 2, 3], [1., 2., 3.],
          [[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]],
          [[3, onp.array(2), 1], onp.arange(3.)],
      ])))
  def testArray(self, arg):
    """Checks lnp.array against onp.array on heterogeneous Python inputs."""
    args_maker = lambda: [arg]
    self._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(lnp.array(3))
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = lnp.array(a)
assert ans == 3.
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return lnp.all(lnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
  @jtu.skip_on_devices("tpu")  # TODO(mattjj): investigate this failure
  def testOnesBroadcastingConstantHandler(self):
    """Checks that stride-zero numpy constants lower to an XLA Broadcast."""
    # TODO(mattjj): update this test for jax3
    self.skipTest("test needs jax3 update")

    def fun(x):
      ones = lnp.ones((3, 4))
      assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)

      # To check that the constant handler generates a Broadcast for stride-zero
      # arrays, we monkey-patch the client instance.
      # TODO(mattjj): once we have better HLO dumping and inspecting facilities,
      # we can check the HLO more directly.
      c = x._node.c
      Broadcast = c.Broadcast  # pylint: disable=invalid-name
      was_called = []
      c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
      out = x + ones  # the ndarray constant handler should call Broadcast here
      assert was_called, "Broadcast was not called."
      return out

    fun = api.jit(fun)
    out_val = fun(lnp.ones(4))
    self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, lnp.ndarray)
return lnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return lnp.add(x, y)
def f(x, y):
return lnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
  def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    """Unimplemented primitives should raise NotImplementedError under jit."""
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    foo = lnp._not_implemented(lambda x: x)

    # No error if there's no tracing.
    foo(onp.arange(3))

    cfoo = api.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "rng": rng, "shape": shape, "dtype": dtype, "axis": axis}
      for shape in [(3,), (2, 3)]
      for dtype in default_dtypes
      for axis in range(-len(shape), len(shape))
      for rng in [jtu.rand_default()]))
  def testFlip(self, shape, dtype, axis, rng):
    """Checks lnp.flip against onp.flip along each axis."""
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    lnp_op = lambda x: lnp.flip(x, axis)
    onp_op = lambda x: onp.flip(x, axis)
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "rng": rng, "shape": shape, "dtype": dtype}
      for shape in [(3,), (2, 3), (3, 2, 4)]
      for dtype in default_dtypes
      for rng in [jtu.rand_default()]))
  def testFlipud(self, shape, dtype, rng):
    """Checks lnp.flipud against onp.flipud."""
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    lnp_op = lambda x: lnp.flipud(x)
    onp_op = lambda x: onp.flipud(x)
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "rng": rng, "shape": shape, "dtype": dtype}
      # fliplr requires at least two dimensions.
      for shape in [(3, 2), (2, 3), (3, 2, 4)]
      for dtype in default_dtypes
      for rng in [jtu.rand_default()]))
  def testFliplr(self, shape, dtype, rng):
    """Checks lnp.fliplr against onp.fliplr."""
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    lnp_op = lambda x: lnp.fliplr(x)
    onp_op = lambda x: onp.fliplr(x)
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_k={}_axes={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k, axes),
       "rng": rng, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
      for shape, axes in [
          [(2, 3), (0, 1)],
          [(2, 3), (1, 0)],
          [(4, 3, 2), (0, 2)],
          [(4, 3, 2), (2, 1)],
      ]
      # k covers negative, zero, and >4 quarter-turn counts (mod 4 wraps).
      for k in range(-3, 4)
      for dtype in default_dtypes
      for rng in [jtu.rand_default()]))
  def testRot90(self, shape, dtype, k, axes, rng):
    """Checks lnp.rot90 against onp.rot90 over rotation counts and planes."""
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    lnp_op = lambda x: lnp.rot90(x, k, axes)
    onp_op = lambda x: onp.rot90(x, k, axes)
    self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(lnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
def testOnpMean(self):
x = lax.add(lnp.eye(3), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
expected = onp.arange(0.0, 1.0, 0.1)
ans = lnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# lax.sort is tested combinatorially.
ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a) # last axis
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng": rng, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1))
]
for rng in [jtu.rand_default()]))
def testRoll(self, shape, dtype, shifts, axis, rng):
args_maker = lambda: [rng(shape, dtype)]
lnp_op = lambda x: lnp.roll(x, shifts, axis=axis)
onp_op = lambda x: onp.roll(x, shifts, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng": rng, "rng_indices": rng_indices, "shape": shape,
"index_shape": index_shape, "dtype": dtype, "index_dtype": index_dtype,
"axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)), [None])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng in [jtu.rand_default()]
for rng_indices in [jtu.rand_int(-5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode, rng,
rng_indices):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng": rng, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (3, 4), (3, 4, 5)]
for axis in itertools.chain(range(len(shape)), [-1], [None])
for dtype in default_dtypes
for rng in [jtu.rand_default()]))
def testTakeAlongAxis(self, shape, dtype, axis, rng):
def args_maker():
x = rng(shape, dtype)
i = onp.argsort(x, axis=axis)
return x, i
lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng": jtu.rand_default()}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng):
onp_fun = lambda arg: onp.vander(arg, N=n, increasing=increasing)
lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng": jtu.rand_some_inf_and_nan(), "shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng, shape, dtype):
dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp.nan_to_num, lnp.nan_to_num, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.nan_to_num, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng": jtu.rand_default(), "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.float32,)),
(((3,), (4,)), (onp.float32, onp.int32)),
(((3,), (0,), (4,)), (onp.int32, onp.float32, onp.int32)),
)))
def testIx_(self, rng, shapes, dtypes):
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True)
def testIssue330(self):
x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
msg = ("jax.numpy differs from numpy in promotion rules for Python scalars."
" See https://github.com/google/jax/issues/732.")
raise SkipTest(msg)
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = lnp.eye(3, dtype=lnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testIssue347(self):
def test_fail(x):
x = lnp.sqrt(lnp.sum(x ** 2, axis=1))
ones = lnp.ones_like(x)
x = lnp.where(x > 0.5, x, ones)
return lnp.sum(x)
x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)
result = api.grad(test_fail)(x)
assert not onp.any(onp.isnan(result))
def testIssue453(self):
a = onp.arange(6) + 1
ans = lnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(
op, {bool: "bool", int: "int", float: "float"}[dtype]),
"dtype": dtype, "op": op}
for dtype in [int, float, bool]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, dtype, op):
onp_fun = lambda arg: getattr(onp, op)(arg)
lnp_fun = lambda arg: getattr(lnp, op)(arg)
args_maker = lambda: [dtype(2)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
def testArange(self):
self.assertAllClose(lnp.arange(77),
onp.arange(77), check_dtypes=True)
self.assertAllClose(lnp.arange(2, 13),
onp.arange(2, 13), check_dtypes=True)
self.assertAllClose(lnp.arange(4, 21, 9),
onp.arange(4, 21, 9), check_dtypes=True)
self.assertAllClose(lnp.arange(53, 5, -3),
onp.arange(53, 5, -3), check_dtypes=True)
self.assertAllClose(lnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5), check_dtypes=True)
self.assertRaises(TypeError, lambda: lnp.arange())
self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))
self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))
def testIssue830(self):
a = lnp.arange(4, dtype=lnp.complex64)
self.assertEqual(a.dtype, lnp.complex64)
def testIssue728(self):
assert lnp.allclose(lnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
lnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = lnp.linspace(190, 200, 4)
f = api.grad(lambda x: lnp.sum(lnp.tanh(x)))
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
lnp_op = getattr(lnp, op)
dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2. -1., 0., 1., 2., 100., onp.inf,
onp.finfo(dtype).max, onp.sqrt(onp.finfo(dtype).max),
onp.sqrt(onp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("cosh", "expm1", "exp"):
continue
if (op in ("sin", "cos", "tan", "arctan") and FLAGS.jax_test_dut and
FLAGS.jax_test_dut.startswith("tpu")):
continue
x = dtype(x)
expected = onp_op(x)
actual = lnp_op(x)
self.assertAllClose(expected, actual, check_dtypes=True)
def testIssue883(self):
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = lnp.ones((10, 10))
v = lnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v)
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = lnp.ones((3, 4))
self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2))
if __name__ == "__main__":
absltest.main()
| true | true |
f71bbf7a4a188527959ac45431aaf4fe7372b6f2 | 702 | py | Python | google/ads/googleads/v6/services/services/keyword_plan_ad_group_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/keyword_plan_ad_group_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/keyword_plan_ad_group_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import KeywordPlanAdGroupServiceClient
__all__ = ("KeywordPlanAdGroupServiceClient",)
| 33.428571 | 74 | 0.759259 |
from .client import KeywordPlanAdGroupServiceClient
__all__ = ("KeywordPlanAdGroupServiceClient",)
| true | true |
f71bbf8502afa8422043b8e08ba0756892e46a96 | 3,593 | py | Python | tutorial/settings.py | luotuo/spider-for-apple-store | b4b2bfb6227df8fce7f8e3ab635703e93bb89ed1 | [
"MIT"
] | 20 | 2017-12-17T08:07:12.000Z | 2020-12-11T02:31:59.000Z | tutorial/settings.py | luotuo/spider-for-apple-store | b4b2bfb6227df8fce7f8e3ab635703e93bb89ed1 | [
"MIT"
] | null | null | null | tutorial/settings.py | luotuo/spider-for-apple-store | b4b2bfb6227df8fce7f8e3ab635703e93bb89ed1 | [
"MIT"
] | 6 | 2017-12-17T08:07:14.000Z | 2020-12-11T02:32:00.000Z | # -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tutorial.pipelines.TutorialPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# start MySQL database configure setting
MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'spider'
MYSQL_USER = 'root'
MYSQL_PASSWD = 'root'
# end of MySQL database configure setting
ITEM_PIPELINES = {
# 'tutorial.pipelines.JsonWithEncodingTutorialPipeline': 300,
'tutorial.pipelines.MySQLStoreTutorialPipeline': 300,
}
| 34.548077 | 123 | 0.775953 |
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
ROBOTSTXT_OBEY = False
Y = 3
t'
MYSQL_DBNAME = 'spider'
MYSQL_USER = 'root'
MYSQL_PASSWD = 'root'
ITEM_PIPELINES = {
'tutorial.pipelines.MySQLStoreTutorialPipeline': 300,
}
| true | true |
f71bbfffc3a193e2f3f52cdb443e6740cf88b7e3 | 6,052 | py | Python | Ensemble Learning/AdaBoost.py | DiogoRibeiro7/Machine-Learning | d2c789851f8b4eaf74cdd0c18af072f60cd45cb3 | [
"Apache-2.0"
] | null | null | null | Ensemble Learning/AdaBoost.py | DiogoRibeiro7/Machine-Learning | d2c789851f8b4eaf74cdd0c18af072f60cd45cb3 | [
"Apache-2.0"
] | null | null | null | Ensemble Learning/AdaBoost.py | DiogoRibeiro7/Machine-Learning | d2c789851f8b4eaf74cdd0c18af072f60cd45cb3 | [
"Apache-2.0"
] | null | null | null | """
@Filename: AdaptiveBoost.py
@Author: Diogo Ribeiro
@Create Date: 2019-05-03
@Update Date: 2019-05-03
@Description: Implement of Adaptive Boosting
"""
import numpy as np
import preProcess
import pickle
import random
import SVM
import math
class Adaboost:
def __init__(self, norm_type="Normalization", iterations=5, base_classifier="SVM"):
self.iterations = iterations
self.norm_type = norm_type
self.base_classifier = SVM.SVMClassifier()
self.prediction = None
self.probability = None
self.classifier_set = None
'''
Function: baseClassifier
Description: generate weak classifier
Input: train_data dataType: ndarray description: train_data
train_label dataType: ndarray description: train_label
w dataType: ndarray description: weight
Output: clf dataType: object description: weak classifier
weighted_error dataType: float description: weighted error
base_predictions dataType: object description: base predictions
'''
def baseClassifier(self, train_data, train_label, w):
sample_num = len(train_data)
error_index = np.ones([sample_num, 1])
clf = self.base_classifier
clf.train(train_data, train_label)
base_predictions = np.sign(clf.predict(train_data))
for i in range(sample_num):
if base_predictions[i] == train_label[i]:
error_index[i] = 0
weighted_error = np.dot(w.T, error_index)
return clf, weighted_error, base_predictions
'''
Function: updataAlpha
Description: updata alpha
Input: error dataType: float description: weighted error
Output: new_alpha dataType: float description: new alpha
'''
def updateAlpha(self, error):
temp = (1.0 - error)/max(error, 10e-6)
new_alpha = 1/2 * math.log(temp, math.e)
return new_alpha
'''
Function: train
Description: train the model
Input: train_data dataType: ndarray description: features
train_label dataType: ndarray description: labels
Output: clf_set dataType: list description: classifiers set
'''
def train(self, train_data, train_label):
if self.norm_type == "Standardization":
train_data = preProcess.Standardization(train_data)
else:
train_data = preProcess.Normalization(train_data)
train_label = np.expand_dims(train_label, axis=1)
sample_num = len(train_data)
weak_classifier = []
# initialize weights
w = np.ones([sample_num, 1])
w = w/sample_num
# predictions
agg_predicts = np.zeros([sample_num, 1]) # aggregate value of prediction
# start train
for i in range(self.iterations):
base_clf, error, base_prediction = self.baseClassifier(train_data, train_label, w)
alpha = self.updateAlpha(error)
weak_classifier.append((alpha, base_clf))
# update parameters in page of 139 Eq.(8.4)
expon = np.multiply(-1 * alpha * train_label, base_prediction)
w = np.multiply(w, np.exp(expon))
w = w/w.sum()
# calculate the total error rate
agg_predicts += alpha*base_prediction
error_rate = np.multiply(np.sign(agg_predicts) != train_label, np.ones([sample_num, 1]))
error_rate = error_rate.sum()/sample_num
if error_rate == 0:
break
self.classifier_set = weak_classifier
return weak_classifier
'''
Function: predict
Description: predict the testing set
Input: train_data dataType: ndarray description: features
prob dataType: bool description: return probaility of label
Output: prediction dataType: ndarray description: the prediction results for testing set
'''
def predict(self, test_data, prob="False"):
# Normalization
if self.norm_type == "Standardization":
test_data = preProcess.Standardization(test_data)
else:
test_data = preProcess.Normalization(test_data)
test_num = test_data.shape[0]
prediction = np.zeros([test_num, 1])
probability = np.zeros([test_num, 1])
for classifier in self.classifier_set:
alpha = classifier[0]
clf = classifier[1]
base_prediction = alpha * clf.predict(test_data)
probability += base_prediction
self.prediction = np.sign(probability)
self.probability = probability
if prob:
return probability
else:
return prediction
'''
Function: accuracy
Description: show detection result
Input: test_label dataType: ndarray description: labels of test data
Output: accuracy dataType: float description: detection accuarcy
'''
def accuarcy(self, test_label):
test_label = np.expand_dims(test_label, axis=1)
prediction = self.prediction
accuarcy = sum(prediction == test_label)/len(test_label)
return accuarcy
'''
Function: save
Description: save the model as pkl
Input: filename dataType: str description: the path to save model
'''
def save(self, filename):
f = open(filename, 'w')
pickle.dump(self.classifier_set, f)
f.close()
'''
Function: load
Description: load the model
Input: filename dataType: str description: the path to save model
Output: self dataType: obj description: the trained model
'''
def load(self, filename):
f = open(filename)
self.classifier_set = pickle.load(f)
return self
| 34.982659 | 100 | 0.607403 |
import numpy as np
import preProcess
import pickle
import random
import SVM
import math
class Adaboost:
def __init__(self, norm_type="Normalization", iterations=5, base_classifier="SVM"):
self.iterations = iterations
self.norm_type = norm_type
self.base_classifier = SVM.SVMClassifier()
self.prediction = None
self.probability = None
self.classifier_set = None
def baseClassifier(self, train_data, train_label, w):
sample_num = len(train_data)
error_index = np.ones([sample_num, 1])
clf = self.base_classifier
clf.train(train_data, train_label)
base_predictions = np.sign(clf.predict(train_data))
for i in range(sample_num):
if base_predictions[i] == train_label[i]:
error_index[i] = 0
weighted_error = np.dot(w.T, error_index)
return clf, weighted_error, base_predictions
def updateAlpha(self, error):
temp = (1.0 - error)/max(error, 10e-6)
new_alpha = 1/2 * math.log(temp, math.e)
return new_alpha
def train(self, train_data, train_label):
if self.norm_type == "Standardization":
train_data = preProcess.Standardization(train_data)
else:
train_data = preProcess.Normalization(train_data)
train_label = np.expand_dims(train_label, axis=1)
sample_num = len(train_data)
weak_classifier = []
w = np.ones([sample_num, 1])
w = w/sample_num
agg_predicts = np.zeros([sample_num, 1])
for i in range(self.iterations):
base_clf, error, base_prediction = self.baseClassifier(train_data, train_label, w)
alpha = self.updateAlpha(error)
weak_classifier.append((alpha, base_clf))
expon = np.multiply(-1 * alpha * train_label, base_prediction)
w = np.multiply(w, np.exp(expon))
w = w/w.sum()
agg_predicts += alpha*base_prediction
error_rate = np.multiply(np.sign(agg_predicts) != train_label, np.ones([sample_num, 1]))
error_rate = error_rate.sum()/sample_num
if error_rate == 0:
break
self.classifier_set = weak_classifier
return weak_classifier
def predict(self, test_data, prob="False"):
if self.norm_type == "Standardization":
test_data = preProcess.Standardization(test_data)
else:
test_data = preProcess.Normalization(test_data)
test_num = test_data.shape[0]
prediction = np.zeros([test_num, 1])
probability = np.zeros([test_num, 1])
for classifier in self.classifier_set:
alpha = classifier[0]
clf = classifier[1]
base_prediction = alpha * clf.predict(test_data)
probability += base_prediction
self.prediction = np.sign(probability)
self.probability = probability
if prob:
return probability
else:
return prediction
def accuarcy(self, test_label):
test_label = np.expand_dims(test_label, axis=1)
prediction = self.prediction
accuarcy = sum(prediction == test_label)/len(test_label)
return accuarcy
def save(self, filename):
f = open(filename, 'w')
pickle.dump(self.classifier_set, f)
f.close()
def load(self, filename):
f = open(filename)
self.classifier_set = pickle.load(f)
return self
| true | true |
f71bc02fee45fb7edf6e131216efbc2de8692361 | 1,173 | py | Python | examples/example2.py | hangvane/lpsolve_wrapper | cff5ffa827c10420bdc6b3ceb5d9a3852c91281d | [
"Apache-2.0"
] | 4 | 2021-01-27T14:47:30.000Z | 2021-05-06T03:16:11.000Z | examples/example2.py | hangvane/lpsolve_wrapper | cff5ffa827c10420bdc6b3ceb5d9a3852c91281d | [
"Apache-2.0"
] | null | null | null | examples/example2.py | hangvane/lpsolve_wrapper | cff5ffa827c10420bdc6b3ceb5d9a3852c91281d | [
"Apache-2.0"
] | 1 | 2022-03-25T13:22:45.000Z | 2022-03-25T13:22:45.000Z | # http://web.mit.edu/lpsolve/doc/Python.htm
# P = (110)(1.30)x + (30)(2.00)y + (125)(1.56) = 143x + 60y + 195z
# 120x + 210y + 150.75z <= 15000
# 110x + 30y + 125z <= 4000
# x + y + z <= 75
# x >= 0, y >= 0, z >= 0
import lpsolve_wrapper as lw
model = lw.Model(
notations={
'x': lw.notation(
lower_bound=0,
),
'y': lw.notation(
lower_bound=0,
),
'z': lw.notation(
lower_bound=0,
)
})
model.add_constr(
coefs=[
lw.coef('x', 120),
lw.coef('y', 210),
lw.coef('z', 150.75),
],
right_value=15000,
constr_type=lw.LEQ
)
model.add_constr(
coefs=[
lw.coef('x', 110),
lw.coef('y', 30),
lw.coef('z', 125),
],
right_value=4000,
constr_type=lw.LEQ
)
model.add_constr(
coefs=[
lw.coef('x', 1),
lw.coef('y', 1),
lw.coef('z', 1),
],
right_value=75,
constr_type=lw.LEQ
)
objective, notation_list = model.lp_solve(
obj_func={
'x': 143,
'y': 60,
'z': 195,
},
minimize=False
)
print('objective:', objective)
print('notations:', notation_list)
| 19.881356 | 66 | 0.491901 |
import lpsolve_wrapper as lw
model = lw.Model(
notations={
'x': lw.notation(
lower_bound=0,
),
'y': lw.notation(
lower_bound=0,
),
'z': lw.notation(
lower_bound=0,
)
})
model.add_constr(
coefs=[
lw.coef('x', 120),
lw.coef('y', 210),
lw.coef('z', 150.75),
],
right_value=15000,
constr_type=lw.LEQ
)
model.add_constr(
coefs=[
lw.coef('x', 110),
lw.coef('y', 30),
lw.coef('z', 125),
],
right_value=4000,
constr_type=lw.LEQ
)
model.add_constr(
coefs=[
lw.coef('x', 1),
lw.coef('y', 1),
lw.coef('z', 1),
],
right_value=75,
constr_type=lw.LEQ
)
objective, notation_list = model.lp_solve(
obj_func={
'x': 143,
'y': 60,
'z': 195,
},
minimize=False
)
print('objective:', objective)
print('notations:', notation_list)
| true | true |
f71bc084b54b7d6c91980ac0cca1fd8f504aca87 | 1,277 | py | Python | ganzige/urls.py | kekehurry/ganzige.site | 4fc2fce6c03b302e115feccae6e02bd9e1e8231d | [
"MIT"
] | null | null | null | ganzige/urls.py | kekehurry/ganzige.site | 4fc2fce6c03b302e115feccae6e02bd9e1e8231d | [
"MIT"
] | null | null | null | ganzige/urls.py | kekehurry/ganzige.site | 4fc2fce6c03b302e115feccae6e02bd9e1e8231d | [
"MIT"
] | null | null | null | """ganzige URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^$', include('home.urls', namespace='home')),
url(r'webhook/', include('webhook.urls', namespace='webhook')),
url(r'^admin/', include(admin.site.urls)),
url(r'ckeditor/', include('ckeditor_uploader.urls')),
url(r'^blog/', include('blog.urls', namespace='blog')),
url(r'^photo/', include('photo.urls', namespace='photo')),
url(r'^data/', include('data.urls', namespace='data')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.69697 | 77 | 0.697729 | from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^$', include('home.urls', namespace='home')),
url(r'webhook/', include('webhook.urls', namespace='webhook')),
url(r'^admin/', include(admin.site.urls)),
url(r'ckeditor/', include('ckeditor_uploader.urls')),
url(r'^blog/', include('blog.urls', namespace='blog')),
url(r'^photo/', include('photo.urls', namespace='photo')),
url(r'^data/', include('data.urls', namespace='data')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f71bc0fe58921e92e78ae069e621bc45875ba2a5 | 1,047 | py | Python | src/unity/python/doc/source/sphinx_turicreate_ext/pycon.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 2 | 2019-02-08T08:45:27.000Z | 2020-09-07T05:55:18.000Z | src/unity/python/doc/source/sphinx_turicreate_ext/pycon.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 3 | 2022-02-15T04:42:24.000Z | 2022-03-12T01:05:15.000Z | src/unity/python/doc/source/sphinx_turicreate_ext/pycon.py | ZeroInfinite/turicreate | dd210c2563930881abd51fd69cb73007955b33fd | [
"BSD-3-Clause"
] | 1 | 2019-06-01T18:49:28.000Z | 2019-06-01T18:49:28.000Z | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import sys
from code import InteractiveInterpreter
def main():
"""
Print lines of input along with output.
"""
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n')
if __name__ == '__main__':
main()
# vim: set expandtab shiftwidth=4 softtabstop=4 :
| 26.846154 | 85 | 0.598854 |
import sys
from code import InteractiveInterpreter
def main():
"""
Print lines of input along with output.
"""
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n')
if __name__ == '__main__':
main()
| false | true |
f71bc16b49327031ea7d379964913ab43a1e9c34 | 3,176 | py | Python | microchain/chain.py | ciknight/microchain | d740571fd84f18bc1b635b2fccb9f349180709fa | [
"MIT"
] | null | null | null | microchain/chain.py | ciknight/microchain | d740571fd84f18bc1b635b2fccb9f349180709fa | [
"MIT"
] | 40 | 2018-07-30T21:14:20.000Z | 2021-06-25T15:15:20.000Z | microchain/chain.py | ciknight/microchain | d740571fd84f18bc1b635b2fccb9f349180709fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from typing import List
from microchain.block import Block
__all__ = ["Chain"]
class Chain:
_interval = 10 # second
def __init__(self, blocks: List[Block] = None) -> None:
self.blocks = blocks or [Chain.genesis()]
def __len__(self):
return self.length
def __repr__(self):
return f"Chain({repr(self.blocks)})"
@property
def interval(self):
return self._interval
@property
def length(self) -> int:
return len(self.blocks)
@property
def latest_block(self) -> Block:
return self.blocks[-1]
def add_block(self, block: Block) -> bool:
if block.valid is True:
self.blocks.append(block)
return True
return False
@staticmethod
def genesis() -> Block:
args = (0, "0", "Genesis Block")
nonce = 0
# (difficulty 1): 0x00ffff * 2**(8*(0x1d - 3))
target = "0x00000000FFFF0000000000000000000000000000000000000000000000000000"
while True:
block = Block(*args, nonce=nonce, target=target)
if block.valid is True:
break
else:
nonce += 1
return block
@property
def difficulty(self) -> float:
""" Difficulty is Calculate the hash of times.
Url: https://en.bitcoin.it/wiki/Difficulty#How_often_does_the_network_difficulty_change.3F
"""
difficulty_1_target = (
"0x00000000FFFF0000000000000000000000000000000000000000000000000000"
)
return float(int(difficulty_1_target, 16) / int(self.latest_block.target, 16))
@property
def current_target(self) -> str:
""" Retarget """
lb = self.latest_block
# Every 10 blocks change network difficulty, bitcoin is 2016 blocks.
block_count = 10
target_timespan = block_count * self.interval
if self.length % block_count != 0:
return lb.target
else:
ratio_limit = 4
actual_timespan = lb.timestamp - self.blocks[-block_count].timestamp
adjusted_timespan = min(
max(actual_timespan, target_timespan / ratio_limit),
target_timespan * ratio_limit,
)
assert 1 / ratio_limit <= adjusted_timespan / target_timespan <= ratio_limit
logging.info(
f"Retargeting at {self.length}, difficulty change: {target_timespan/adjusted_timespan:.2%}"
)
new_target = int(lb.target, 16) * adjusted_timespan / target_timespan
return f"{int(new_target):x}".rjust(64, "0")
def generate_next(self, data: str) -> Block:
lb = self.latest_block
args = (lb.index + 1, lb.hash, data)
nonce = 0
while True:
new_block = Block(*args, nonce=nonce, target=self.current_target)
if new_block.valid is True:
break
else:
nonce += 1
return new_block
def mine(self, data: str) -> bool:
next_block = self.generate_next(data)
return self.add_block(next_block)
| 30.247619 | 107 | 0.588791 |
import logging
from typing import List
from microchain.block import Block
__all__ = ["Chain"]
class Chain:
    """An in-memory blockchain: an append-only list of ``Block`` objects.

    The chain starts from a deterministic genesis block and re-targets the
    proof-of-work difficulty every 10 blocks, aiming for one block per
    ``_interval`` seconds.
    """
    # Desired number of seconds between consecutive blocks; consumed by the
    # retargeting logic in `current_target`.
    _interval = 10
    def __init__(self, blocks: List[Block] = None) -> None:
        # NOTE: an explicitly passed empty list is falsy, so it is replaced
        # by a fresh genesis chain as well.
        self.blocks = blocks or [Chain.genesis()]
    def __len__(self):
        return self.length
    def __repr__(self):
        return f"Chain({repr(self.blocks)})"
    @property
    def interval(self):
        """Target number of seconds between consecutive blocks."""
        return self._interval
    @property
    def length(self) -> int:
        """Number of blocks currently in the chain (genesis included)."""
        return len(self.blocks)
    @property
    def latest_block(self) -> Block:
        """The most recently appended block."""
        return self.blocks[-1]
    def add_block(self, block: Block) -> bool:
        """Append *block* if it is valid; return whether it was accepted."""
        if block.valid is True:
            self.blocks.append(block)
            return True
        return False
    @staticmethod
    def genesis() -> Block:
        """Mine and return the deterministic first block of every chain."""
        args = (0, "0", "Genesis Block")
        nonce = 0
        # Bitcoin-style "difficulty 1" maximum target.
        target = "0x00000000FFFF0000000000000000000000000000000000000000000000000000"
        # Brute-force nonces until the block validates itself against the
        # target. NOTE(review): what `Block.valid` actually checks is
        # defined in microchain.block -- confirm cost before relying on
        # Chain() construction being fast.
        while True:
            block = Block(*args, nonce=nonce, target=target)
            if block.valid is True:
                break
            else:
                nonce += 1
        return block
    @property
    def difficulty(self) -> float:
        """Current difficulty: ratio of the max target to the latest target."""
        difficulty_1_target = (
            "0x00000000FFFF0000000000000000000000000000000000000000000000000000"
        )
        return float(int(difficulty_1_target, 16) / int(self.latest_block.target, 16))
    @property
    def current_target(self) -> str:
        """Target for the next block, retargeted every `block_count` blocks.

        Between retarget points the latest block's target is reused. At a
        retarget point the actual timespan of the last `block_count` blocks
        is compared with the desired timespan, clamped to a 4x change in
        either direction, and the target is scaled proportionally.
        """
        lb = self.latest_block
        block_count = 10
        target_timespan = block_count * self.interval
        if self.length % block_count != 0:
            return lb.target
        else:
            # Clamp the adjustment so difficulty never moves more than
            # 4x up or down in a single retarget (Bitcoin-style rule).
            ratio_limit = 4
            actual_timespan = lb.timestamp - self.blocks[-block_count].timestamp
            adjusted_timespan = min(
                max(actual_timespan, target_timespan / ratio_limit),
                target_timespan * ratio_limit,
            )
            assert 1 / ratio_limit <= adjusted_timespan / target_timespan <= ratio_limit
            logging.info(
                f"Retargeting at {self.length}, difficulty change: {target_timespan/adjusted_timespan:.2%}"
            )
            new_target = int(lb.target, 16) * adjusted_timespan / target_timespan
            # Render as a 64-char, zero-padded hex string (no "0x" prefix).
            return f"{int(new_target):x}".rjust(64, "0")
    def generate_next(self, data: str) -> Block:
        """Mine the next block carrying *data* on top of the latest block."""
        lb = self.latest_block
        args = (lb.index + 1, lb.hash, data)
        nonce = 0
        # Proof of work: try nonces until the block satisfies the target.
        while True:
            new_block = Block(*args, nonce=nonce, target=self.current_target)
            if new_block.valid is True:
                break
            else:
                nonce += 1
        return new_block
    def mine(self, data: str) -> bool:
        """Mine a block from *data* and append it; return acceptance flag."""
        next_block = self.generate_next(data)
        return self.add_block(next_block)
| true | true |
f71bc1a8abff19dc9122898c70187b1df427ad48 | 5,311 | py | Python | duty/my_signals/templates/template.py | ximetov/IrCa-Duty | 666c2d26c9cd7d314798cfb222ad91dfeee4a5b6 | [
"MIT"
] | 6 | 2020-05-18T21:53:27.000Z | 2020-07-06T12:48:00.000Z | duty/my_signals/templates/template.py | ximetov/IrCa-Duty | 666c2d26c9cd7d314798cfb222ad91dfeee4a5b6 | [
"MIT"
] | null | null | null | duty/my_signals/templates/template.py | ximetov/IrCa-Duty | 666c2d26c9cd7d314798cfb222ad91dfeee4a5b6 | [
"MIT"
] | 6 | 2020-05-13T16:16:15.000Z | 2020-06-23T12:05:09.000Z | import re
from typing import Tuple
from duty.utils import att_parse, format_response
from duty.objects import MySignalEvent, dp
def delete_template(name: str, templates: list) -> Tuple[list, bool]:
    """Remove the first template whose lower-cased name equals *name*.

    *name* is expected to be lower-cased already. Returns the (mutated)
    list and a flag telling whether anything was removed.
    """
    for index, entry in enumerate(templates):
        if entry['name'].lower() == name:
            del templates[index]
            return templates, True
    return templates, False
def get_template_list(event: MySignalEvent, templates: list):
    """Build the listing message for the "шабы" command.

    The trailing argument may be a page number (negative means "from the
    end"); the remaining arguments form a category filter. Placeholders
    such as ``{name_genitive}`` are substituted later by
    ``format_response``.
    """
    # Bug fix: previously `page` was assigned only when the last argument
    # was numeric (or when there were <= 1 args), so a multi-word category
    # like "шабы моя категория" crashed with UnboundLocalError below.
    page = 0
    if len(event.args) > 1:
        last = event.args[-1]
        if last.isdigit() or (last.startswith('-') and last[1:].isdigit()):
            page = int(event.args.pop(-1))
            if page > 0:
                # User-visible pages are 1-based; internally 0-based.
                page -= 1
    category = ' '.join(event.args).lower()
    if not category:
        # No filter: show per-category template counts.
        cats = {}
        for t in templates:
            cats[t['cat']] = cats.get(t['cat'], 0) + 1
        message = "📚 Категории {name_genitive}:"
        for cat in cats:
            message += f"\n-- {cat} ({cats[cat]})"
    else:
        if category == 'все':
            message = '📃 Список всех {name_genitive}:'
            category = None
        else:
            message = f'📖 {{name_accusative_cap}} категории "{category}":'
        message += list_by_page(templates, page, category)
        if '\n' not in message:
            # list_by_page produced no entries for this page/filter.
            if templates == []:
                message = '{no_templates}'
            else:
                message = '⚠️ {name_accusative_cap} по указанному запросу не найдены'
    return message
def list_by_page(templates, page, category) -> str:
    """Render one 40-item page of *templates*, optionally category-filtered.

    Negative *page* counts from the end of the list. Returns '' when the
    selected page has no matching entries.
    """
    header = ''
    if len(templates) > 40:
        header = (f'(страница #{page+1})'
                  if page >= 0 else f'(страница #{abs(page)} с конца)')
    start = page * 40
    if start >= 0:
        chunk = templates[start:start + 40]
        first_number = start + 1
    else:
        chunk = templates[start - 1:start + 39]
        first_number = len(templates) + start
    if page < 0:
        # Pages counted from the end include the very last element too.
        try:
            chunk.append(templates[start + 39])
        except IndexError:
            pass
    entries = []
    for number, tpl in enumerate(chunk, first_number):
        if category:
            if tpl['cat'] == category:
                entries.append(f'\n-- {tpl["name"]}')
        else:
            entries.append(f'\n{number}. {tpl["name"]} | {tpl["cat"]}')
    body = ''.join(entries)
    if '\n' not in header + body:
        return ''
    return '\n' + header + body
@dp.longpoll_event_register('+шаб')
@dp.my_signal_event_register('+шаб')
def template_create(event: MySignalEvent) -> str:
    """Handle "+шаб <name>|<category>": save (or overwrite) a template.

    The payload comes either from the command itself or from the
    replied-to message; voice messages are rejected (handled by "+гс").
    """
    parsed = re.search(r"([^|]+)\|?([^|]*)", ' '.join(event.args))
    if parsed is None:
        event.msg_op(2, "❗ Не указано название")
        return "ok"
    category = parsed.group(2).lower().strip() or 'без категории'
    name = parsed.group(1).lower().strip()
    if category == 'все':
        event.msg_op(2, '❗ Невозможно создать шаблон с категорией "все"')
        return "ok"
    if not (event.payload or event.attachments or event.reply_message):
        event.msg_op(2, "❗ Нет данных")
        return "ok"
    if event.reply_message:
        data = event.reply_message['text']
        event.attachments = att_parse(event.reply_message['attachments'])
        if event.attachments and event.attachments[0].startswith('audio_message'):
            event.msg_op(2, '⚠️ Для сохранения ГС используй команду "+гс"')
            return "ok"
    else:
        data = event.payload
    event.db.templates, overwritten = delete_template(name, event.db.templates)
    event.db.templates.append({
        "name": name,
        "payload": data,
        "cat": category,
        "attachments": event.attachments,
    })
    status = "перезаписан" if overwritten else "сохранен"
    event.msg_op(2, f'✅ Шаблон "{name}" {status}', delete=2)
    return "ok"
@dp.longpoll_event_register('шабы')
@dp.my_signal_event_register('шабы')
def template_list(event: MySignalEvent) -> str:
    """Handle "шабы": reply with the template/category listing."""
    listing = get_template_list(event, event.db.templates)
    reply = format_response(
        listing,
        name_genitive='шаблонов',
        name_accusative='шаблоны',
        name_accusative_cap='Шаблоны',
        no_templates='👀 Нет ни одного шаблона... Для создания используй команду "+шаб"',
    )
    event.msg_op(2, reply)
    return "ok"
def get_name(event: MySignalEvent) -> Tuple[MySignalEvent, str]:
    """Extract the lower-cased template name from the command arguments."""
    joined = ' '.join(event.args)
    return event, joined.lower()
@dp.longpoll_event_register('-шаб')
@dp.my_signal_event_register('-шаб')
@dp.wrap_handler(get_name)
def template_delete(event: MySignalEvent, name: str) -> str:
    """Handle "-шаб <name>": remove a stored template by name."""
    event.db.templates, removed = delete_template(name, event.db.templates)
    reply = (f'✅ Шаблон "{name}" удален'
             if removed else f'⚠️ Шаблон "{name}" не найден')
    event.msg_op(2, reply, delete=1)
    return "ok"
@dp.longpoll_event_register('шаб')
@dp.my_signal_event_register('шаб')
@dp.wrap_handler(get_name)
def template_show(event: MySignalEvent, name: str) -> str:
    """Handle "шаб <name>": send the stored template payload/attachments.

    Attachments supplied with the command are appended to the outgoing
    message only -- the stored template itself is left untouched.
    """
    template = None
    for temp in event.db.templates:
        if temp['name'] == name:
            template = temp
            break
    if template:
        # Bug fix: copy before extending. The previous code extended the
        # stored attachment list in place, so every "шаб" call with
        # attachments permanently appended them to the saved template.
        atts = list(template['attachments'])
        atts.extend(event.attachments)
        # Also use `template` (the found entry) rather than the leaked
        # loop variable `temp`.
        event.msg_op(2, template['payload'] + '\n' + event.payload,
                     keep_forward_messages=1, attachment=','.join(atts))
    else:
        event.msg_op(2, f'❗ Шаблон "{name}" не найден')
    return "ok"
| 32.384146 | 105 | 0.591226 | import re
from typing import Tuple
from duty.utils import att_parse, format_response
from duty.objects import MySignalEvent, dp
def delete_template(name: str, templates: list) -> Tuple[list, bool]:
    """Remove the first template whose lower-cased name equals *name*.

    *name* is expected to be lower-cased already. Returns the (mutated)
    list and a flag telling whether anything was removed.
    """
    for index, entry in enumerate(templates):
        if entry['name'].lower() == name:
            del templates[index]
            return templates, True
    return templates, False
def get_template_list(event: MySignalEvent, templates: list):
    """Build the listing message for the "шабы" command.

    The trailing argument may be a page number (negative means "from the
    end"); the remaining arguments form a category filter. Placeholders
    such as ``{name_genitive}`` are substituted later by
    ``format_response``.
    """
    # Bug fix: previously `page` was assigned only when the last argument
    # was numeric (or when there were <= 1 args), so a multi-word category
    # like "шабы моя категория" crashed with UnboundLocalError below.
    page = 0
    if len(event.args) > 1:
        last = event.args[-1]
        if last.isdigit() or (last.startswith('-') and last[1:].isdigit()):
            page = int(event.args.pop(-1))
            if page > 0:
                # User-visible pages are 1-based; internally 0-based.
                page -= 1
    category = ' '.join(event.args).lower()
    if not category:
        # No filter: show per-category template counts.
        cats = {}
        for t in templates:
            cats[t['cat']] = cats.get(t['cat'], 0) + 1
        message = "📚 Категории {name_genitive}:"
        for cat in cats:
            message += f"\n-- {cat} ({cats[cat]})"
    else:
        if category == 'все':
            message = '📃 Список всех {name_genitive}:'
            category = None
        else:
            message = f'📖 {{name_accusative_cap}} категории "{category}":'
        message += list_by_page(templates, page, category)
        if '\n' not in message:
            # list_by_page produced no entries for this page/filter.
            if templates == []:
                message = '{no_templates}'
            else:
                message = '⚠️ {name_accusative_cap} по указанному запросу не найдены'
    return message
def list_by_page(templates, page, category) -> str:
    """Render one 40-item page of *templates*, optionally category-filtered.

    Negative *page* counts from the end of the list. Returns '' when the
    selected page has no matching entries.
    """
    header = ''
    if len(templates) > 40:
        header = (f'(страница #{page+1})'
                  if page >= 0 else f'(страница #{abs(page)} с конца)')
    start = page * 40
    if start >= 0:
        chunk = templates[start:start + 40]
        first_number = start + 1
    else:
        chunk = templates[start - 1:start + 39]
        first_number = len(templates) + start
    if page < 0:
        # Pages counted from the end include the very last element too.
        try:
            chunk.append(templates[start + 39])
        except IndexError:
            pass
    entries = []
    for number, tpl in enumerate(chunk, first_number):
        if category:
            if tpl['cat'] == category:
                entries.append(f'\n-- {tpl["name"]}')
        else:
            entries.append(f'\n{number}. {tpl["name"]} | {tpl["cat"]}')
    body = ''.join(entries)
    if '\n' not in header + body:
        return ''
    return '\n' + header + body
@dp.longpoll_event_register('+шаб')
@dp.my_signal_event_register('+шаб')
def template_create(event: MySignalEvent) -> str:
    """Handle "+шаб <name>|<category>": save (or overwrite) a template.

    The payload comes either from the command itself or from the
    replied-to message; voice messages are rejected (handled by "+гс").
    """
    parsed = re.search(r"([^|]+)\|?([^|]*)", ' '.join(event.args))
    if parsed is None:
        event.msg_op(2, "❗ Не указано название")
        return "ok"
    category = parsed.group(2).lower().strip() or 'без категории'
    name = parsed.group(1).lower().strip()
    if category == 'все':
        event.msg_op(2, '❗ Невозможно создать шаблон с категорией "все"')
        return "ok"
    if not (event.payload or event.attachments or event.reply_message):
        event.msg_op(2, "❗ Нет данных")
        return "ok"
    if event.reply_message:
        data = event.reply_message['text']
        event.attachments = att_parse(event.reply_message['attachments'])
        if event.attachments and event.attachments[0].startswith('audio_message'):
            event.msg_op(2, '⚠️ Для сохранения ГС используй команду "+гс"')
            return "ok"
    else:
        data = event.payload
    event.db.templates, overwritten = delete_template(name, event.db.templates)
    event.db.templates.append({
        "name": name,
        "payload": data,
        "cat": category,
        "attachments": event.attachments,
    })
    status = "перезаписан" if overwritten else "сохранен"
    event.msg_op(2, f'✅ Шаблон "{name}" {status}', delete=2)
    return "ok"
@dp.longpoll_event_register('шабы')
@dp.my_signal_event_register('шабы')
def template_list(event: MySignalEvent) -> str:
    """Handle "шабы": reply with the template/category listing."""
    listing = get_template_list(event, event.db.templates)
    reply = format_response(
        listing,
        name_genitive='шаблонов',
        name_accusative='шаблоны',
        name_accusative_cap='Шаблоны',
        no_templates='👀 Нет ни одного шаблона... Для создания используй команду "+шаб"',
    )
    event.msg_op(2, reply)
    return "ok"
def get_name(event: MySignalEvent) -> Tuple[MySignalEvent, str]:
    """Extract the lower-cased template name from the command arguments."""
    joined = ' '.join(event.args)
    return event, joined.lower()
@dp.longpoll_event_register('-шаб')
@dp.my_signal_event_register('-шаб')
@dp.wrap_handler(get_name)
def template_delete(event: MySignalEvent, name: str) -> str:
    """Handle "-шаб <name>": remove a stored template by name."""
    event.db.templates, removed = delete_template(name, event.db.templates)
    reply = (f'✅ Шаблон "{name}" удален'
             if removed else f'⚠️ Шаблон "{name}" не найден')
    event.msg_op(2, reply, delete=1)
    return "ok"
@dp.longpoll_event_register('шаб')
@dp.my_signal_event_register('шаб')
@dp.wrap_handler(get_name)
def template_show(event: MySignalEvent, name: str) -> str:
    """Handle "шаб <name>": send the stored template payload/attachments.

    Attachments supplied with the command are appended to the outgoing
    message only -- the stored template itself is left untouched.
    """
    template = None
    for temp in event.db.templates:
        if temp['name'] == name:
            template = temp
            break
    if template:
        # Bug fix: copy before extending. The previous code extended the
        # stored attachment list in place, so every "шаб" call with
        # attachments permanently appended them to the saved template.
        atts = list(template['attachments'])
        atts.extend(event.attachments)
        # Also use `template` (the found entry) rather than the leaked
        # loop variable `temp`.
        event.msg_op(2, template['payload'] + '\n' + event.payload,
                     keep_forward_messages=1, attachment=','.join(atts))
    else:
        event.msg_op(2, f'❗ Шаблон "{name}" не найден')
    return "ok"
| true | true |
f71bc2338b924e040d6847a96eaeabd0904e440e | 405 | py | Python | env/Lib/site-packages/plotly/validators/scatter3d/marker/_sizeref.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/scatter3d/marker/_sizeref.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/scatter3d/marker/_sizeref.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class SizerefValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatter3d.marker.sizeref`` property."""

    def __init__(self, plotly_name="sizeref", parent_name="scatter3d.marker", **kwargs):
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(plotly_name=plotly_name,
                         parent_name=parent_name,
                         edit_type=edit_type,
                         **kwargs)
| 33.75 | 88 | 0.679012 | import _plotly_utils.basevalidators
class SizerefValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatter3d.marker.sizeref`` property."""

    def __init__(self, plotly_name="sizeref", parent_name="scatter3d.marker", **kwargs):
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(plotly_name=plotly_name,
                         parent_name=parent_name,
                         edit_type=edit_type,
                         **kwargs)
| true | true |
f71bc27ffeefbaa07e53e1ac517b08578d7e83f3 | 35,940 | py | Python | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/traffic_director.py | lungati/grpc | cd9730d2d28626c57431253208f23507d466c825 | [
"BSD-3-Clause"
] | 5 | 2019-11-12T04:30:55.000Z | 2021-08-11T23:04:12.000Z | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/traffic_director.py | bwncp/grpc | 779701ab76c552affa9f5c7815c2b598c996ea54 | [
"Apache-2.0"
] | 10 | 2015-03-03T06:51:51.000Z | 2022-03-23T14:10:56.000Z | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/traffic_director.py | bwncp/grpc | 779701ab76c552affa9f5c7815c2b598c996ea54 | [
"Apache-2.0"
] | 1 | 2015-08-22T15:20:59.000Z | 2015-08-22T15:20:59.000Z | # Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import random
from typing import Any, Dict, List, Optional, Set
from framework import xds_flags
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
# Compute
_ComputeV1 = gcp.compute.ComputeV1
GcpResource = _ComputeV1.GcpResource
HealthCheckProtocol = _ComputeV1.HealthCheckProtocol
ZonalGcpResource = _ComputeV1.ZonalGcpResource
BackendServiceProtocol = _ComputeV1.BackendServiceProtocol
_BackendGRPC = BackendServiceProtocol.GRPC
_HealthCheckGRPC = HealthCheckProtocol.GRPC
# Network Security
_NetworkSecurityV1Beta1 = gcp.network_security.NetworkSecurityV1Beta1
ServerTlsPolicy = gcp.network_security.ServerTlsPolicy
ClientTlsPolicy = gcp.network_security.ClientTlsPolicy
AuthorizationPolicy = gcp.network_security.AuthorizationPolicy
# Network Services
_NetworkServicesV1Alpha1 = gcp.network_services.NetworkServicesV1Alpha1
_NetworkServicesV1Beta1 = gcp.network_services.NetworkServicesV1Beta1
EndpointPolicy = gcp.network_services.EndpointPolicy
# Testing metadata consts
TEST_AFFINITY_METADATA_KEY = 'xds_md'
class TrafficDirectorManager:
    """Manages the lifecycle of GCP Traffic Director (xDS) resources.

    Creates, loads, and deletes the resource chain of a Traffic Director
    setup: health check -> backend service(s) -> URL map -> target proxy
    -> forwarding rule, plus a firewall rule admitting GCP health-check
    probes. Resource names are derived from ``resource_prefix`` and
    ``resource_suffix`` (see make_resource_name) so concurrent runs do
    not collide.
    """
    compute: _ComputeV1
    resource_prefix: str
    resource_suffix: str

    # Name fragments combined by make_resource_name() into full names.
    BACKEND_SERVICE_NAME = "backend-service"
    ALTERNATIVE_BACKEND_SERVICE_NAME = "backend-service-alt"
    AFFINITY_BACKEND_SERVICE_NAME = "backend-service-affinity"
    HEALTH_CHECK_NAME = "health-check"
    URL_MAP_NAME = "url-map"
    URL_MAP_PATH_MATCHER_NAME = "path-matcher"
    TARGET_PROXY_NAME = "target-proxy"
    FORWARDING_RULE_NAME = "forwarding-rule"
    FIREWALL_RULE_NAME = "allow-health-checks"

    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: str,
        network: str = 'default',
        compute_api_version: str = 'v1',
    ):
        # API
        self.compute = _ComputeV1(gcp_api_manager,
                                  project,
                                  version=compute_api_version)
        # Settings
        self.project: str = project
        self.network: str = network
        self.resource_prefix: str = resource_prefix
        self.resource_suffix: str = resource_suffix
        # Managed resources
        self.health_check: Optional[GcpResource] = None
        self.backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.backend_service_protocol: Optional[BackendServiceProtocol] = None
        self.url_map: Optional[GcpResource] = None
        self.firewall_rule: Optional[GcpResource] = None
        self.target_proxy: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once target proxy resource loaded
        self.target_proxy_is_http: bool = False
        self.forwarding_rule: Optional[GcpResource] = None
        self.backends: Set[ZonalGcpResource] = set()
        self.alternative_backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.alternative_backend_service_protocol: Optional[
            BackendServiceProtocol] = None
        self.alternative_backends: Set[ZonalGcpResource] = set()
        self.affinity_backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.affinity_backend_service_protocol: Optional[
            BackendServiceProtocol] = None
        self.affinity_backends: Set[ZonalGcpResource] = set()

    @property
    def network_url(self):
        """Relative resource URL of the VPC network in use."""
        return f'global/networks/{self.network}'

    def setup_for_grpc(
            self,
            service_host,
            service_port,
            *,
            backend_protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            health_check_port: Optional[int] = None):
        """One-call setup: backend resources plus the routing rule map."""
        self.setup_backend_for_grpc(protocol=backend_protocol,
                                    health_check_port=health_check_port)
        self.setup_routing_rule_map_for_grpc(service_host, service_port)

    def setup_backend_for_grpc(
            self,
            *,
            protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            health_check_port: Optional[int] = None):
        """Create the health check and the main backend service."""
        self.create_health_check(port=health_check_port)
        self.create_backend_service(protocol)

    def setup_routing_rule_map_for_grpc(self, service_host, service_port):
        """Create the URL map, target proxy and forwarding rule chain."""
        self.create_url_map(service_host, service_port)
        self.create_target_proxy()
        self.create_forwarding_rule(service_port)

    def cleanup(self, *, force=False):
        """Delete managed resources; with force=True, delete by recomputed
        name even when this instance never created them. Note the firewall
        rule is intentionally not deleted here (see delete_firewall_rule).
        """
        # Cleanup in the reverse order of creation
        self.delete_forwarding_rule(force=force)
        self.delete_target_http_proxy(force=force)
        self.delete_target_grpc_proxy(force=force)
        self.delete_url_map(force=force)
        self.delete_backend_service(force=force)
        self.delete_alternative_backend_service(force=force)
        self.delete_affinity_backend_service(force=force)
        self.delete_health_check(force=force)

    # NOTE(review): lru_cache on an instance method keeps `self` alive for
    # the lifetime of the cache; acceptable for short-lived test managers.
    @functools.lru_cache(None)
    def make_resource_name(self, name: str) -> str:
        """Make dash-separated resource name with resource prefix and suffix."""
        parts = [self.resource_prefix, name]
        # Avoid trailing dash when the suffix is empty.
        if self.resource_suffix:
            parts.append(self.resource_suffix)
        return '-'.join(parts)

    def create_health_check(
            self,
            *,
            protocol: Optional[HealthCheckProtocol] = _HealthCheckGRPC,
            port: Optional[int] = None):
        """Create the health check; raises ValueError if one already exists."""
        if self.health_check:
            raise ValueError(f'Health check {self.health_check.name} '
                             'already created, delete it first')
        if protocol is None:
            protocol = _HealthCheckGRPC
        name = self.make_resource_name(self.HEALTH_CHECK_NAME)
        logger.info('Creating %s Health Check "%s"', protocol.name, name)
        resource = self.compute.create_health_check(name, protocol, port=port)
        self.health_check = resource

    def delete_health_check(self, force=False):
        """Delete the health check; no-op when none is tracked and not forced."""
        if force:
            name = self.make_resource_name(self.HEALTH_CHECK_NAME)
        elif self.health_check:
            name = self.health_check.name
        else:
            return
        logger.info('Deleting Health Check "%s"', name)
        self.compute.delete_health_check(name)
        self.health_check = None

    def create_backend_service(
            self,
            protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            subset_size: Optional[int] = None,
            affinity_header: Optional[str] = None):
        """Create the main backend service attached to the health check."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        logger.info('Creating %s Backend Service "%s"', protocol.name, name)
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            subset_size=subset_size,
            affinity_header=affinity_header)
        self.backend_service = resource
        self.backend_service_protocol = protocol

    def load_backend_service(self):
        """Load a pre-existing main backend service by its derived name."""
        name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.backend_service = resource

    def delete_backend_service(self, force=False):
        """Delete the main backend service; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        elif self.backend_service:
            name = self.backend_service.name
        else:
            return
        logger.info('Deleting Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.backend_service = None

    def backend_service_add_neg_backends(self,
                                         name,
                                         zones,
                                         max_rate_per_endpoint: Optional[
                                             int] = None):
        """Register the NEGs named *name* in *zones* as backends and patch."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.backends.add(backend)
        self.backend_service_patch_backends(max_rate_per_endpoint)

    def backend_service_remove_neg_backends(self, name, zones):
        """Deregister the NEGs named *name* in *zones* and patch the service."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.backends.remove(backend)
        self.backend_service_patch_backends()

    def backend_service_patch_backends(
            self, max_rate_per_endpoint: Optional[int] = None):
        """Push the current self.backends set to the backend service."""
        # NOTE(review): this and similar methods log via the root `logging`
        # module rather than the module-level `logger` used elsewhere.
        logging.info('Adding backends to Backend Service %s: %r',
                     self.backend_service.name, self.backends)
        self.compute.backend_service_patch_backends(self.backend_service,
                                                    self.backends,
                                                    max_rate_per_endpoint)

    def backend_service_remove_all_backends(self):
        """Detach every backend from the main backend service."""
        logging.info('Removing backends from Backend Service %s',
                     self.backend_service.name)
        self.compute.backend_service_remove_all_backends(self.backend_service)

    def wait_for_backends_healthy_status(self):
        """Block until every tracked backend reports healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.backend_service, self.backends)
        self.compute.wait_for_backends_healthy_status(self.backend_service,
                                                      self.backends)

    def create_alternative_backend_service(
            self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC):
        """Create the secondary ("alt") backend service."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        logger.info('Creating %s Alternative Backend Service "%s"',
                    protocol.name, name)
        resource = self.compute.create_backend_service_traffic_director(
            name, health_check=self.health_check, protocol=protocol)
        self.alternative_backend_service = resource
        self.alternative_backend_service_protocol = protocol

    def load_alternative_backend_service(self):
        """Load a pre-existing alternative backend service by derived name."""
        name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.alternative_backend_service = resource

    def delete_alternative_backend_service(self, force=False):
        """Delete the alternative backend service; no-op when untracked."""
        if force:
            name = self.make_resource_name(
                self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        elif self.alternative_backend_service:
            name = self.alternative_backend_service.name
        else:
            return
        logger.info('Deleting Alternative Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.alternative_backend_service = None

    def alternative_backend_service_add_neg_backends(self, name, zones):
        """Register NEGs as backends of the alternative service and patch."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.alternative_backends.add(backend)
        self.alternative_backend_service_patch_backends()

    def alternative_backend_service_patch_backends(self):
        """Push the current alternative backends set to the service."""
        logging.info('Adding backends to Backend Service %s: %r',
                     self.alternative_backend_service.name,
                     self.alternative_backends)
        self.compute.backend_service_patch_backends(
            self.alternative_backend_service, self.alternative_backends)

    def alternative_backend_service_remove_all_backends(self):
        """Detach every backend from the alternative backend service."""
        logging.info('Removing backends from Backend Service %s',
                     self.alternative_backend_service.name)
        self.compute.backend_service_remove_all_backends(
            self.alternative_backend_service)

    def wait_for_alternative_backends_healthy_status(self):
        """Block until every alternative backend reports healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.alternative_backend_service, self.alternative_backends)
        self.compute.wait_for_backends_healthy_status(
            self.alternative_backend_service, self.alternative_backends)

    def create_affinity_backend_service(
            self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC):
        """Create the backend service configured for session affinity tests."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        logger.info('Creating %s Affinity Backend Service "%s"', protocol.name,
                    name)
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            affinity_header=TEST_AFFINITY_METADATA_KEY)
        self.affinity_backend_service = resource
        self.affinity_backend_service_protocol = protocol

    def load_affinity_backend_service(self):
        """Load a pre-existing affinity backend service by derived name."""
        name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.affinity_backend_service = resource

    def delete_affinity_backend_service(self, force=False):
        """Delete the affinity backend service; no-op when untracked."""
        if force:
            name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        elif self.affinity_backend_service:
            name = self.affinity_backend_service.name
        else:
            return
        logger.info('Deleting Affinity Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.affinity_backend_service = None

    def affinity_backend_service_add_neg_backends(self, name, zones):
        """Register NEGs as backends of the affinity service and patch."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.affinity_backends.add(backend)
        self.affinity_backend_service_patch_backends()

    def affinity_backend_service_patch_backends(self):
        """Push the current affinity backends set to the service."""
        logging.info('Adding backends to Backend Service %s: %r',
                     self.affinity_backend_service.name, self.affinity_backends)
        self.compute.backend_service_patch_backends(
            self.affinity_backend_service, self.affinity_backends)

    def affinity_backend_service_remove_all_backends(self):
        """Detach every backend from the affinity backend service."""
        logging.info('Removing backends from Backend Service %s',
                     self.affinity_backend_service.name)
        self.compute.backend_service_remove_all_backends(
            self.affinity_backend_service)

    def wait_for_affinity_backends_healthy_status(self):
        """Block until every affinity backend reports healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.affinity_backend_service, self.affinity_backends)
        self.compute.wait_for_backends_healthy_status(
            self.affinity_backend_service, self.affinity_backends)

    def _generate_url_map_body(
        self,
        name: str,
        matcher_name: str,
        src_hosts,
        dst_default_backend_service: GcpResource,
        dst_host_rule_match_backend_service: Optional[GcpResource] = None,
    ) -> Dict[str, Any]:
        """Build the URL map request body routing *src_hosts* to a service."""
        if dst_host_rule_match_backend_service is None:
            dst_host_rule_match_backend_service = dst_default_backend_service
        return {
            'name':
                name,
            'defaultService':
                dst_default_backend_service.url,
            'hostRules': [{
                'hosts': src_hosts,
                'pathMatcher': matcher_name,
            }],
            'pathMatchers': [{
                'name': matcher_name,
                'defaultService': dst_host_rule_match_backend_service.url,
            }],
        }

    def create_url_map(
        self,
        src_host: str,
        src_port: int,
    ) -> GcpResource:
        """Create a URL map routing host:port to the main backend service."""
        src_address = f'{src_host}:{src_port}'
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info('Creating URL map "%s": %s -> %s', name, src_address,
                    self.backend_service.name)
        resource = self.compute.create_url_map_with_content(
            self._generate_url_map_body(name, matcher_name, [src_address],
                                        self.backend_service))
        self.url_map = resource
        return resource

    def patch_url_map(self, src_host: str, src_port: int,
                      backend_service: GcpResource):
        """Re-point the existing URL map at a different backend service."""
        src_address = f'{src_host}:{src_port}'
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info('Patching URL map "%s": %s -> %s', name, src_address,
                    backend_service.name)
        self.compute.patch_url_map(
            self.url_map,
            self._generate_url_map_body(name, matcher_name, [src_address],
                                        backend_service))

    def create_url_map_with_content(self, url_map_body: Any) -> GcpResource:
        """Create a URL map from a caller-supplied request body."""
        logger.info('Creating URL map: %s', url_map_body)
        resource = self.compute.create_url_map_with_content(url_map_body)
        self.url_map = resource
        return resource

    def delete_url_map(self, force=False):
        """Delete the URL map; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.URL_MAP_NAME)
        elif self.url_map:
            name = self.url_map.name
        else:
            return
        logger.info('Deleting URL Map "%s"', name)
        self.compute.delete_url_map(name)
        self.url_map = None

    def create_target_proxy(self):
        """Create a GRPC or HTTP target proxy matching the backend protocol."""
        name = self.make_resource_name(self.TARGET_PROXY_NAME)
        if self.backend_service_protocol is BackendServiceProtocol.GRPC:
            target_proxy_type = 'GRPC'
            create_proxy_fn = self.compute.create_target_grpc_proxy
            self.target_proxy_is_http = False
        elif self.backend_service_protocol is BackendServiceProtocol.HTTP2:
            target_proxy_type = 'HTTP'
            create_proxy_fn = self.compute.create_target_http_proxy
            self.target_proxy_is_http = True
        else:
            raise TypeError('Unexpected backend service protocol')
        # NOTE(review): `name` and `target_proxy_type` appear swapped
        # relative to the format string -- the log prints the name where
        # the proxy type should be and vice versa.
        logger.info('Creating target %s proxy "%s" to URL map %s', name,
                    target_proxy_type, self.url_map.name)
        self.target_proxy = create_proxy_fn(name, self.url_map)

    def delete_target_grpc_proxy(self, force=False):
        """Delete the target gRPC proxy; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.TARGET_PROXY_NAME)
        elif self.target_proxy:
            name = self.target_proxy.name
        else:
            return
        logger.info('Deleting Target GRPC proxy "%s"', name)
        self.compute.delete_target_grpc_proxy(name)
        self.target_proxy = None
        self.target_proxy_is_http = False

    def delete_target_http_proxy(self, force=False):
        """Delete the target HTTP proxy; only acts when the proxy is HTTP."""
        if force:
            name = self.make_resource_name(self.TARGET_PROXY_NAME)
        elif self.target_proxy and self.target_proxy_is_http:
            name = self.target_proxy.name
        else:
            return
        logger.info('Deleting HTTP Target proxy "%s"', name)
        self.compute.delete_target_http_proxy(name)
        self.target_proxy = None
        self.target_proxy_is_http = False

    def find_unused_forwarding_rule_port(
            self,
            *,
            lo: int = 1024,  # To avoid confusion, skip well-known ports.
            hi: int = 65535,
            attempts: int = 25) -> int:
        """Randomly probe [lo, hi] for a port with no forwarding rule."""
        # NOTE(review): the loop variable shadows the `attempts` parameter;
        # harmless here, but a distinct name would be clearer.
        for attempts in range(attempts):
            src_port = random.randint(lo, hi)
            if not (self.compute.exists_forwarding_rule(src_port)):
                return src_port
        # TODO(sergiitk): custom exception
        raise RuntimeError("Couldn't find unused forwarding rule port")

    def create_forwarding_rule(self, src_port: int):
        """Create a global forwarding rule from 0.0.0.0:src_port to the proxy."""
        name = self.make_resource_name(self.FORWARDING_RULE_NAME)
        src_port = int(src_port)
        logging.info(
            'Creating forwarding rule "%s" in network "%s": 0.0.0.0:%s -> %s',
            name, self.network, src_port, self.target_proxy.url)
        resource = self.compute.create_forwarding_rule(name, src_port,
                                                       self.target_proxy,
                                                       self.network_url)
        self.forwarding_rule = resource
        return resource

    def delete_forwarding_rule(self, force=False):
        """Delete the forwarding rule; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.FORWARDING_RULE_NAME)
        elif self.forwarding_rule:
            name = self.forwarding_rule.name
        else:
            return
        logger.info('Deleting Forwarding rule "%s"', name)
        self.compute.delete_forwarding_rule(name)
        self.forwarding_rule = None

    def create_firewall_rule(self, allowed_ports: List[str]):
        """Allow GCP health-check probes to reach *allowed_ports* on the VPC."""
        name = self.make_resource_name(self.FIREWALL_RULE_NAME)
        logging.info(
            'Creating firewall rule "%s" in network "%s" with allowed ports %s',
            name, self.network, allowed_ports)
        resource = self.compute.create_firewall_rule(
            name, self.network_url, xds_flags.FIREWALL_SOURCE_RANGE.value,
            allowed_ports)
        self.firewall_rule = resource

    def delete_firewall_rule(self, force=False):
        """The firewall rule won't be automatically removed."""
        if force:
            name = self.make_resource_name(self.FIREWALL_RULE_NAME)
        elif self.firewall_rule:
            name = self.firewall_rule.name
        else:
            return
        logger.info('Deleting Firewall Rule "%s"', name)
        self.compute.delete_firewall_rule(name)
        self.firewall_rule = None
class TrafficDirectorAppNetManager(TrafficDirectorManager):
    """Traffic Director manager using the AppNet (Network Services) API.

    Extends the base manager with a Router plus GrpcRoute pair (v1alpha1
    Network Services API) instead of the compute URL map routing.
    """
    GRPC_ROUTE_NAME = "grpc-route"
    ROUTER_NAME = "router"
    netsvc: _NetworkServicesV1Alpha1

    def __init__(self,
                 gcp_api_manager: gcp.api.GcpApiManager,
                 project: str,
                 *,
                 resource_prefix: str,
                 config_scope: str,
                 resource_suffix: Optional[str] = None,
                 network: str = 'default',
                 compute_api_version: str = 'v1'):
        super().__init__(gcp_api_manager,
                         project,
                         resource_prefix=resource_prefix,
                         resource_suffix=resource_suffix,
                         network=network,
                         compute_api_version=compute_api_version)
        self.config_scope = config_scope
        # API
        self.netsvc = _NetworkServicesV1Alpha1(gcp_api_manager, project)
        # Managed resources
        self.grpc_route: Optional[_NetworkServicesV1Alpha1.GrpcRoute] = None
        self.router: Optional[_NetworkServicesV1Alpha1.Router] = None

    def create_router(self) -> GcpResource:
        """Create a proxyless-gRPC Router scoped to self.config_scope."""
        name = self.make_resource_name(self.ROUTER_NAME)
        logger.info("Creating Router %s", name)
        body = {
            "type": "PROXYLESS_GRPC",
            "scope": self.config_scope,
        }
        resource = self.netsvc.create_router(name, body)
        # Re-fetch so self.router holds the fully-populated resource.
        self.router = self.netsvc.get_router(name)
        logger.debug("Loaded Router: %s", self.router)
        return resource

    def delete_router(self, force=False):
        """Delete the Router; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.ROUTER_NAME)
        elif self.router:
            name = self.router.name
        else:
            return
        logger.info('Deleting Router %s', name)
        self.netsvc.delete_router(name)
        self.router = None

    def create_grpc_route(self, src_host: str, src_port: int) -> GcpResource:
        """Create a GrpcRoute sending host:port to the main backend service."""
        host = f'{src_host}:{src_port}'
        service_name = self.netsvc.resource_full_name(self.backend_service.name,
                                                      "backendServices")
        # NOTE(review): "hostnames" is given a single string here, not a
        # list -- confirm against the Network Services API schema.
        body = {
            "routers": [self.router.url],
            "hostnames":
                host,
            "rules": [{
                "action": {
                    "destinations": [{
                        "serviceName": service_name
                    }]
                }
            }],
        }
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def create_grpc_route_with_content(self, body: Any) -> GcpResource:
        """Create a GrpcRoute from a caller-supplied request body."""
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def delete_grpc_route(self, force=False):
        """Delete the GrpcRoute; no-op when untracked and not forced."""
        if force:
            name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        elif self.grpc_route:
            name = self.grpc_route.name
        else:
            return
        logger.info('Deleting GrpcRoute %s', name)
        self.netsvc.delete_grpc_route(name)
        self.grpc_route = None

    def cleanup(self, *, force=False):
        """Delete AppNet resources first, then the base-class resources."""
        self.delete_grpc_route(force=force)
        self.delete_router(force=force)
        super().cleanup(force=force)
class TrafficDirectorSecureManager(TrafficDirectorManager):
    """Traffic Director manager with PSM security (TLS/mTLS) resources.

    Adds Network Security policies (server/client TLS, authorization) and a
    Network Services endpoint policy on top of the base manager.
    """

    # Name fragments; full names are built by make_resource_name().
    SERVER_TLS_POLICY_NAME = "server-tls-policy"
    CLIENT_TLS_POLICY_NAME = "client-tls-policy"
    AUTHZ_POLICY_NAME = "authz-policy"
    ENDPOINT_POLICY = "endpoint-policy"
    # Certificate provider plugin referenced by both TLS policies.
    CERTIFICATE_PROVIDER_INSTANCE = "google_cloud_private_spiffe"

    # API clients.
    netsec: _NetworkSecurityV1Beta1
    netsvc: _NetworkServicesV1Beta1

    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: Optional[str] = None,
        network: str = 'default',
        compute_api_version: str = 'v1',
    ):
        super().__init__(gcp_api_manager,
                         project,
                         resource_prefix=resource_prefix,
                         resource_suffix=resource_suffix,
                         network=network,
                         compute_api_version=compute_api_version)
        # API
        self.netsec = _NetworkSecurityV1Beta1(gcp_api_manager, project)
        self.netsvc = _NetworkServicesV1Beta1(gcp_api_manager, project)
        # Managed resources: set by create_* methods, reset by delete_*.
        self.server_tls_policy: Optional[ServerTlsPolicy] = None
        self.client_tls_policy: Optional[ClientTlsPolicy] = None
        self.authz_policy: Optional[AuthorizationPolicy] = None
        self.endpoint_policy: Optional[EndpointPolicy] = None

    def setup_server_security(self,
                              *,
                              server_namespace,
                              server_name,
                              server_port,
                              tls=True,
                              mtls=True):
        """Create the server TLS policy and attach it via an endpoint policy."""
        self.create_server_tls_policy(tls=tls, mtls=mtls)
        self.create_endpoint_policy(server_namespace=server_namespace,
                                    server_name=server_name,
                                    server_port=server_port)

    def setup_client_security(self,
                              *,
                              server_namespace,
                              server_name,
                              tls=True,
                              mtls=True):
        """Create the client TLS policy and patch it onto the backend service."""
        self.create_client_tls_policy(tls=tls, mtls=mtls)
        self.backend_service_apply_client_mtls_policy(server_namespace,
                                                      server_name)

    def cleanup(self, *, force=False):
        # Cleanup in the reverse order of creation
        super().cleanup(force=force)
        self.delete_endpoint_policy(force=force)
        self.delete_server_tls_policy(force=force)
        self.delete_client_tls_policy(force=force)
        self.delete_authz_policy(force=force)

    def create_server_tls_policy(self, *, tls, mtls):
        """Create a ServerTlsPolicy; skipped when neither tls nor mtls is set.

        tls adds a server certificate; mtls additionally requires client
        certificate validation against the provider CA.
        """
        name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        logger.info('Creating Server TLS Policy %s', name)
        if not tls and not mtls:
            logger.warning(
                'Server TLS Policy %s neither TLS, nor mTLS '
                'policy. Skipping creation', name)
            return
        certificate_provider = self._get_certificate_provider()
        policy = {}
        if tls:
            policy["serverCertificate"] = certificate_provider
        if mtls:
            policy["mtlsPolicy"] = {
                "clientValidationCa": [certificate_provider],
            }
        self.netsec.create_server_tls_policy(name, policy)
        # Re-read after creation so the cached policy has server-side fields.
        self.server_tls_policy = self.netsec.get_server_tls_policy(name)
        logger.debug('Server TLS Policy loaded: %r', self.server_tls_policy)

    def delete_server_tls_policy(self, force=False):
        """Delete the ServerTlsPolicy; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        elif self.server_tls_policy:
            name = self.server_tls_policy.name
        else:
            return
        logger.info('Deleting Server TLS Policy %s', name)
        self.netsec.delete_server_tls_policy(name)
        self.server_tls_policy = None

    def create_authz_policy(self, *, action: str, rules: list):
        """Create an AuthorizationPolicy with the given action and rules."""
        name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        logger.info('Creating Authz Policy %s', name)
        policy = {
            "action": action,
            "rules": rules,
        }
        self.netsec.create_authz_policy(name, policy)
        self.authz_policy = self.netsec.get_authz_policy(name)
        logger.debug('Authz Policy loaded: %r', self.authz_policy)

    def delete_authz_policy(self, force=False):
        """Delete the AuthorizationPolicy; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        elif self.authz_policy:
            name = self.authz_policy.name
        else:
            return
        logger.info('Deleting Authz Policy %s', name)
        self.netsec.delete_authz_policy(name)
        self.authz_policy = None

    def create_endpoint_policy(self, *, server_namespace: str, server_name: str,
                               server_port: int) -> None:
        """Create a GRPC_SERVER endpoint policy matching the server's NEG pods.

        Matches endpoints by the "app" metadata label (namespace-name) and
        the server port; attaches the server TLS policy and, if present,
        the authorization policy.
        """
        name = self.make_resource_name(self.ENDPOINT_POLICY)
        logger.info('Creating Endpoint Policy %s', name)
        # Label value mirrors how the test server deployment labels its pods.
        endpoint_matcher_labels = [{
            "labelName": "app",
            "labelValue": f"{server_namespace}-{server_name}"
        }]
        port_selector = {"ports": [str(server_port)]}
        label_matcher_all = {
            "metadataLabelMatchCriteria": "MATCH_ALL",
            "metadataLabels": endpoint_matcher_labels,
        }
        config = {
            "type": "GRPC_SERVER",
            "trafficPortSelector": port_selector,
            "endpointMatcher": {
                "metadataLabelMatcher": label_matcher_all,
            },
        }
        if self.server_tls_policy:
            config["serverTlsPolicy"] = self.server_tls_policy.name
        else:
            logger.warning(
                'Creating Endpoint Policy %s with '
                'no Server TLS policy attached', name)
        if self.authz_policy:
            config["authorizationPolicy"] = self.authz_policy.name
        self.netsvc.create_endpoint_policy(name, config)
        self.endpoint_policy = self.netsvc.get_endpoint_policy(name)
        logger.debug('Loaded Endpoint Policy: %r', self.endpoint_policy)

    def delete_endpoint_policy(self, force: bool = False) -> None:
        """Delete the endpoint policy; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.ENDPOINT_POLICY)
        elif self.endpoint_policy:
            name = self.endpoint_policy.name
        else:
            return
        logger.info('Deleting Endpoint Policy %s', name)
        self.netsvc.delete_endpoint_policy(name)
        self.endpoint_policy = None

    def create_client_tls_policy(self, *, tls, mtls):
        """Create a ClientTlsPolicy; skipped when neither tls nor mtls is set.

        tls validates the server against the provider CA; mtls additionally
        sends a client certificate.
        """
        name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
        logger.info('Creating Client TLS Policy %s', name)
        if not tls and not mtls:
            logger.warning(
                'Client TLS Policy %s neither TLS, nor mTLS '
                'policy. Skipping creation', name)
            return
        certificate_provider = self._get_certificate_provider()
        policy = {}
        if tls:
            policy["serverValidationCa"] = [certificate_provider]
        if mtls:
            policy["clientCertificate"] = certificate_provider
        self.netsec.create_client_tls_policy(name, policy)
        self.client_tls_policy = self.netsec.get_client_tls_policy(name)
        logger.debug('Client TLS Policy loaded: %r', self.client_tls_policy)

    def delete_client_tls_policy(self, force=False):
        """Delete the ClientTlsPolicy; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
        elif self.client_tls_policy:
            name = self.client_tls_policy.name
        else:
            return
        logger.info('Deleting Client TLS Policy %s', name)
        self.netsec.delete_client_tls_policy(name)
        self.client_tls_policy = None

    def backend_service_apply_client_mtls_policy(
        self,
        server_namespace,
        server_name,
    ):
        """Patch the backend service with the client TLS policy and SPIFFE SAN.

        No-op (with a warning) when the client TLS policy was not created.
        """
        if not self.client_tls_policy:
            logger.warning(
                'Client TLS policy not created, '
                'skipping attaching to Backend Service %s',
                self.backend_service.name)
            return
        # SPIFFE identity the client expects the server workload to present.
        server_spiffe = (f'spiffe://{self.project}.svc.id.goog/'
                         f'ns/{server_namespace}/sa/{server_name}')
        # NOTE(review): uses the root logger (logging.info), unlike the
        # module-level `logger` used elsewhere in this class.
        logging.info(
            'Adding Client TLS Policy to Backend Service %s: %s, '
            'server %s', self.backend_service.name, self.client_tls_policy.url,
            server_spiffe)
        self.compute.patch_backend_service(
            self.backend_service, {
                'securitySettings': {
                    'clientTlsPolicy': self.client_tls_policy.url,
                    'subjectAltNames': [server_spiffe]
                }
            })

    @classmethod
    def _get_certificate_provider(cls):
        """Return the certificateProviderInstance body shared by both policies."""
        return {
            "certificateProviderInstance": {
                "pluginInstance": cls.CERTIFICATE_PROVIDER_INSTANCE,
            },
        }
| 40.748299 | 80 | 0.636171 |
import functools
import logging
import random
from typing import Any, Dict, List, Optional, Set
from framework import xds_flags
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
_ComputeV1 = gcp.compute.ComputeV1
GcpResource = _ComputeV1.GcpResource
HealthCheckProtocol = _ComputeV1.HealthCheckProtocol
ZonalGcpResource = _ComputeV1.ZonalGcpResource
BackendServiceProtocol = _ComputeV1.BackendServiceProtocol
_BackendGRPC = BackendServiceProtocol.GRPC
_HealthCheckGRPC = HealthCheckProtocol.GRPC
_NetworkSecurityV1Beta1 = gcp.network_security.NetworkSecurityV1Beta1
ServerTlsPolicy = gcp.network_security.ServerTlsPolicy
ClientTlsPolicy = gcp.network_security.ClientTlsPolicy
AuthorizationPolicy = gcp.network_security.AuthorizationPolicy
_NetworkServicesV1Alpha1 = gcp.network_services.NetworkServicesV1Alpha1
_NetworkServicesV1Beta1 = gcp.network_services.NetworkServicesV1Beta1
EndpointPolicy = gcp.network_services.EndpointPolicy
TEST_AFFINITY_METADATA_KEY = 'xds_md'
class TrafficDirectorManager:
    """Manages the GCP Compute resources that make up a Traffic Director mesh.

    Creates, loads, patches, and deletes health checks, backend services
    (primary, alternative, and session-affinity variants), URL maps, target
    proxies, forwarding rules, and firewall rules. Every resource name is
    derived via make_resource_name() from a shared prefix/suffix so that a
    test run's resources can be identified and force-deleted later.
    """
    compute: _ComputeV1
    resource_prefix: str
    resource_suffix: str

    # Name fragments; full names are built by make_resource_name().
    BACKEND_SERVICE_NAME = "backend-service"
    ALTERNATIVE_BACKEND_SERVICE_NAME = "backend-service-alt"
    AFFINITY_BACKEND_SERVICE_NAME = "backend-service-affinity"
    HEALTH_CHECK_NAME = "health-check"
    URL_MAP_NAME = "url-map"
    URL_MAP_PATH_MATCHER_NAME = "path-matcher"
    TARGET_PROXY_NAME = "target-proxy"
    FORWARDING_RULE_NAME = "forwarding-rule"
    FIREWALL_RULE_NAME = "allow-health-checks"

    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: str,
        network: str = 'default',
        compute_api_version: str = 'v1',
    ):
        """Initialize the Compute API client and resource-tracking state."""
        self.compute = _ComputeV1(gcp_api_manager,
                                  project,
                                  version=compute_api_version)
        self.project: str = project
        self.network: str = network
        self.resource_prefix: str = resource_prefix
        self.resource_suffix: str = resource_suffix

        # Managed resources: set by create_*/load_* methods, reset by delete_*.
        self.health_check: Optional[GcpResource] = None
        self.backend_service: Optional[GcpResource] = None
        self.backend_service_protocol: Optional[BackendServiceProtocol] = None
        self.url_map: Optional[GcpResource] = None
        self.firewall_rule: Optional[GcpResource] = None
        self.target_proxy: Optional[GcpResource] = None
        # Whether the target proxy is an HTTP (vs GRPC) proxy; needed because
        # HTTP and GRPC proxies are deleted via different API methods.
        self.target_proxy_is_http: bool = False
        self.forwarding_rule: Optional[GcpResource] = None
        self.backends: Set[ZonalGcpResource] = set()
        self.alternative_backend_service: Optional[GcpResource] = None
        self.alternative_backend_service_protocol: Optional[
            BackendServiceProtocol] = None
        self.alternative_backends: Set[ZonalGcpResource] = set()
        self.affinity_backend_service: Optional[GcpResource] = None
        self.affinity_backend_service_protocol: Optional[
            BackendServiceProtocol] = None
        self.affinity_backends: Set[ZonalGcpResource] = set()

    @property
    def network_url(self):
        """Relative resource URL of the configured VPC network."""
        return f'global/networks/{self.network}'

    def setup_for_grpc(
            self,
            service_host,
            service_port,
            *,
            backend_protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            health_check_port: Optional[int] = None):
        """Create the full resource chain for a gRPC service (backend + routing)."""
        self.setup_backend_for_grpc(protocol=backend_protocol,
                                    health_check_port=health_check_port)
        self.setup_routing_rule_map_for_grpc(service_host, service_port)

    def setup_backend_for_grpc(
            self,
            *,
            protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            health_check_port: Optional[int] = None):
        """Create the health check and the primary backend service."""
        self.create_health_check(port=health_check_port)
        self.create_backend_service(protocol)

    def setup_routing_rule_map_for_grpc(self, service_host, service_port):
        """Create URL map, target proxy, and forwarding rule for host:port."""
        self.create_url_map(service_host, service_port)
        self.create_target_proxy()
        self.create_forwarding_rule(service_port)

    def cleanup(self, *, force=False):
        """Delete all managed resources in reverse dependency order."""
        self.delete_forwarding_rule(force=force)
        self.delete_target_http_proxy(force=force)
        self.delete_target_grpc_proxy(force=force)
        self.delete_url_map(force=force)
        self.delete_backend_service(force=force)
        self.delete_alternative_backend_service(force=force)
        self.delete_affinity_backend_service(force=force)
        self.delete_health_check(force=force)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the process lifetime (ruff B019); presumably
    # acceptable for these short-lived test managers — confirm.
    @functools.lru_cache(None)
    def make_resource_name(self, name: str) -> str:
        """Return "<prefix>-<name>[-<suffix>]"; the suffix is optional."""
        parts = [self.resource_prefix, name]
        if self.resource_suffix:
            parts.append(self.resource_suffix)
        return '-'.join(parts)

    def create_health_check(
            self,
            *,
            protocol: Optional[HealthCheckProtocol] = _HealthCheckGRPC,
            port: Optional[int] = None):
        """Create the health check; raises ValueError if one already exists."""
        if self.health_check:
            raise ValueError(f'Health check {self.health_check.name} '
                             'already created, delete it first')
        if protocol is None:
            protocol = _HealthCheckGRPC
        name = self.make_resource_name(self.HEALTH_CHECK_NAME)
        logger.info('Creating %s Health Check "%s"', protocol.name, name)
        resource = self.compute.create_health_check(name, protocol, port=port)
        self.health_check = resource

    def delete_health_check(self, force=False):
        """Delete the health check; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.HEALTH_CHECK_NAME)
        elif self.health_check:
            name = self.health_check.name
        else:
            return
        logger.info('Deleting Health Check "%s"', name)
        self.compute.delete_health_check(name)
        self.health_check = None

    def create_backend_service(
            self,
            protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
            subset_size: Optional[int] = None,
            affinity_header: Optional[str] = None):
        """Create the primary backend service wired to the health check."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        logger.info('Creating %s Backend Service "%s"', protocol.name, name)
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            subset_size=subset_size,
            affinity_header=affinity_header)
        self.backend_service = resource
        self.backend_service_protocol = protocol

    def load_backend_service(self):
        """Load an existing primary backend service by its canonical name."""
        name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.backend_service = resource

    def delete_backend_service(self, force=False):
        """Delete the primary backend service; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        elif self.backend_service:
            name = self.backend_service.name
        else:
            return
        logger.info('Deleting Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.backend_service = None

    def backend_service_add_neg_backends(self,
                                         name,
                                         zones,
                                         max_rate_per_endpoint: Optional[
                                             int] = None):
        """Add one NEG per zone to the primary backend service and patch it."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.backends.add(backend)
        self.backend_service_patch_backends(max_rate_per_endpoint)

    def backend_service_remove_neg_backends(self, name, zones):
        """Remove the per-zone NEGs from the primary backend service."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.backends.remove(backend)
        self.backend_service_patch_backends()

    def backend_service_patch_backends(
            self, max_rate_per_endpoint: Optional[int] = None):
        """Push the current self.backends set to the primary backend service."""
        # Fix: use the module logger for consistency with the rest of the class
        # (was the root logger via logging.info).
        logger.info('Adding backends to Backend Service %s: %r',
                    self.backend_service.name, self.backends)
        self.compute.backend_service_patch_backends(self.backend_service,
                                                    self.backends,
                                                    max_rate_per_endpoint)

    def backend_service_remove_all_backends(self):
        """Detach every backend from the primary backend service."""
        logger.info('Removing backends from Backend Service %s',
                    self.backend_service.name)
        self.compute.backend_service_remove_all_backends(self.backend_service)

    def wait_for_backends_healthy_status(self):
        """Block until all primary backends report healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.backend_service, self.backends)
        self.compute.wait_for_backends_healthy_status(self.backend_service,
                                                      self.backends)

    def create_alternative_backend_service(
            self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC):
        """Create the alternative backend service wired to the health check."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        logger.info('Creating %s Alternative Backend Service "%s"',
                    protocol.name, name)
        resource = self.compute.create_backend_service_traffic_director(
            name, health_check=self.health_check, protocol=protocol)
        self.alternative_backend_service = resource
        self.alternative_backend_service_protocol = protocol

    def load_alternative_backend_service(self):
        """Load an existing alternative backend service by its canonical name."""
        name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.alternative_backend_service = resource

    def delete_alternative_backend_service(self, force=False):
        """Delete the alternative backend service; force=True recomputes name."""
        if force:
            name = self.make_resource_name(
                self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        elif self.alternative_backend_service:
            name = self.alternative_backend_service.name
        else:
            return
        logger.info('Deleting Alternative Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.alternative_backend_service = None

    def alternative_backend_service_add_neg_backends(self, name, zones):
        """Add one NEG per zone to the alternative backend service."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.alternative_backends.add(backend)
        self.alternative_backend_service_patch_backends()

    def alternative_backend_service_patch_backends(self):
        """Push the current alternative backends set to its backend service."""
        logger.info('Adding backends to Backend Service %s: %r',
                    self.alternative_backend_service.name,
                    self.alternative_backends)
        self.compute.backend_service_patch_backends(
            self.alternative_backend_service, self.alternative_backends)

    def alternative_backend_service_remove_all_backends(self):
        """Detach every backend from the alternative backend service."""
        logger.info('Removing backends from Backend Service %s',
                    self.alternative_backend_service.name)
        self.compute.backend_service_remove_all_backends(
            self.alternative_backend_service)

    def wait_for_alternative_backends_healthy_status(self):
        """Block until all alternative backends report healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.alternative_backend_service, self.alternative_backends)
        self.compute.wait_for_backends_healthy_status(
            self.alternative_backend_service, self.alternative_backends)

    def create_affinity_backend_service(
            self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC):
        """Create the affinity backend service keyed on the test metadata header."""
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        logger.info('Creating %s Affinity Backend Service "%s"', protocol.name,
                    name)
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            affinity_header=TEST_AFFINITY_METADATA_KEY)
        self.affinity_backend_service = resource
        self.affinity_backend_service_protocol = protocol

    def load_affinity_backend_service(self):
        """Load an existing affinity backend service by its canonical name."""
        name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        resource = self.compute.get_backend_service_traffic_director(name)
        self.affinity_backend_service = resource

    def delete_affinity_backend_service(self, force=False):
        """Delete the affinity backend service; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        elif self.affinity_backend_service:
            name = self.affinity_backend_service.name
        else:
            return
        logger.info('Deleting Affinity Backend Service "%s"', name)
        self.compute.delete_backend_service(name)
        self.affinity_backend_service = None

    def affinity_backend_service_add_neg_backends(self, name, zones):
        """Add one NEG per zone to the affinity backend service."""
        logger.info('Waiting for Network Endpoint Groups to load endpoints.')
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info('Loaded NEG "%s" in zone %s', backend.name,
                        backend.zone)
            self.affinity_backends.add(backend)
        self.affinity_backend_service_patch_backends()

    def affinity_backend_service_patch_backends(self):
        """Push the current affinity backends set to its backend service."""
        logger.info('Adding backends to Backend Service %s: %r',
                    self.affinity_backend_service.name, self.affinity_backends)
        self.compute.backend_service_patch_backends(
            self.affinity_backend_service, self.affinity_backends)

    def affinity_backend_service_remove_all_backends(self):
        """Detach every backend from the affinity backend service."""
        logger.info('Removing backends from Backend Service %s',
                    self.affinity_backend_service.name)
        self.compute.backend_service_remove_all_backends(
            self.affinity_backend_service)

    def wait_for_affinity_backends_healthy_status(self):
        """Block until all affinity backends report healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.affinity_backend_service, self.affinity_backends)
        self.compute.wait_for_backends_healthy_status(
            self.affinity_backend_service, self.affinity_backends)

    def _generate_url_map_body(
        self,
        name: str,
        matcher_name: str,
        src_hosts,
        dst_default_backend_service: GcpResource,
        dst_host_rule_match_backend_service: Optional[GcpResource] = None,
    ) -> Dict[str, Any]:
        """Build a URL map request body routing src_hosts via a path matcher.

        When no dedicated host-rule service is given, the default backend
        service handles matched hosts too.
        """
        if dst_host_rule_match_backend_service is None:
            dst_host_rule_match_backend_service = dst_default_backend_service
        return {
            'name':
                name,
            'defaultService':
                dst_default_backend_service.url,
            'hostRules': [{
                'hosts': src_hosts,
                'pathMatcher': matcher_name,
            }],
            'pathMatchers': [{
                'name': matcher_name,
                'defaultService': dst_host_rule_match_backend_service.url,
            }],
        }

    def create_url_map(
        self,
        src_host: str,
        src_port: int,
    ) -> GcpResource:
        """Create a URL map routing host:port to the primary backend service."""
        src_address = f'{src_host}:{src_port}'
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info('Creating URL map "%s": %s -> %s', name, src_address,
                    self.backend_service.name)
        resource = self.compute.create_url_map_with_content(
            self._generate_url_map_body(name, matcher_name, [src_address],
                                        self.backend_service))
        self.url_map = resource
        return resource

    def patch_url_map(self, src_host: str, src_port: int,
                      backend_service: GcpResource):
        """Repoint the existing URL map at a different backend service."""
        src_address = f'{src_host}:{src_port}'
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info('Patching URL map "%s": %s -> %s', name, src_address,
                    backend_service.name)
        self.compute.patch_url_map(
            self.url_map,
            self._generate_url_map_body(name, matcher_name, [src_address],
                                        backend_service))

    def create_url_map_with_content(self, url_map_body: Any) -> GcpResource:
        """Create a URL map from a caller-provided request body."""
        logger.info('Creating URL map: %s', url_map_body)
        resource = self.compute.create_url_map_with_content(url_map_body)
        self.url_map = resource
        return resource

    def delete_url_map(self, force=False):
        """Delete the URL map; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.URL_MAP_NAME)
        elif self.url_map:
            name = self.url_map.name
        else:
            return
        logger.info('Deleting URL Map "%s"', name)
        self.compute.delete_url_map(name)
        self.url_map = None

    def create_target_proxy(self):
        """Create a GRPC or HTTP target proxy matching the backend protocol."""
        name = self.make_resource_name(self.TARGET_PROXY_NAME)
        if self.backend_service_protocol is BackendServiceProtocol.GRPC:
            target_proxy_type = 'GRPC'
            create_proxy_fn = self.compute.create_target_grpc_proxy
            self.target_proxy_is_http = False
        elif self.backend_service_protocol is BackendServiceProtocol.HTTP2:
            target_proxy_type = 'HTTP'
            create_proxy_fn = self.compute.create_target_http_proxy
            self.target_proxy_is_http = True
        else:
            raise TypeError('Unexpected backend service protocol')

        # Fix: the proxy type and name were passed in the wrong order, logging
        # e.g. 'Creating target <name> proxy "GRPC" ...'.
        logger.info('Creating target %s proxy "%s" to URL map %s',
                    target_proxy_type, name, self.url_map.name)
        self.target_proxy = create_proxy_fn(name, self.url_map)

    def delete_target_grpc_proxy(self, force=False):
        """Delete the target GRPC proxy; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.TARGET_PROXY_NAME)
        # Fix: mirror delete_target_http_proxy's type guard so a tracked HTTP
        # proxy is never deleted through the GRPC proxy API.
        elif self.target_proxy and not self.target_proxy_is_http:
            name = self.target_proxy.name
        else:
            return
        logger.info('Deleting Target GRPC proxy "%s"', name)
        self.compute.delete_target_grpc_proxy(name)
        self.target_proxy = None
        self.target_proxy_is_http = False

    def delete_target_http_proxy(self, force=False):
        """Delete the target HTTP proxy; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.TARGET_PROXY_NAME)
        elif self.target_proxy and self.target_proxy_is_http:
            name = self.target_proxy.name
        else:
            return
        logger.info('Deleting HTTP Target proxy "%s"', name)
        self.compute.delete_target_http_proxy(name)
        self.target_proxy = None
        self.target_proxy_is_http = False

    def find_unused_forwarding_rule_port(
            self,
            *,
            lo: int = 1024,
            hi: int = 65535,
            attempts: int = 25) -> int:
        """Return a random port in [lo, hi] with no existing forwarding rule.

        Raises RuntimeError after `attempts` unsuccessful random draws.
        """
        # Fix: the loop variable previously shadowed the `attempts` parameter.
        for _ in range(attempts):
            src_port = random.randint(lo, hi)
            if not self.compute.exists_forwarding_rule(src_port):
                return src_port
        raise RuntimeError("Couldn't find unused forwarding rule port")

    def create_forwarding_rule(self, src_port: int):
        """Create a forwarding rule sending 0.0.0.0:src_port to the proxy."""
        name = self.make_resource_name(self.FORWARDING_RULE_NAME)
        src_port = int(src_port)
        logger.info(
            'Creating forwarding rule "%s" in network "%s": 0.0.0.0:%s -> %s',
            name, self.network, src_port, self.target_proxy.url)
        resource = self.compute.create_forwarding_rule(name, src_port,
                                                       self.target_proxy,
                                                       self.network_url)
        self.forwarding_rule = resource
        return resource

    def delete_forwarding_rule(self, force=False):
        """Delete the forwarding rule; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.FORWARDING_RULE_NAME)
        elif self.forwarding_rule:
            name = self.forwarding_rule.name
        else:
            return
        logger.info('Deleting Forwarding rule "%s"', name)
        self.compute.delete_forwarding_rule(name)
        self.forwarding_rule = None

    def create_firewall_rule(self, allowed_ports: List[str]):
        """Allow health-check traffic from the configured source range."""
        name = self.make_resource_name(self.FIREWALL_RULE_NAME)
        logger.info(
            'Creating firewall rule "%s" in network "%s" with allowed ports %s',
            name, self.network, allowed_ports)
        resource = self.compute.create_firewall_rule(
            name, self.network_url, xds_flags.FIREWALL_SOURCE_RANGE.value,
            allowed_ports)
        self.firewall_rule = resource

    def delete_firewall_rule(self, force=False):
        """The firewall rule won't be automatically removed."""
        if force:
            name = self.make_resource_name(self.FIREWALL_RULE_NAME)
        elif self.firewall_rule:
            name = self.firewall_rule.name
        else:
            return
        logger.info('Deleting Firewall Rule "%s"', name)
        self.compute.delete_firewall_rule(name)
        self.firewall_rule = None
class TrafficDirectorAppNetManager(TrafficDirectorManager):
    """Traffic Director manager backed by the AppNet (Network Services) API.

    Extends the base manager with a PROXYLESS_GRPC Router and a GrpcRoute
    that points traffic at the base class's backend service.
    """

    # Name fragments; full names are built by make_resource_name().
    GRPC_ROUTE_NAME = "grpc-route"
    ROUTER_NAME = "router"

    # Network Services v1alpha1 API client.
    netsvc: _NetworkServicesV1Alpha1

    def __init__(self,
                 gcp_api_manager: gcp.api.GcpApiManager,
                 project: str,
                 *,
                 resource_prefix: str,
                 config_scope: str,
                 resource_suffix: Optional[str] = None,
                 network: str = 'default',
                 compute_api_version: str = 'v1'):
        super().__init__(gcp_api_manager,
                         project,
                         resource_prefix=resource_prefix,
                         resource_suffix=resource_suffix,
                         network=network,
                         compute_api_version=compute_api_version)
        # Scope attached to the Router resource body in create_router().
        self.config_scope = config_scope
        # API
        self.netsvc = _NetworkServicesV1Alpha1(gcp_api_manager, project)
        # Managed resources: set by create_* methods, reset by delete_*.
        self.grpc_route: Optional[_NetworkServicesV1Alpha1.GrpcRoute] = None
        self.router: Optional[_NetworkServicesV1Alpha1.Router] = None

    def create_router(self) -> GcpResource:
        """Create a PROXYLESS_GRPC Router and cache the loaded resource."""
        name = self.make_resource_name(self.ROUTER_NAME)
        logger.info("Creating Router %s", name)
        body = {
            "type": "PROXYLESS_GRPC",
            "scope": self.config_scope,
        }
        resource = self.netsvc.create_router(name, body)
        # Re-read after creation so self.router carries server-side fields.
        self.router = self.netsvc.get_router(name)
        logger.debug("Loaded Router: %s", self.router)
        return resource

    def delete_router(self, force=False):
        """Delete the Router; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.ROUTER_NAME)
        elif self.router:
            name = self.router.name
        else:
            return
        logger.info('Deleting Router %s', name)
        self.netsvc.delete_router(name)
        self.router = None

    def create_grpc_route(self, src_host: str, src_port: int) -> GcpResource:
        """Create a GrpcRoute sending host:port traffic to the backend service.

        Requires create_router() and create_backend_service() to have run:
        reads self.router.url and self.backend_service.name.
        """
        host = f'{src_host}:{src_port}'
        service_name = self.netsvc.resource_full_name(self.backend_service.name,
                                                      "backendServices")
        # NOTE(review): "hostnames" is assigned a single "host:port" string,
        # not a list — confirm the GrpcRoute API accepts a scalar here.
        body = {
            "routers": [self.router.url],
            "hostnames":
                host,
            "rules": [{
                "action": {
                    "destinations": [{
                        "serviceName": service_name
                    }]
                }
            }],
        }
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def create_grpc_route_with_content(self, body: Any) -> GcpResource:
        """Create a GrpcRoute from a caller-provided request body."""
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def delete_grpc_route(self, force=False):
        """Delete the GrpcRoute; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        elif self.grpc_route:
            name = self.grpc_route.name
        else:
            return
        logger.info('Deleting GrpcRoute %s', name)
        self.netsvc.delete_grpc_route(name)
        self.grpc_route = None

    def cleanup(self, *, force=False):
        """Delete AppNet resources (route before router), then base resources."""
        self.delete_grpc_route(force=force)
        self.delete_router(force=force)
        super().cleanup(force=force)
class TrafficDirectorSecureManager(TrafficDirectorManager):
SERVER_TLS_POLICY_NAME = "server-tls-policy"
CLIENT_TLS_POLICY_NAME = "client-tls-policy"
AUTHZ_POLICY_NAME = "authz-policy"
ENDPOINT_POLICY = "endpoint-policy"
CERTIFICATE_PROVIDER_INSTANCE = "google_cloud_private_spiffe"
netsec: _NetworkSecurityV1Beta1
netsvc: _NetworkServicesV1Beta1
    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: Optional[str] = None,
        network: str = 'default',
        compute_api_version: str = 'v1',
    ):
        """Initialize the base manager plus Network Security/Services clients."""
        super().__init__(gcp_api_manager,
                         project,
                         resource_prefix=resource_prefix,
                         resource_suffix=resource_suffix,
                         network=network,
                         compute_api_version=compute_api_version)
        # API
        self.netsec = _NetworkSecurityV1Beta1(gcp_api_manager, project)
        self.netsvc = _NetworkServicesV1Beta1(gcp_api_manager, project)
        # Managed resources: set by create_* methods, reset by delete_*.
        self.server_tls_policy: Optional[ServerTlsPolicy] = None
        self.client_tls_policy: Optional[ClientTlsPolicy] = None
        self.authz_policy: Optional[AuthorizationPolicy] = None
        self.endpoint_policy: Optional[EndpointPolicy] = None
    def setup_server_security(self,
                              *,
                              server_namespace,
                              server_name,
                              server_port,
                              tls=True,
                              mtls=True):
        """Create the server TLS policy and attach it via an endpoint policy."""
        self.create_server_tls_policy(tls=tls, mtls=mtls)
        self.create_endpoint_policy(server_namespace=server_namespace,
                                    server_name=server_name,
                                    server_port=server_port)
    def setup_client_security(self,
                              *,
                              server_namespace,
                              server_name,
                              tls=True,
                              mtls=True):
        """Create the client TLS policy and patch it onto the backend service."""
        self.create_client_tls_policy(tls=tls, mtls=mtls)
        self.backend_service_apply_client_mtls_policy(server_namespace,
                                                      server_name)
    def cleanup(self, *, force=False):
        """Delete base resources first, then the security policies."""
        # Cleanup in the reverse order of creation
        super().cleanup(force=force)
        self.delete_endpoint_policy(force=force)
        self.delete_server_tls_policy(force=force)
        self.delete_client_tls_policy(force=force)
        self.delete_authz_policy(force=force)
    def create_server_tls_policy(self, *, tls, mtls):
        """Create a ServerTlsPolicy; skipped when neither tls nor mtls is set.

        tls adds a server certificate; mtls additionally requires client
        certificate validation against the provider CA.
        """
        name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        logger.info('Creating Server TLS Policy %s', name)
        if not tls and not mtls:
            logger.warning(
                'Server TLS Policy %s neither TLS, nor mTLS '
                'policy. Skipping creation', name)
            return
        certificate_provider = self._get_certificate_provider()
        policy = {}
        if tls:
            policy["serverCertificate"] = certificate_provider
        if mtls:
            policy["mtlsPolicy"] = {
                "clientValidationCa": [certificate_provider],
            }
        self.netsec.create_server_tls_policy(name, policy)
        # Re-read after creation so the cached policy has server-side fields.
        self.server_tls_policy = self.netsec.get_server_tls_policy(name)
        logger.debug('Server TLS Policy loaded: %r', self.server_tls_policy)
    def delete_server_tls_policy(self, force=False):
        """Delete the ServerTlsPolicy; with force=True the name is recomputed."""
        if force:
            name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        elif self.server_tls_policy:
            name = self.server_tls_policy.name
        else:
            return
        logger.info('Deleting Server TLS Policy %s', name)
        self.netsec.delete_server_tls_policy(name)
        self.server_tls_policy = None
    def create_authz_policy(self, *, action: str, rules: list):
        """Create an AuthorizationPolicy with the given action and rules."""
        name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        logger.info('Creating Authz Policy %s', name)
        policy = {
            "action": action,
            "rules": rules,
        }
        self.netsec.create_authz_policy(name, policy)
        self.authz_policy = self.netsec.get_authz_policy(name)
        logger.debug('Authz Policy loaded: %r', self.authz_policy)
    def delete_authz_policy(self, force=False):
        """Delete the AuthorizationPolicy; force=True recomputes the name."""
        if force:
            name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        elif self.authz_policy:
            name = self.authz_policy.name
        else:
            return
        logger.info('Deleting Authz Policy %s', name)
        self.netsec.delete_authz_policy(name)
        self.authz_policy = None
def create_endpoint_policy(self, *, server_namespace: str, server_name: str,
                           server_port: int) -> None:
    """Create an Endpoint Policy selecting the server's endpoints.

    Matches GRPC_SERVER endpoints labelled app=<namespace>-<name> on the
    given port, attaching the previously created Server TLS and Authz
    policies when present. The result is cached on self.endpoint_policy.

    Args:
        server_namespace: namespace part of the "app" label value.
        server_name: name part of the "app" label value.
        server_port: traffic port the policy applies to.
    """
    name = self.make_resource_name(self.ENDPOINT_POLICY)
    logger.info('Creating Endpoint Policy %s', name)
    # Select only endpoints whose "app" label matches the server workload.
    endpoint_matcher_labels = [{
        "labelName": "app",
        "labelValue": f"{server_namespace}-{server_name}"
    }]
    port_selector = {"ports": [str(server_port)]}
    label_matcher_all = {
        "metadataLabelMatchCriteria": "MATCH_ALL",
        "metadataLabels": endpoint_matcher_labels,
    }
    config = {
        "type": "GRPC_SERVER",
        "trafficPortSelector": port_selector,
        "endpointMatcher": {
            "metadataLabelMatcher": label_matcher_all,
        },
    }
    # TLS/Authz attachments are optional: only reference policies that
    # were actually created earlier in the test setup.
    if self.server_tls_policy:
        config["serverTlsPolicy"] = self.server_tls_policy.name
    else:
        logger.warning(
            'Creating Endpoint Policy %s with '
            'no Server TLS policy attached', name)
    if self.authz_policy:
        config["authorizationPolicy"] = self.authz_policy.name
    self.netsvc.create_endpoint_policy(name, config)
    self.endpoint_policy = self.netsvc.get_endpoint_policy(name)
    logger.debug('Loaded Endpoint Policy: %r', self.endpoint_policy)
def delete_endpoint_policy(self, force: bool = False) -> None:
    """Delete the Endpoint Policy, by cached handle or derived name."""
    if force:
        target = self.make_resource_name(self.ENDPOINT_POLICY)
    else:
        if not self.endpoint_policy:
            return
        target = self.endpoint_policy.name
    logger.info('Deleting Endpoint Policy %s', target)
    self.netsvc.delete_endpoint_policy(target)
    self.endpoint_policy = None
def create_client_tls_policy(self, *, tls, mtls):
    """Create a Client TLS Policy and cache it on self.client_tls_policy.

    Args:
        tls: validate the server certificate against the CA provider.
        mtls: additionally present a client certificate.

    If both flags are false, creation is skipped with a warning and
    self.client_tls_policy is left unset.
    """
    name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
    logger.info('Creating Client TLS Policy %s', name)
    if not tls and not mtls:
        logger.warning(
            'Client TLS Policy %s neither TLS, nor mTLS '
            'policy. Skipping creation', name)
        return
    certificate_provider = self._get_certificate_provider()
    policy = {}
    if tls:
        policy["serverValidationCa"] = [certificate_provider]
    if mtls:
        policy["clientCertificate"] = certificate_provider
    self.netsec.create_client_tls_policy(name, policy)
    self.client_tls_policy = self.netsec.get_client_tls_policy(name)
    logger.debug('Client TLS Policy loaded: %r', self.client_tls_policy)
def delete_client_tls_policy(self, force=False):
    """Delete the Client TLS Policy, by cached handle or derived name."""
    if force:
        target = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
    else:
        if not self.client_tls_policy:
            return
        target = self.client_tls_policy.name
    logger.info('Deleting Client TLS Policy %s', target)
    self.netsec.delete_client_tls_policy(target)
    self.client_tls_policy = None
def backend_service_apply_client_mtls_policy(
    self,
    server_namespace,
    server_name,
):
    """Attach the Client TLS Policy to the Backend Service.

    Patches the backend service's securitySettings with the client TLS
    policy URL and the server's SPIFFE identity as the expected SAN.
    No-op (with a warning) if the client TLS policy was never created.

    Args:
        server_namespace: namespace used in the server's SPIFFE id.
        server_name: service-account name used in the server's SPIFFE id.
    """
    if not self.client_tls_policy:
        logger.warning(
            'Client TLS policy not created, '
            'skipping attaching to Backend Service %s',
            self.backend_service.name)
        return
    server_spiffe = (f'spiffe://{self.project}.svc.id.goog/'
                     f'ns/{server_namespace}/sa/{server_name}')
    # Fix: use the module-level logger like every other method in this
    # class; the original called logging.info() on the root logger.
    logger.info(
        'Adding Client TLS Policy to Backend Service %s: %s, '
        'server %s', self.backend_service.name, self.client_tls_policy.url,
        server_spiffe)
    self.compute.patch_backend_service(
        self.backend_service, {
            'securitySettings': {
                'clientTlsPolicy': self.client_tls_policy.url,
                'subjectAltNames': [server_spiffe]
            }
        })
@classmethod
def _get_certificate_provider(cls):
    """Return the certificateProviderInstance config referencing the
    class-level CERTIFICATE_PROVIDER_INSTANCE plugin."""
    provider = {"pluginInstance": cls.CERTIFICATE_PROVIDER_INSTANCE}
    return {"certificateProviderInstance": provider}
| true | true |
f71bc2a2616d37575b4b7913e3c9a01e51b63aa1 | 3,636 | py | Python | client/chat/main.py | rrsilaya/spaceteam | eca853d82f14d1d5f5f892977dfb35d20da40d0b | [
"MIT"
] | null | null | null | client/chat/main.py | rrsilaya/spaceteam | eca853d82f14d1d5f5f892977dfb35d20da40d0b | [
"MIT"
] | null | null | null | client/chat/main.py | rrsilaya/spaceteam | eca853d82f14d1d5f5f892977dfb35d20da40d0b | [
"MIT"
] | null | null | null | import sys
from threading import Thread
from connection import TcpConnection
from proto.tcp_packet_pb2 import TcpPacket
class Chat():
    """Thin chat client over TcpConnection using TcpPacket protobuf messages.

    Outgoing packets are built per action (create lobby, connect, chat,
    player list, disconnect); inbound packets are decoded on a background
    thread and rendered through a caller-supplied callback.
    """

    def __init__(self):
        self.connection = TcpConnection()
        # Reused as a scratch decoder for inbound data and as the source of
        # packet-type constants / nested message classes.
        self.packet = TcpPacket()

    def createLobby(self, maxPlayers, *args):
        """Ask the server for a new lobby; return the assigned lobby id."""
        payload = self.packet.CreateLobbyPacket()
        payload.type = self.packet.CREATE_LOBBY
        payload.max_players = maxPlayers
        # NOTE(review): lobby_id is only set when three or more extra
        # positional args are supplied (args[2]) — confirm the intended
        # index/threshold against the callers.
        if len(args) > 2:
            payload.lobby_id = args[2]
        lobby = self.connection.send(payload)
        payload.ParseFromString(lobby)
        return payload.lobby_id

    def connect(self, id, *args):
        """Join lobby `id` as the player named args[0] (default 'anon').

        Returns the lobby id on success; on ERR_LDNE / ERR_LFULL prints the
        server's error message and terminates the process.
        """
        payload = self.packet.ConnectPacket()
        payload.type = self.packet.CONNECT
        payload.lobby_id = id
        payload.player.name = args[0] if args else 'anon'
        self.user = payload.player
        self.lobby = payload.lobby_id
        lobby = self.connection.send(payload)
        self.packet.ParseFromString(lobby)
        if self.packet.type == self.packet.CONNECT:
            payload.ParseFromString(lobby)
            return payload.lobby_id
        elif self.packet.type == self.packet.ERR_LDNE:
            # Lobby does not exist.
            payload = self.packet.ErrLdnePacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)
        elif self.packet.type == self.packet.ERR_LFULL:
            # Lobby is full.
            payload = self.packet.ErrLfullPacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)

    def listen(self, receiveCallback):
        """Start the background receive thread; decoded packets are
        rendered through receiveCallback(text, color=...)."""
        self.receiveCallback = receiveCallback
        self.stream = Thread(target=self.connection.receive, args=[self._parsePacket])
        self.stream.start()

    def sendChat(self, message):
        """Build (not send) a CHAT packet for the current user and lobby."""
        payload = self.packet.ChatPacket()
        payload.type = self.packet.CHAT
        payload.message = message
        payload.player.name = self.user.name
        payload.lobby_id = self.lobby
        return payload

    def getPlayerList(self):
        """Build (not send) a PLAYER_LIST request packet."""
        payload = self.packet.PlayerListPacket()
        payload.type = self.packet.PLAYER_LIST
        return payload

    def disconnect(self):
        """Notify the server of departure and close the connection."""
        payload = self.packet.DisconnectPacket()
        payload.type = self.packet.DISCONNECT
        payload.player.name = self.user.name
        payload.player.id = self.user.id
        self.connection.asyncsend(payload)
        self.connection.close()

    def _parse(type, packet):
        # Static-style helper (always invoked as Chat._parse, so no `self`):
        # decode `packet` bytes into a fresh message of class `type`.
        data = type()
        data.ParseFromString(packet)
        return data

    def _parsePacket(self, data):
        """Decode one inbound packet and render it via receiveCallback."""
        self.packet.ParseFromString(data)
        if self.packet.type == self.packet.DISCONNECT:
            data = Chat._parse(self.packet.DisconnectPacket, data)
            self.receiveCallback('\n<', color='RED')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has left the chat room>\n\n', color='RED')
        elif self.packet.type == self.packet.CONNECT:
            data = Chat._parse(self.packet.ConnectPacket, data)
            self.receiveCallback('\n<', color='GREEN')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has joined the chat>\n\n', color='GREEN')
        elif self.packet.type == self.packet.CHAT:
            data = Chat._parse(self.packet.ChatPacket, data)
            self.receiveCallback(data.player.name + ': ', color='YELLOW')
            self.receiveCallback(data.message + '\n')
        elif self.packet.type == self.packet.PLAYER_LIST:
            data = Chat._parse(self.packet.PlayerListPacket, data)
            self.receiveCallback('\n[PLAYER LIST]\n', color='GREEN')
            for player in data.player_list:
                self.receiveCallback('> {}@{}\n'.format(player.name, player.id))
            self.receiveCallback('\n')

    def _encode(self, stdin):
        # Map raw user input to a packet: the '^players' command requests
        # the roster, anything else becomes a chat message.
        if stdin == '^players':
            data = self.getPlayerList()
        else:
            data = self.sendChat(stdin)
        return data
| 27.969231 | 82 | 0.686469 | import sys
from threading import Thread
from connection import TcpConnection
from proto.tcp_packet_pb2 import TcpPacket
class Chat():
    """Thin chat client over TcpConnection using TcpPacket protobuf messages.

    Outgoing packets are built per action (create lobby, connect, chat,
    player list, disconnect); inbound packets are decoded on a background
    thread and rendered through a caller-supplied callback.
    """

    def __init__(self):
        self.connection = TcpConnection()
        # Reused as a scratch decoder for inbound data and as the source of
        # packet-type constants / nested message classes.
        self.packet = TcpPacket()

    def createLobby(self, maxPlayers, *args):
        """Ask the server for a new lobby; return the assigned lobby id."""
        payload = self.packet.CreateLobbyPacket()
        payload.type = self.packet.CREATE_LOBBY
        payload.max_players = maxPlayers
        # NOTE(review): lobby_id is only set when three or more extra
        # positional args are supplied (args[2]) — confirm the intended
        # index/threshold against the callers.
        if len(args) > 2:
            payload.lobby_id = args[2]
        lobby = self.connection.send(payload)
        payload.ParseFromString(lobby)
        return payload.lobby_id

    def connect(self, id, *args):
        """Join lobby `id` as the player named args[0] (default 'anon').

        Returns the lobby id on success; on ERR_LDNE / ERR_LFULL prints the
        server's error message and terminates the process.
        """
        payload = self.packet.ConnectPacket()
        payload.type = self.packet.CONNECT
        payload.lobby_id = id
        payload.player.name = args[0] if args else 'anon'
        self.user = payload.player
        self.lobby = payload.lobby_id
        lobby = self.connection.send(payload)
        self.packet.ParseFromString(lobby)
        if self.packet.type == self.packet.CONNECT:
            payload.ParseFromString(lobby)
            return payload.lobby_id
        elif self.packet.type == self.packet.ERR_LDNE:
            # Lobby does not exist.
            payload = self.packet.ErrLdnePacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)
        elif self.packet.type == self.packet.ERR_LFULL:
            # Lobby is full.
            payload = self.packet.ErrLfullPacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)

    def listen(self, receiveCallback):
        """Start the background receive thread; decoded packets are
        rendered through receiveCallback(text, color=...)."""
        self.receiveCallback = receiveCallback
        self.stream = Thread(target=self.connection.receive, args=[self._parsePacket])
        self.stream.start()

    def sendChat(self, message):
        """Build (not send) a CHAT packet for the current user and lobby."""
        payload = self.packet.ChatPacket()
        payload.type = self.packet.CHAT
        payload.message = message
        payload.player.name = self.user.name
        payload.lobby_id = self.lobby
        return payload

    def getPlayerList(self):
        """Build (not send) a PLAYER_LIST request packet."""
        payload = self.packet.PlayerListPacket()
        payload.type = self.packet.PLAYER_LIST
        return payload

    def disconnect(self):
        """Notify the server of departure and close the connection."""
        payload = self.packet.DisconnectPacket()
        payload.type = self.packet.DISCONNECT
        payload.player.name = self.user.name
        payload.player.id = self.user.id
        self.connection.asyncsend(payload)
        self.connection.close()

    def _parse(type, packet):
        # Static-style helper (always invoked as Chat._parse, so no `self`):
        # decode `packet` bytes into a fresh message of class `type`.
        data = type()
        data.ParseFromString(packet)
        return data

    def _parsePacket(self, data):
        """Decode one inbound packet and render it via receiveCallback."""
        self.packet.ParseFromString(data)
        if self.packet.type == self.packet.DISCONNECT:
            data = Chat._parse(self.packet.DisconnectPacket, data)
            self.receiveCallback('\n<', color='RED')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has left the chat room>\n\n', color='RED')
        elif self.packet.type == self.packet.CONNECT:
            data = Chat._parse(self.packet.ConnectPacket, data)
            self.receiveCallback('\n<', color='GREEN')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has joined the chat>\n\n', color='GREEN')
        elif self.packet.type == self.packet.CHAT:
            data = Chat._parse(self.packet.ChatPacket, data)
            self.receiveCallback(data.player.name + ': ', color='YELLOW')
            self.receiveCallback(data.message + '\n')
        elif self.packet.type == self.packet.PLAYER_LIST:
            data = Chat._parse(self.packet.PlayerListPacket, data)
            self.receiveCallback('\n[PLAYER LIST]\n', color='GREEN')
            for player in data.player_list:
                self.receiveCallback('> {}@{}\n'.format(player.name, player.id))
            self.receiveCallback('\n')

    def _encode(self, stdin):
        # Map raw user input to a packet: the '^players' command requests
        # the roster, anything else becomes a chat message.
        if stdin == '^players':
            data = self.getPlayerList()
        else:
            data = self.sendChat(stdin)
        return data
| true | true |
f71bc4b4bf4d80efae10e1ebc5854fc9aae76ce2 | 605 | py | Python | OrderManagement/migrations/0006_auto_20201114_0349.py | glen-s-abraham/OnlineDelivery | 138356e17e52d0a4d4a1778f9c440006ce28ae5c | [
"MIT"
] | null | null | null | OrderManagement/migrations/0006_auto_20201114_0349.py | glen-s-abraham/OnlineDelivery | 138356e17e52d0a4d4a1778f9c440006ce28ae5c | [
"MIT"
] | null | null | null | OrderManagement/migrations/0006_auto_20201114_0349.py | glen-s-abraham/OnlineDelivery | 138356e17e52d0a4d4a1778f9c440006ce28ae5c | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-14 03:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Order.user to a CASCADE-deleting FK to AUTH_USER_MODEL
    with related_name='user'."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('OrderManagement', '0005_auto_20201107_0415'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.5 | 131 | 0.682645 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Order.user to a CASCADE-deleting FK to AUTH_USER_MODEL
    with related_name='user'."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('OrderManagement', '0005_auto_20201107_0415'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
| true | true |
f71bc4c5740b27cfc91942e00a13eeb0e9219ee7 | 8,569 | py | Python | SNAPPacket.py | MydonSolutions/SNAPpyPackets | 65565ad7edf98371bcfb2b6780a20680afb10347 | [
"Apache-2.0"
] | null | null | null | SNAPPacket.py | MydonSolutions/SNAPpyPackets | 65565ad7edf98371bcfb2b6780a20680afb10347 | [
"Apache-2.0"
] | null | null | null | SNAPPacket.py | MydonSolutions/SNAPpyPackets | 65565ad7edf98371bcfb2b6780a20680afb10347 | [
"Apache-2.0"
] | null | null | null | mask4bits = ((1 << 4) -1)
import numpy as np
mask8bits = ((1 << 8) -1)
mask16bits = ((1 << 16) -1)
mask64bits = ((1 << 64) -1)
class SNAPPacket(object):
"""
ATA SNAP Firmware Manual, Release 2.0.0
---------------------------------------
Section 2.3.2 "Output Data Formats: Voltage Packets", pg 5
https://github.com/realtimeradio/ata_snap/blob/nov-observing/docs/manual.pdf
struct voltage_packet {
uint8_t version;
uint8_t type;
uint16_t n_chans;
uint16_t chan;
uint16_t feng_id
uint64_t timestamp;
complex4 data[n_chans, 16, 2] // 4-bit real + 4-bit imaginary
};
• version; Firmware version: Bit [7] is always 1 for Voltage packets. The remaining bits contain a
compile-time defined firmware version, represented in the form bit[6].bits[5:3].bits[2:0]. This document
refers to firmware version 2.0.0.
• type; Packet type: Bit [0] is 1 if the axes of data payload are in order [slowest to fastest] channel x time x
polarization. This is currently the only supported mode. Bit [1] is 0 if the data payload comprises 4+4 bit
complex integers. This is currently the only supported mode.
• n_chans; Number of Channels: Indicates the number of frequency channels present in the payload of
this data packet.
• chan; Channel number: The index of the first channel present in this packet. For example, a channel
number c implies the packet contains channels c to c + n_chans - 1.
• feng_id; Antenna ID: A runtime configurable ID which uniquely associates a packet with a particular
SNAP board.
• timestamp; Sample number: The index of the first time sample present in this packet. For example, a
sample number 𝑠 implies the packet contains samples 𝑠 to 𝑠 + 15. Sample number can be referred to GPS
time through knowledge of the system sampling
"""
def __init__(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None,
packetBytes: bytearray = None,
byteorder: str = 'big'
):
self.bytearr = bytearray(8192+16)
self.payloadbytes = -1
if packetBytes is not None:
self.setHeader(
int.from_bytes(packetBytes[0:1], byteorder=byteorder),
int.from_bytes(packetBytes[1:2], byteorder=byteorder),
int.from_bytes(packetBytes[2:4], byteorder=byteorder),
int.from_bytes(packetBytes[4:6], byteorder=byteorder),
int.from_bytes(packetBytes[6:8], byteorder=byteorder),
int.from_bytes(packetBytes[8:16], byteorder=byteorder)
)
self.setSampleBytes(packetBytes[16:])
else:
if not self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber):
exit()
if not self.setSamples(samples):
exit()
def setHeader(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
update: bool = False
):
notAllArgs = False
if fwVersion is not None:
self.fwVersion = fwVersion & mask8bits
self.bytearr[0] = self.fwVersion
else:
notAllArgs = True
if packetType is not None:
self.packetType = (3 if packetType else 0) & mask8bits
self.bytearr[1] = self.packetType
else:
notAllArgs = True
if channels is not None:
self.channels = channels & mask16bits
self.bytearr[2] = (self.channels >> 8) & mask8bits
self.bytearr[3] = self.channels & mask8bits
else:
notAllArgs = True
if channelNum is not None:
self.channelNum = channelNum & mask16bits
self.bytearr[4] = (self.channelNum >> 8) & mask8bits
self.bytearr[5] = self.channelNum & mask8bits
else:
notAllArgs = True
if fEngineId is not None:
self.fEngineId = fEngineId & mask16bits
self.bytearr[6] = (self.fEngineId >> 8) & mask8bits
self.bytearr[7] = self.fEngineId & mask8bits
else:
notAllArgs = True
if packetNumber is not None:
self.packetNumber = packetNumber & mask64bits
self.bytearr[ 8] = (self.packetNumber >> 56) & mask8bits
self.bytearr[ 9] = (self.packetNumber >> 48) & mask8bits
self.bytearr[10] = (self.packetNumber >> 40) & mask8bits
self.bytearr[11] = (self.packetNumber >> 32) & mask8bits
self.bytearr[12] = (self.packetNumber >> 24) & mask8bits
self.bytearr[13] = (self.packetNumber >> 16) & mask8bits
self.bytearr[14] = (self.packetNumber >> 8) & mask8bits
self.bytearr[15] = self.packetNumber & mask8bits
else:
notAllArgs = True
self.payloadbytes = self.channels * 2 * 16
if notAllArgs and not update:
print("Please provide all of the header's arguments.");
self.payloadbytes = -1
return False
return True
def setSamples(self, samples):
if len(samples)/2 != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)/2
))
return False
for sampleI in range(self.payloadbytes):
self.bytearr[16+sampleI] = ((samples[2*sampleI] & mask4bits) << 4) + (samples[2*sampleI+1] & mask4bits)
return True
def setSampleBytes(self, samples):
if len(samples) != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)
))
return False
self.bytearr[16:self.payloadbytes] = samples
return True
def packet(self):
return self.bytearr[:16+self.payloadbytes]
def print(self, headerOnly=False):
if headerOnly:
print(self.headerStr())
else:
print(self.str())
def twosCompliment(self, value, bits):
return value if value < (1<<(bits-1)) else (value % (1<<(bits-1))) - (1<<(bits-1))
def str(self):
return """{}
\rSamples (0x): {}""".format(self.headerStr(),
[complex(self.twosCompliment(i>>4, 4) , self.twosCompliment(i & mask4bits, 4))
for i in self.bytearr[16:self.payloadbytes]])
def headerStr(self):
return """Firmware Version: {}
\rPacket type: {}
\rNumber of Channels: {}
\rChannel number: {}
\rAntenna ID: {}
\rPacket number: {}
\rPayload bytes: {}""".format(self.fwVersion,
self.packetType,
self.channels,
self.channelNum,
self.fEngineId,
self.packetNumber,
self.payloadbytes)
def update(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None
):
self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber, update=True)
if samples is not None:
self.setSamples(samples)
if __name__ == '__main__':
    # Round-trip self-test: build a packet, serialise it, re-parse it, and
    # compare the bytes.
    # Fix: 2 channels need 2 * 16 * 2 = 64 payload bytes, i.e. 128 4-bit
    # values; the old range(16 * 2 * 2) supplied only half, so
    # setSamples() rejected the payload and the constructor exited.
    testPacket = SNAPPacket(
        0,
        True,
        2,
        2,
        0,
        3735928559,
        [i % 16 for i in range(2 * 16 * 2 * 2)]
    )
    testPacket.print()
    testPacketBytes = testPacket.packet()
    dupPacket = SNAPPacket(packetBytes=testPacketBytes)
    dupPacket.print()
    dupPacketBytes = dupPacket.packet()
    print(testPacketBytes)
    print(dupPacketBytes)
# Bit masks used to clamp packet-header fields to their wire widths.
mask4bits = ((1 << 4) -1)
import numpy as np  # NOTE(review): np is unused in the visible code — confirm before removing.
mask8bits = ((1 << 8) -1)
mask16bits = ((1 << 16) -1)
mask64bits = ((1 << 64) -1)
class SNAPPacket(object):
def __init__(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None,
packetBytes: bytearray = None,
byteorder: str = 'big'
):
self.bytearr = bytearray(8192+16)
self.payloadbytes = -1
if packetBytes is not None:
self.setHeader(
int.from_bytes(packetBytes[0:1], byteorder=byteorder),
int.from_bytes(packetBytes[1:2], byteorder=byteorder),
int.from_bytes(packetBytes[2:4], byteorder=byteorder),
int.from_bytes(packetBytes[4:6], byteorder=byteorder),
int.from_bytes(packetBytes[6:8], byteorder=byteorder),
int.from_bytes(packetBytes[8:16], byteorder=byteorder)
)
self.setSampleBytes(packetBytes[16:])
else:
if not self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber):
exit()
if not self.setSamples(samples):
exit()
def setHeader(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
update: bool = False
):
notAllArgs = False
if fwVersion is not None:
self.fwVersion = fwVersion & mask8bits
self.bytearr[0] = self.fwVersion
else:
notAllArgs = True
if packetType is not None:
self.packetType = (3 if packetType else 0) & mask8bits
self.bytearr[1] = self.packetType
else:
notAllArgs = True
if channels is not None:
self.channels = channels & mask16bits
self.bytearr[2] = (self.channels >> 8) & mask8bits
self.bytearr[3] = self.channels & mask8bits
else:
notAllArgs = True
if channelNum is not None:
self.channelNum = channelNum & mask16bits
self.bytearr[4] = (self.channelNum >> 8) & mask8bits
self.bytearr[5] = self.channelNum & mask8bits
else:
notAllArgs = True
if fEngineId is not None:
self.fEngineId = fEngineId & mask16bits
self.bytearr[6] = (self.fEngineId >> 8) & mask8bits
self.bytearr[7] = self.fEngineId & mask8bits
else:
notAllArgs = True
if packetNumber is not None:
self.packetNumber = packetNumber & mask64bits
self.bytearr[ 8] = (self.packetNumber >> 56) & mask8bits
self.bytearr[ 9] = (self.packetNumber >> 48) & mask8bits
self.bytearr[10] = (self.packetNumber >> 40) & mask8bits
self.bytearr[11] = (self.packetNumber >> 32) & mask8bits
self.bytearr[12] = (self.packetNumber >> 24) & mask8bits
self.bytearr[13] = (self.packetNumber >> 16) & mask8bits
self.bytearr[14] = (self.packetNumber >> 8) & mask8bits
self.bytearr[15] = self.packetNumber & mask8bits
else:
notAllArgs = True
self.payloadbytes = self.channels * 2 * 16
if notAllArgs and not update:
print("Please provide all of the header's arguments.");
self.payloadbytes = -1
return False
return True
def setSamples(self, samples):
if len(samples)/2 != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)/2
))
return False
for sampleI in range(self.payloadbytes):
self.bytearr[16+sampleI] = ((samples[2*sampleI] & mask4bits) << 4) + (samples[2*sampleI+1] & mask4bits)
return True
def setSampleBytes(self, samples):
if len(samples) != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)
))
return False
self.bytearr[16:self.payloadbytes] = samples
return True
def packet(self):
return self.bytearr[:16+self.payloadbytes]
def print(self, headerOnly=False):
if headerOnly:
print(self.headerStr())
else:
print(self.str())
def twosCompliment(self, value, bits):
return value if value < (1<<(bits-1)) else (value % (1<<(bits-1))) - (1<<(bits-1))
def str(self):
return """{}
\rSamples (0x): {}""".format(self.headerStr(),
[complex(self.twosCompliment(i>>4, 4) , self.twosCompliment(i & mask4bits, 4))
for i in self.bytearr[16:self.payloadbytes]])
def headerStr(self):
return """Firmware Version: {}
\rPacket type: {}
\rNumber of Channels: {}
\rChannel number: {}
\rAntenna ID: {}
\rPacket number: {}
\rPayload bytes: {}""".format(self.fwVersion,
self.packetType,
self.channels,
self.channelNum,
self.fEngineId,
self.packetNumber,
self.payloadbytes)
def update(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None
):
self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber, update=True)
if samples is not None:
self.setSamples(samples)
if __name__ == '__main__':
    # Round-trip self-test: build a packet, serialise it, re-parse it, and
    # compare the bytes.
    # Fix: 2 channels need 2 * 16 * 2 = 64 payload bytes, i.e. 128 4-bit
    # values; the old range(16 * 2 * 2) supplied only half, so
    # setSamples() rejected the payload and the constructor exited.
    testPacket = SNAPPacket(
        0,
        True,
        2,
        2,
        0,
        3735928559,
        [i % 16 for i in range(2 * 16 * 2 * 2)]
    )
    testPacket.print()
    testPacketBytes = testPacket.packet()
    dupPacket = SNAPPacket(packetBytes=testPacketBytes)
    dupPacket.print()
    dupPacketBytes = dupPacket.packet()
    print(testPacketBytes)
    print(dupPacketBytes)
| true | true |
f71bc5652835fe64720685b82cd43fc0850879a0 | 1,794 | py | Python | tools/ncbi_blast_plus/check_no_duplicates.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/ncbi_blast_plus/check_no_duplicates.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | tools/ncbi_blast_plus/check_no_duplicates.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
"""Check for duplicate sequence identifiers in FASTA files.
This is run as a pre-check before makeblastdb, in order to avoid
a regression bug in BLAST+ 2.2.28 which fails to catch this. See:
http://blastedbio.blogspot.co.uk/2012/10/my-ids-not-good-enough-for-ncbi-blast.html
This script takes one or more FASTA filenames as input, and
will return a non-zero error if any duplicate identifiers
are found.
"""
import gzip
import os
import sys
# CLI body: print version for -v/--version, otherwise scan each FASTA
# (optionally gzipped) file given on the command line and exit non-zero
# if any sequence identifier repeats.
if "-v" in sys.argv or "--version" in sys.argv:
    print("v0.0.23")
    sys.exit(0)

identifiers = set()
files = 0
for filename in sys.argv[1:]:
    if not os.path.isfile(filename):
        sys.stderr.write("Missing FASTA file %r\n" % filename)
        sys.exit(2)
    files += 1
    # Sniff the first two bytes to decide how to open the file.
    with open(filename, "rb") as binary_handle:
        magic = binary_handle.read(2)
    if not magic:
        # Empty file, special case
        continue
    elif magic == b'\x1f\x8b':
        # Gzipped
        handle = gzip.open(filename, "rt")
    elif magic[0:1] == b">":
        # Not gzipped, should be plain FASTA
        handle = open(filename, "r")
    else:
        # Fix: previously no branch assigned `handle` here, so a file that
        # was neither gzipped nor started with '>' crashed with a
        # NameError on the loop below instead of reporting an error.
        sys.stderr.write("File %r does not look like FASTA or gzipped "
                         "FASTA\n" % filename)
        sys.exit(2)
    for line in handle:
        if line.startswith(">"):
            # The split will also take care of the new line character,
            # e.g. ">test\n" and ">test description here\n" both give "test"
            seq_id = line[1:].split(None, 1)[0]
            if seq_id in identifiers:
                handle.close()
                sys.exit("Repeated identifiers, e.g. %r" % seq_id)
            identifiers.add(seq_id)
    handle.close()
if not files:
    sys.stderr.write("No FASTA files given to check for duplicates\n")
    sys.exit(3)
elif files == 1:
    print("%i sequences" % len(identifiers))
else:
    print("%i sequences in %i FASTA files" % (len(identifiers), files))
| 30.931034 | 83 | 0.627648 |
import gzip
import os
import sys
# CLI body: print version for -v/--version, otherwise scan each FASTA
# (optionally gzipped) file given on the command line and exit non-zero
# if any sequence identifier repeats.
if "-v" in sys.argv or "--version" in sys.argv:
    print("v0.0.23")
    sys.exit(0)

identifiers = set()
files = 0
for filename in sys.argv[1:]:
    if not os.path.isfile(filename):
        sys.stderr.write("Missing FASTA file %r\n" % filename)
        sys.exit(2)
    files += 1
    # Sniff the first two bytes to decide how to open the file.
    with open(filename, "rb") as binary_handle:
        magic = binary_handle.read(2)
    if not magic:
        # Empty file, special case.
        continue
    elif magic == b'\x1f\x8b':
        # Gzipped FASTA.
        handle = gzip.open(filename, "rt")
    elif magic[0:1] == b">":
        # Not gzipped, should be plain FASTA.
        handle = open(filename, "r")
    else:
        # Fix: previously no branch assigned `handle` here, so a file that
        # was neither gzipped nor started with '>' crashed with a
        # NameError on the loop below instead of reporting an error.
        sys.stderr.write("File %r does not look like FASTA or gzipped "
                         "FASTA\n" % filename)
        sys.exit(2)
    for line in handle:
        if line.startswith(">"):
            # The split also removes the newline: ">test\n" and
            # ">test description here\n" both give "test".
            seq_id = line[1:].split(None, 1)[0]
            if seq_id in identifiers:
                handle.close()
                sys.exit("Repeated identifiers, e.g. %r" % seq_id)
            identifiers.add(seq_id)
    handle.close()
if not files:
    sys.stderr.write("No FASTA files given to check for duplicates\n")
    sys.exit(3)
elif files == 1:
    print("%i sequences" % len(identifiers))
else:
    print("%i sequences in %i FASTA files" % (len(identifiers), files))
| true | true |
f71bc619e7d4702a0d959637f7bce8d52e79debf | 3,636 | py | Python | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | """
Laser Commands are a middle language of commands for spooling and interpreting.
NOTE: Never use the integer value, only the command name. The integer values are
permitted to change.
COMMAND_PLOT: takes a plot object to generate simple plot commands.
COMMAND_RASTER: takes a raster plot object which generates simple raster commands.
Simple plot values are x, y, on. Where x and y are the position in absolute values and on is whether the laser fires
for that particular move command. The plot is expected to use svgelements code, passed to zinglplotter code.
The raster is expected to used RasterBuilder which should be able to plot any raster in any fashion.
A COMMAND_RESUME would have to be issued in realtime since in a paused state the commands are not processed.
"""
# --- Spooled commands: laser state ---
COMMAND_LASER_OFF = 1  # Turns laser off
COMMAND_LASER_ON = 2  # Turns laser on
COMMAND_LASER_DISABLE = 5  # Disables the laser
COMMAND_LASER_ENABLE = 6  # Enables the laser
# --- Spooled commands: motion ---
COMMAND_MOVE = 10  # Performs a line move
COMMAND_CUT = 11  # Performs a line cut.
COMMAND_WAIT = 20  # Pauses the given time in seconds. (floats accepted).
COMMAND_WAIT_FINISH = 21  # WAIT until the buffer is finished.
# --- Spooled commands: interpreter modes ---
COMMAND_MODE_RAPID = 50
COMMAND_MODE_PROGRAM = 51
COMMAND_MODE_FINISHED = 52
# --- Spooled commands: plotting ---
COMMAND_PLOT = 100  # Takes a plot object
COMMAND_RASTER = 101  # Takes a raster plot object.
# --- Spooled commands: device settings ---
COMMAND_SET_SPEED = 200  # sets the speed for the device
COMMAND_SET_POWER = 201  # sets the power. Out of 1000. Unknown power method.
# NOTE(review): COMMAND_SET_PPI and COMMAND_SET_PWM share the value 203 —
# confirm whether one of them was meant to be a distinct code.
COMMAND_SET_PPI = 203  # sets the PPI power. Out of 1000.
COMMAND_SET_PWM = 203  # sets the PWM power. Out of 1000.
COMMAND_SET_STEP = 205  # sets the raster step for the device
COMMAND_SET_DIRECTION = 209  # sets the directions for the device.
COMMAND_SET_OVERSCAN = 206
COMMAND_SET_D_RATIO = 207  # sets the diagonal_ratio for the device
COMMAND_SET_ACCELERATION = 208  # sets the acceleration for the device 1-4
COMMAND_SET_INCREMENTAL = 210  # sets the commands to be relative to current position
COMMAND_SET_ABSOLUTE = 211  # sets the commands to be absolute positions.
COMMAND_SET_POSITION = 220  # Without moving sets the current position to the given coord.
# --- Spooled commands: rail / misc ---
COMMAND_HOME = 300  # Homes the device
COMMAND_LOCK = 301  # Locks the rail
COMMAND_UNLOCK = 302  # Unlocks the rail.
COMMAND_BEEP = 320  # Beep.
COMMAND_FUNCTION = 350  # Execute the function given by this command. Blocking.
COMMAND_SIGNAL = 360  # Sends the signal, given: "signal_name", operands.
# --- Realtime commands: processed even while the spooler is paused ---
REALTIME_RESET = 1000  # Resets the state, purges buffers
REALTIME_PAUSE = 1010  # Issue a pause command.
REALTIME_RESUME = 1020  # Issue a resume command.
REALTIME_STATUS = 1030  # Issue a status command.
REALTIME_SAFETY_DOOR = 1040  # Issues a forced safety_door state.
REALTIME_JOG_CANCEL = 1050  # Issues a jog cancel. This should cancel any jogging being processed.
REALTIME_SPEED_PERCENT = 1060  # Set the speed to this percent value of total.
REALTIME_RAPID_PERCENT = 1070  # Sets the rapid speed to this percent value of total.
REALTIME_POWER_PERCENT = 1080  # Sets the power to this percent value of total.
REALTIME_SPEED = 1061  # Set the speed to this percent value of total.
REALTIME_RAPID = 1071  # Sets the rapid speed to this percent value of total.
REALTIME_POWER = 1081  # Sets the power to this percent value of total.
REALTIME_OVERSCAN = 1091  # Sets the overscan amount to this value.
REALTIME_LASER_DISABLE = 1100  # Disables the laser.
REALTIME_LASER_ENABLE = 1101  # Enables the laser.
REALTIME_FLOOD_COOLANT = 1210  # Toggle flood coolant
REALTIME_MIST_COOLANT = 1220  # Toggle mist coolant.
| 52.695652 | 117 | 0.764851 |
# --- Spooled commands: laser state ---
COMMAND_LASER_OFF = 1  # turns laser off
COMMAND_LASER_ON = 2  # turns laser on
COMMAND_LASER_DISABLE = 5  # disables the laser
COMMAND_LASER_ENABLE = 6  # enables the laser
# --- Spooled commands: motion ---
COMMAND_MOVE = 10  # performs a line move
COMMAND_CUT = 11  # performs a line cut
COMMAND_WAIT = 20  # pauses for the given time in seconds
COMMAND_WAIT_FINISH = 21  # waits until the buffer is finished
# --- Spooled commands: interpreter modes ---
COMMAND_MODE_RAPID = 50
COMMAND_MODE_PROGRAM = 51
COMMAND_MODE_FINISHED = 52
# --- Spooled commands: plotting ---
COMMAND_PLOT = 100  # takes a plot object
COMMAND_RASTER = 101  # takes a raster plot object
# --- Spooled commands: device settings ---
COMMAND_SET_SPEED = 200  # sets the speed for the device
COMMAND_SET_POWER = 201  # sets the power, out of 1000
# NOTE(review): COMMAND_SET_PPI and COMMAND_SET_PWM share the value 203 —
# confirm whether one of them was meant to be a distinct code.
COMMAND_SET_PPI = 203  # sets the PPI power, out of 1000
COMMAND_SET_PWM = 203  # sets the PWM power, out of 1000
COMMAND_SET_STEP = 205  # sets the raster step for the device
COMMAND_SET_DIRECTION = 209  # sets the directions for the device
COMMAND_SET_OVERSCAN = 206
COMMAND_SET_D_RATIO = 207  # sets the diagonal ratio for the device
COMMAND_SET_ACCELERATION = 208  # sets the acceleration for the device (1-4)
COMMAND_SET_INCREMENTAL = 210  # commands become relative to current position
COMMAND_SET_ABSOLUTE = 211  # commands become absolute positions
COMMAND_SET_POSITION = 220  # sets the current position without moving
# --- Spooled commands: rail / misc ---
COMMAND_HOME = 300  # homes the device
COMMAND_LOCK = 301  # locks the rail
COMMAND_UNLOCK = 302  # unlocks the rail
COMMAND_BEEP = 320  # beep
COMMAND_FUNCTION = 350  # execute the given function (blocking)
COMMAND_SIGNAL = 360  # sends the signal: "signal_name", operands
# --- Realtime commands: processed even while the spooler is paused ---
REALTIME_RESET = 1000  # resets the state, purges buffers
REALTIME_PAUSE = 1010  # issue a pause command
REALTIME_RESUME = 1020  # issue a resume command
REALTIME_STATUS = 1030  # issue a status command
REALTIME_SAFETY_DOOR = 1040  # issues a forced safety-door state
REALTIME_JOG_CANCEL = 1050  # cancels any jogging being processed
REALTIME_SPEED_PERCENT = 1060  # set speed to this percent of total
REALTIME_RAPID_PERCENT = 1070  # set rapid speed to this percent of total
REALTIME_POWER_PERCENT = 1080  # set power to this percent of total
REALTIME_SPEED = 1061  # set the speed
REALTIME_RAPID = 1071  # set the rapid speed
REALTIME_POWER = 1081  # set the power
REALTIME_OVERSCAN = 1091  # set the overscan amount
REALTIME_LASER_DISABLE = 1100  # disables the laser
REALTIME_LASER_ENABLE = 1101  # enables the laser
REALTIME_FLOOD_COOLANT = 1210  # toggle flood coolant
REALTIME_MIST_COOLANT = 1220  # toggle mist coolant
| true | true |
f71bc6339bf87bae3cb6fd9ee3932e6b87b290b9 | 4,496 | py | Python | tools/demo.py | Nikym/PoseCNN | a63f51ee79adfb0a5be9d9aaac8e22cd8d55efbb | [
"MIT"
] | 655 | 2018-03-21T19:55:45.000Z | 2022-03-25T20:41:21.000Z | tools/demo.py | SergioRAgostinho/PoseCNN | da9eaae850eed7521a2a48a4d27474d655caab42 | [
"MIT"
] | 122 | 2018-04-04T13:57:49.000Z | 2022-03-18T09:28:44.000Z | tools/demo.py | SergioRAgostinho/PoseCNN | da9eaae850eed7521a2a48a4d27474d655caab42 | [
"MIT"
] | 226 | 2018-03-22T01:40:04.000Z | 2022-03-17T11:56:14.000Z | #!/usr/bin/env python
# --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""Test a FCN on an image database."""
import _init_paths
from fcn.test import test_net_images
from fcn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
import argparse
import pprint
import time, os, sys
import tensorflow as tf
import os.path as osp
import numpy as np
def _str2bool(value):
    """Strictly parse a command-line boolean string.

    argparse's ``type=bool`` is a classic trap: ``bool("False")`` is True
    because any non-empty string is truthy.  This converter accepts the
    usual spellings and rejects everything else.

    :raises argparse.ArgumentTypeError: if *value* is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def parse_args():
    """
    Parse input arguments.

    Prints usage and exits with status 1 when invoked with no arguments.

    :returns: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
                        default=0, type=int)
    parser.add_argument('--weights', dest='pretrained_model',
                        help='pretrained model',
                        default=None, type=str)
    parser.add_argument('--model', dest='model',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    # Fixed: was type=bool, which treated any non-empty value (even "False")
    # as True.  _str2bool keeps the value-taking CLI shape but parses strictly.
    parser.add_argument('--wait', dest='wait',
                        help='wait until net file exists',
                        default=True, type=_str2bool)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to test',
                        default='shapenet_scene_val', type=str)
    parser.add_argument('--network', dest='network_name',
                        help='name of the network',
                        default=None, type=str)
    parser.add_argument('--rig', dest='rig_name',
                        help='name of the camera rig file',
                        default=None, type=str)
    parser.add_argument('--cad', dest='cad_name',
                        help='name of the CAD file',
                        default=None, type=str)
    parser.add_argument('--kfusion', dest='kfusion',
                        help='run kinect fusion or not',
                        default=False, type=_str2bool)
    parser.add_argument('--pose', dest='pose_name',
                        help='name of the pose files',
                        default=None, type=str)
    parser.add_argument('--background', dest='background_name',
                        help='name of the background file',
                        default=None, type=str)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # NOTE(review): this script is Python 2 only (print statements, xrange)
    # and targets TensorFlow 1.x (tf.Session, tf.GPUOptions).
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)

    print('Using config:')
    pprint.pprint(cfg)

    # Model checkpoint basename is reused as the output/weights identifier.
    weights_filename = os.path.splitext(os.path.basename(args.model))[0]

    imdb = get_imdb(args.imdb_name)

    # construct the filenames: pairs of {:06d}-color.png / {:06d}-depth.png,
    # numbered from 1, under data/demo_images/.
    root = 'data/demo_images/'
    num = 5
    rgb_filenames = []
    depth_filenames = []
    for i in xrange(num):
        filename = root + '{:06d}-color.png'.format(i+1)
        print filename
        rgb_filenames.append(filename)
        filename = root + '{:06d}-depth.png'.format(i+1)
        print filename
        depth_filenames.append(filename)

    # construct meta data: hard-coded camera intrinsics; depth PNGs are
    # presumably stored in 0.1mm units (factor_depth 10000) — confirm with
    # the dataset documentation.
    K = np.array([[1066.778, 0, 312.9869], [0, 1067.487, 241.3109], [0, 0, 1]])
    meta_data = dict({'intrinsic_matrix': K, 'factor_depth': 10000.0})
    print meta_data

    cfg.GPU_ID = args.gpu_id
    device_name = '/gpu:{:d}'.format(args.gpu_id)
    print device_name

    # Test-time overrides: single-step, non-trainable network.
    cfg.TRAIN.NUM_STEPS = 1
    cfg.TRAIN.GRID_SIZE = cfg.TEST.GRID_SIZE
    cfg.TRAIN.TRAINABLE = False

    cfg.RIG = args.rig_name
    cfg.CAD = args.cad_name
    cfg.POSE = args.pose_name
    cfg.BACKGROUND = args.background_name
    cfg.IS_TRAIN = False

    # Deferred import: get_network reads cfg, which must be configured first.
    from networks.factory import get_network
    network = get_network(args.network_name)
    print 'Use network `{:s}` in training'.format(args.network_name)

    # start a session (TF1-style), capping GPU memory at 60%.
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
    saver.restore(sess, args.model)
    # NOTE(review): .format() here is applied to the *return value* of the
    # print statement, so the literal '{:s}' is printed and the formatted
    # string is discarded; should be print('...'.format(args.model)).
    print ('Loading model weights from {:s}').format(args.model)

    test_net_images(sess, network, imdb, weights_filename, rgb_filenames, depth_filenames, meta_data)
| 34.584615 | 101 | 0.59008 |
"""Test a FCN on an image database."""
import _init_paths
from fcn.test import test_net_images
from fcn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
import argparse
import pprint
import time, os, sys
import tensorflow as tf
import os.path as osp
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='pretrained model',
default=None, type=str)
parser.add_argument('--model', dest='model',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='shapenet_scene_val', type=str)
parser.add_argument('--network', dest='network_name',
help='name of the network',
default=None, type=str)
parser.add_argument('--rig', dest='rig_name',
help='name of the camera rig file',
default=None, type=str)
parser.add_argument('--cad', dest='cad_name',
help='name of the CAD file',
default=None, type=str)
parser.add_argument('--kfusion', dest='kfusion',
help='run kinect fusion or not',
default=False, type=bool)
parser.add_argument('--pose', dest='pose_name',
help='name of the pose files',
default=None, type=str)
parser.add_argument('--background', dest='background_name',
help='name of the background file',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
weights_filename = os.path.splitext(os.path.basename(args.model))[0]
imdb = get_imdb(args.imdb_name)
root = 'data/demo_images/'
num = 5
rgb_filenames = []
depth_filenames = []
for i in xrange(num):
filename = root + '{:06d}-color.png'.format(i+1)
print filename
rgb_filenames.append(filename)
filename = root + '{:06d}-depth.png'.format(i+1)
print filename
depth_filenames.append(filename)
K = np.array([[1066.778, 0, 312.9869], [0, 1067.487, 241.3109], [0, 0, 1]])
meta_data = dict({'intrinsic_matrix': K, 'factor_depth': 10000.0})
print meta_data
cfg.GPU_ID = args.gpu_id
device_name = '/gpu:{:d}'.format(args.gpu_id)
print device_name
cfg.TRAIN.NUM_STEPS = 1
cfg.TRAIN.GRID_SIZE = cfg.TEST.GRID_SIZE
cfg.TRAIN.TRAINABLE = False
cfg.RIG = args.rig_name
cfg.CAD = args.cad_name
cfg.POSE = args.pose_name
cfg.BACKGROUND = args.background_name
cfg.IS_TRAIN = False
from networks.factory import get_network
network = get_network(args.network_name)
print 'Use network `{:s}` in training'.format(args.network_name)
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
saver.restore(sess, args.model)
print ('Loading model weights from {:s}').format(args.model)
test_net_images(sess, network, imdb, weights_filename, rgb_filenames, depth_filenames, meta_data)
| false | true |
f71bc67e5ca5a22411122ca46bcf374816eb0293 | 2,950 | py | Python | alipay/aop/api/domain/AlipayFundAuthOperationCancelModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayFundAuthOperationCancelModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayFundAuthOperationCancelModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundAuthOperationCancelModel(object):
    """Request model for the Alipay fund-auth operation-cancel API.

    Each field is an optional value; only truthy fields are serialized by
    :meth:`to_alipay_dict`.  Attribute names double as the wire keys.
    """

    # Serialization order and key names for the dict form.
    _FIELDS = ("auth_no", "operation_id", "out_order_no",
               "out_request_no", "remark")

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    @property
    def auth_no(self):
        return self._auth_no

    @auth_no.setter
    def auth_no(self, value):
        self._auth_no = value

    @property
    def operation_id(self):
        return self._operation_id

    @operation_id.setter
    def operation_id(self, value):
        self._operation_id = value

    @property
    def out_order_no(self):
        return self._out_order_no

    @out_order_no.setter
    def out_order_no(self, value):
        self._out_order_no = value

    @property
    def out_request_no(self):
        return self._out_request_no

    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value

    @property
    def remark(self):
        return self._remark

    @remark.setter
    def remark(self, value):
        self._remark = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Nested objects exposing ``to_alipay_dict`` are serialized recursively.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict ``d``; returns None for falsy input."""
        if not d:
            return None
        o = AlipayFundAuthOperationCancelModel()
        for field in AlipayFundAuthOperationCancelModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| 29.207921 | 79 | 0.594576 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundAuthOperationCancelModel(object):
def __init__(self):
self._auth_no = None
self._operation_id = None
self._out_order_no = None
self._out_request_no = None
self._remark = None
@property
def auth_no(self):
return self._auth_no
@auth_no.setter
def auth_no(self, value):
self._auth_no = value
@property
def operation_id(self):
return self._operation_id
@operation_id.setter
def operation_id(self, value):
self._operation_id = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
def to_alipay_dict(self):
params = dict()
if self.auth_no:
if hasattr(self.auth_no, 'to_alipay_dict'):
params['auth_no'] = self.auth_no.to_alipay_dict()
else:
params['auth_no'] = self.auth_no
if self.operation_id:
if hasattr(self.operation_id, 'to_alipay_dict'):
params['operation_id'] = self.operation_id.to_alipay_dict()
else:
params['operation_id'] = self.operation_id
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundAuthOperationCancelModel()
if 'auth_no' in d:
o.auth_no = d['auth_no']
if 'operation_id' in d:
o.operation_id = d['operation_id']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
if 'remark' in d:
o.remark = d['remark']
return o
| true | true |
f71bc6ac3785acf1ddb69695bfccc9bb6edc5daf | 155 | py | Python | pangram/pangram.py | pierrebeaucamp/Exercism-Python | 910b764c6726e9f131fb3a394c70d9b5bb167be9 | [
"Unlicense"
] | null | null | null | pangram/pangram.py | pierrebeaucamp/Exercism-Python | 910b764c6726e9f131fb3a394c70d9b5bb167be9 | [
"Unlicense"
] | null | null | null | pangram/pangram.py | pierrebeaucamp/Exercism-Python | 910b764c6726e9f131fb3a394c70d9b5bb167be9 | [
"Unlicense"
] | null | null | null | import string
import regex
def is_pangram(i):
    """Return True if *i* contains every letter a-z at least once.

    Case-insensitive; non-letter characters are ignored.
    """
    # Set containment replaces the original regex-strip-and-sort approach:
    # non-letter characters simply never appear in the alphabet set, so no
    # filtering (and no third-party `regex` dependency) is needed.
    return set(string.ascii_lowercase) <= set(i.lower())
| 22.142857 | 62 | 0.645161 | import string
import regex
def is_pangram(i):
    """Return True if *i* contains every letter a-z at least once.

    Case-insensitive; non-letter characters are ignored.
    """
    # Set containment replaces the original regex-strip-and-sort approach:
    # non-letter characters simply never appear in the alphabet set, so no
    # filtering (and no third-party `regex` dependency) is needed.
    return set(string.ascii_lowercase) <= set(i.lower())
| true | true |
f71bc6f28b8786902b54ebafec07bd4a22b0c3b6 | 763 | py | Python | my_blog/my_blog/apps/blogs/migrations/0002_articlecomment.py | TNTfeizai/my_Blog | 71c3720a27fab49a0d25d21029bdeb5f68e4965b | [
"MIT"
] | null | null | null | my_blog/my_blog/apps/blogs/migrations/0002_articlecomment.py | TNTfeizai/my_Blog | 71c3720a27fab49a0d25d21029bdeb5f68e4965b | [
"MIT"
] | 7 | 2020-06-06T00:53:43.000Z | 2022-03-12T00:19:03.000Z | my_blog/my_blog/apps/blogs/migrations/0002_articlecomment.py | TNTfeizai/my_Blog | 71c3720a27fab49a0d25d21029bdeb5f68e4965b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2020-03-13 02:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ArticleComment table.
    # Do not edit applied migrations; add a new migration instead.

    dependencies = [
        ('blogs', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ArticleComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
                ('username', models.CharField(max_length=50)),
                # verbose_name '创建时间' is zh-CN for "creation time".
                ('createtime', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
            ],
        ),
    ]
| 29.346154 | 114 | 0.605505 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blogs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ArticleComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('username', models.CharField(max_length=50)),
('createtime', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
],
),
]
| true | true |
f71bc746826b79bd70dea70d0dd90cfcbd116ff4 | 2,525 | py | Python | confirm.py | Pckool/GCG | cee786d04ea30f3995e910bca82635f442b2a6a8 | [
"MIT"
] | null | null | null | confirm.py | Pckool/GCG | cee786d04ea30f3995e910bca82635f442b2a6a8 | [
"MIT"
] | null | null | null | confirm.py | Pckool/GCG | cee786d04ea30f3995e910bca82635f442b2a6a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'confirm.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # pyuic5-generated confirmation dialog (see header): a centered label
    # plus a Yes/Cancel button box.  Regenerating from confirm.ui will
    # discard any edits made here.

    def setupUi(self, Dialog):
        """Build the widget tree and wire signals onto *Dialog*."""
        Dialog.setObjectName("Dialog")
        # Fixed-ish size: min 400x120, max 400x150.
        Dialog.resize(400, 120)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
        Dialog.setSizePolicy(sizePolicy)
        Dialog.setMinimumSize(QtCore.QSize(400, 120))
        Dialog.setMaximumSize(QtCore.QSize(400, 150))
        font = QtGui.QFont()
        font.setFamily("Gadugi")
        Dialog.setFont(font)
        self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 101))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        # Message label; text is filled in by the caller (default "-").
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setFamily("Gadugi")
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        # Yes -> accept(), Cancel -> reject().
        self.buttonBox = QtWidgets.QDialogButtonBox(self.verticalLayoutWidget)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Yes)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)

        self.retranslateUi(Dialog)
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set translatable strings (window title, label placeholder)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "-"))
| 44.298246 | 122 | 0.717624 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 120)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(400, 120))
Dialog.setMaximumSize(QtCore.QSize(400, 150))
font = QtGui.QFont()
font.setFamily("Gadugi")
Dialog.setFont(font)
self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 101))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Gadugi")
font.setPointSize(12)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.buttonBox = QtWidgets.QDialogButtonBox(self.verticalLayoutWidget)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Yes)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "-"))
| true | true |
f71bc76ba618eb3ac748680ddd30d14592adde8c | 3,556 | py | Python | src/bindings/python/DocStrings/CDLTransform.py | jmertic/OpenColorIO | 9b18fd69f981288a6a3640e283b8d9968a15423e | [
"BSD-3-Clause"
] | 1 | 2019-11-18T21:49:25.000Z | 2019-11-18T21:49:25.000Z | src/bindings/python/DocStrings/CDLTransform.py | KevinJW/OpenColorIO | 412aa7ba273616867e607de646e4975791198812 | [
"BSD-3-Clause"
] | 1 | 2020-06-12T19:10:09.000Z | 2020-06-12T19:10:09.000Z | src/bindings/python/DocStrings/CDLTransform.py | KevinJW/OpenColorIO | 412aa7ba273616867e607de646e4975791198812 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
class CDLTransform:
    """
    CDLTransform

    Documentation stub mirroring the C++ binding of OpenColorIO's
    CDLTransform (ASC CDL: Slope/Offset/Power + Saturation).  All methods
    here are no-op placeholders carrying the docstrings only.
    """
    def __init__(self):
        pass
    def equals(self, cdl):
        """
        equals(cdl)

        :param cdl: a cdl transform
        :type cdl: :py:class:`PyOpenColorIO.CDLTransform`

        Returns True if cdl is equal to :py:class:`PyOpenColorIO.CDLTransform`.
        """
        pass
    def validate(self):
        """
        validate()

        Throw if :py:class:`PyOpenColorIO.CDLTransform` contains illegal parameters.
        """
        pass
    def CreateFromFile(self, src, cccid):
        """
        CreateFromFile(src, cccid)

        Loads a CDL from file *src*, selecting the entry matching *cccid* —
        presumably a .cc/.ccc file per the OCIO API; confirm against the
        binding source.
        """
        pass
    def getXML(self):
        """
        getXML()

        Returns the CDL serialized as an XML string.
        """
        pass
    def setXML(self, xmltext):
        """
        setXML(xmltext)

        Sets the CDL from an XML string.
        """
        pass
    def getSlope(self):
        """
        getSlope()

        Returns the slope ('S' part of SOP) as a list of three floats.
        """
        pass
    def getOffset(self):
        """
        getOffset()

        Returns the offset ('O' part of SOP) as a list of three floats.
        """
        pass
    def getPower(self):
        """
        getPower()

        Returns the power ('P' part of SOP) as a list of three floats.
        """
        pass
    def getSOP(self):
        """
        getSOP()

        Returns the combined SOP values as a list of nine floats.
        """
        pass
    def getSat(self):
        """
        getSat()

        Returns the saturation as a float.
        """
        pass
    def setSlope(self, slope):
        """
        setSlope(pyData)

        Sets the slope ('S' part of SOP) in :py:class:`PyOpenColorIO.CDLTransform`.

        :param pyData:
        :type pyData: object
        """
        pass
    def setOffset(self, offset):
        """
        setOffset(pyData)

        Sets the offset ('O' part of SOP) in :py:class:`PyOpenColorIO.CDLTransform`.

        :param pyData: list of three floats
        :type pyData: object
        """
        pass
    def setPower(self, power):
        """
        setPower(pyData)

        Sets the power ('P' part of SOP) in :py:class:`PyOpenColorIO.CDLTransform`.

        :param pyData: list of three floats
        :type pyData: object
        """
        pass
    def setSOP(self, sop):
        """
        setSOP(pyData)

        Sets SOP in :py:class:`PyOpenColorIO.CDLTransform`.

        :param pyData: list of nine floats
        :type pyData: object
        """
        pass
    def setSat(self, sat):
        """
        setSat(pyData)

        Sets SAT (saturation) in :py:class:`PyOpenColorIO.CDLTransform`.

        :param pyData: saturation
        :type pyData: float
        """
        pass
    def getSatLumaCoefs(self):
        """
        getSatLumaCoefs()

        Returns the SAT (saturation) and luma coefficients in :py:class:`PyOpenColorIO.CDLTransform`.

        :return: saturation and luma coefficients
        :rtype: list of floats
        """
        pass
    def getID(self):
        """
        getID()

        Returns the ID from :py:class:`PyOpenColorIO.CDLTransform`.

        :return: ID
        :rtype: string
        """
        pass
    def setID(self, id):
        """
        setID(str)

        Sets the ID in :py:class:`PyOpenColorIO.CDLTransform`.

        :param str: ID
        :type str: string
        """
        pass
    def getDescription(self):
        """
        getDescription()

        Returns the description of :py:class:`PyOpenColorIO.CDLTransform`.

        :return: description
        :rtype: string
        """
        pass
    def setDescription(self, desc):
        """
        setDescription(str)

        Sets the description of :py:class:`PyOpenColorIO.CDLTransform`.

        :param str: description
        :type str: string
        """
        pass
| 21.815951 | 101 | 0.497469 |
class CDLTransform:
def __init__(self):
pass
def equals(self, cdl):
pass
def validate(self):
pass
def CreateFromFile(self, src, cccid):
pass
def getXML(self):
pass
def setXML(self, xmltext):
pass
def getSlope(self):
pass
def getOffset(self):
pass
def getPower(self):
pass
def getSOP(self):
pass
def getSat(self):
pass
def setSlope(self, slope):
pass
def setOffset(self, offset):
pass
def setPower(self, power):
pass
def setSOP(self, sop):
pass
def setSat(self, sat):
pass
def getSatLumaCoefs(self):
pass
def getID(self):
pass
def setID(self, id):
pass
def getDescription(self):
pass
def setDescription(self, desc):
pass
| true | true |
f71bc81b29ea3c83e679fc75c90333cfc9ee5e00 | 3,351 | py | Python | django_jinja/base.py | akx/django-jinja | 2f1c0a74990d6d564350079692a8307c8c6f5b9f | [
"BSD-3-Clause"
] | 210 | 2015-05-21T16:54:05.000Z | 2022-01-06T01:24:52.000Z | django_jinja/base.py | akx/django-jinja | 2f1c0a74990d6d564350079692a8307c8c6f5b9f | [
"BSD-3-Clause"
] | 139 | 2015-05-15T11:01:03.000Z | 2022-03-29T21:13:04.000Z | django_jinja/base.py | akx/django-jinja | 2f1c0a74990d6d564350079692a8307c8c6f5b9f | [
"BSD-3-Clause"
] | 84 | 2015-05-15T09:35:22.000Z | 2021-09-03T13:14:44.000Z | import re
import os.path as path
from importlib import import_module
from django.template.context import BaseContext
def dict_from_context(context):
    """
    Converts context to native python dict.

    Django's layered ``BaseContext`` is flattened recursively, oldest layer
    first, so newer layers override older keys; anything else is handed to
    ``dict()`` directly.
    """
    if not isinstance(context, BaseContext):
        return dict(context)
    flattened = {}
    for layer in reversed(list(context)):
        flattened.update(dict_from_context(layer))
    return flattened
def _iter_templatetags_modules_list():
    """
    Get list of modules that contains templatetags
    submodule.

    Yields ``(app_path, templatetags_dir)`` tuples for every installed
    Django app whose ``templatetags`` package imports successfully.
    """
    from django.apps import apps
    all_modules = [x.name for x in apps.get_app_configs()]

    for app_path in all_modules:
        try:
            mod = import_module(app_path + ".templatetags")
            # Empty folders can lead to unexpected behavior with Python 3.
            # We make sure to have the `__file__` attribute.
            if getattr(mod, '__file__', None) is not None:
                yield (app_path, path.dirname(mod.__file__))
        except ImportError:
            # Apps without a templatetags package are simply skipped.
            pass
def patch_django_for_autoescape():
    """
    Patch django modules for make them compatible with
    jinja autoescape implementation.

    Jinja2's autoescape honors the ``__html__`` protocol (markupsafe);
    Django's safe-string and form classes predate it, so each class that
    should render unescaped gets an ``__html__`` method added — only when
    it does not already define one.
    """
    from django.utils import safestring
    from django.forms.boundfield import BoundField
    from django.forms.utils import ErrorList
    from django.forms.utils import ErrorDict

    def _add_html_method(cls):
        # Idempotent: never clobber an existing __html__ implementation.
        if not hasattr(cls, "__html__"):
            cls.__html__ = lambda self: str(self)

    # The safestring class names vary across Django versions; patch
    # whichever of them exist in this installation.
    for name in ("SafeText", "SafeString", "SafeUnicode", "SafeBytes"):
        cls = getattr(safestring, name, None)
        if cls is not None:
            _add_html_method(cls)

    for cls in (BoundField, ErrorList, ErrorDict):
        _add_html_method(cls)
def get_match_extension(using=None):
    """
    Gets the extension that the template loader will match for
    django-jinja. This returns Jinja2.match_extension.

    The "using" parameter selects with Jinja2 backend to use if
    you have multiple ones configured in settings.TEMPLATES.
    If it is None and only one Jinja2 backend is defined then it
    will use that, otherwise an ImproperlyConfigured exception
    is thrown.
    """
    # Imported lazily to avoid a circular import between this module and
    # the backend at module-load time.
    from .backend import Jinja2
    from django.template import engines

    if using is None:
        engine = Jinja2.get_default()
    else:
        engine = engines[using]
    return engine.match_extension
def match_template(template_name, extension, regex):
    """Return a truthy value when *template_name* passes the configured filters.

    ``extension`` (if set) must match the name's suffix; ``regex`` (if set)
    must match at the start of the name.  With neither filter configured,
    every template matches.
    """
    # Guard clause: a configured extension that does not match rules the
    # template out regardless of any regex.
    if extension and not template_name.endswith(extension):
        return False
    if regex:
        return re.match(regex, template_name)
    return True
| 30.463636 | 74 | 0.675321 | import re
import os.path as path
from importlib import import_module
from django.template.context import BaseContext
def dict_from_context(context):
if isinstance(context, BaseContext):
new_dict = {}
for i in reversed(list(context)):
new_dict.update(dict_from_context(i))
return new_dict
return dict(context)
def _iter_templatetags_modules_list():
from django.apps import apps
all_modules = [x.name for x in apps.get_app_configs()]
for app_path in all_modules:
try:
mod = import_module(app_path + ".templatetags")
if getattr(mod, '__file__', None) is not None:
yield (app_path, path.dirname(mod.__file__))
except ImportError:
pass
def patch_django_for_autoescape():
from django.utils import safestring
from django.forms.boundfield import BoundField
from django.forms.utils import ErrorList
from django.forms.utils import ErrorDict
if hasattr(safestring, "SafeText"):
if not hasattr(safestring.SafeText, "__html__"):
safestring.SafeText.__html__ = lambda self: str(self)
if hasattr(safestring, "SafeString"):
if not hasattr(safestring.SafeString, "__html__"):
safestring.SafeString.__html__ = lambda self: str(self)
if hasattr(safestring, "SafeUnicode"):
if not hasattr(safestring.SafeUnicode, "__html__"):
safestring.SafeUnicode.__html__ = lambda self: str(self)
if hasattr(safestring, "SafeBytes"):
if not hasattr(safestring.SafeBytes, "__html__"):
safestring.SafeBytes.__html__ = lambda self: str(self)
if not hasattr(BoundField, "__html__"):
BoundField.__html__ = lambda self: str(self)
if not hasattr(ErrorList, "__html__"):
ErrorList.__html__ = lambda self: str(self)
if not hasattr(ErrorDict, "__html__"):
ErrorDict.__html__ = lambda self: str(self)
def get_match_extension(using=None):
from .backend import Jinja2
from django.template import engines
if using is None:
engine = Jinja2.get_default()
else:
engine = engines[using]
return engine.match_extension
def match_template(template_name, extension, regex):
if extension:
matches_extension = template_name.endswith(extension)
if regex:
return matches_extension and re.match(regex, template_name)
else:
return matches_extension
elif regex:
return re.match(regex, template_name)
else:
return True
| true | true |
f71bc829a4dec68143103f059f2183f57ed5bcb0 | 903 | py | Python | beacontools/structs/estimote.py | nasa-watchdog/beacontools-ucsb | 37e60b14f73935501c8edc3277917a6a40bcfdba | [
"MIT"
] | null | null | null | beacontools/structs/estimote.py | nasa-watchdog/beacontools-ucsb | 37e60b14f73935501c8edc3277917a6a40bcfdba | [
"MIT"
] | 1 | 2018-12-08T01:43:02.000Z | 2018-12-08T01:43:02.000Z | beacontools/structs/estimote.py | nasa-watchdog/beacontools-ucsb | 37e60b14f73935501c8edc3277917a6a40bcfdba | [
"MIT"
] | null | null | null | """All low level structures used for parsing Estimote packets."""
from construct import Struct, Byte, Switch, Int8sl, Array, Int8ul
from ..const import ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
# pylint: disable=invalid-name
# Subframe A carries motion data.  `combined_fields` is a bit-packed region
# decoded by the caller — TODO confirm the exact bit layout against the
# Estimote telemetry packet specification.
EstimoteTelemetrySubFrameA = Struct(
    "acceleration" / Array(3, Int8sl),
    "previous_motion" / Byte,
    "current_motion" / Byte,
    "combined_fields" / Array(5, Byte),
)

# Subframe B carries environment data (magnetometer, light, battery).
EstimoteTelemetrySubFrameB = Struct(
    "magnetic_field" / Array(3, Int8sl),
    "ambient_light" / Int8ul,
    "combined_fields" / Array(5, Byte),
    "battery_level" / Int8ul,
)

# Top-level telemetry frame: 8-byte beacon identifier, then a subframe
# whose shape is selected by `subframe_type` (constants imported above).
EstimoteTelemetryFrame = Struct(
    "identifier" / Array(8, Byte),
    "subframe_type" / Byte,
    "sub_frame" / Switch(lambda ctx: ctx.subframe_type, {
        ESTIMOTE_TELEMETRY_SUBFRAME_A: EstimoteTelemetrySubFrameA,
        ESTIMOTE_TELEMETRY_SUBFRAME_B: EstimoteTelemetrySubFrameB,
    })
)
| 30.1 | 80 | 0.723145 | from construct import Struct, Byte, Switch, Int8sl, Array, Int8ul
from ..const import ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
EstimoteTelemetrySubFrameA = Struct(
"acceleration" / Array(3, Int8sl),
"previous_motion" / Byte,
"current_motion" / Byte,
"combined_fields" / Array(5, Byte),
)
EstimoteTelemetrySubFrameB = Struct(
"magnetic_field" / Array(3, Int8sl),
"ambient_light" / Int8ul,
"combined_fields" / Array(5, Byte),
"battery_level" / Int8ul,
)
EstimoteTelemetryFrame = Struct(
"identifier" / Array(8, Byte),
"subframe_type" / Byte,
"sub_frame" / Switch(lambda ctx: ctx.subframe_type, {
ESTIMOTE_TELEMETRY_SUBFRAME_A: EstimoteTelemetrySubFrameA,
ESTIMOTE_TELEMETRY_SUBFRAME_B: EstimoteTelemetrySubFrameB,
})
)
| true | true |
f71bc88e1e773fe6f8bdbbed540d8fa994959788 | 1,394 | py | Python | setup.py | michaelimfeld/private-telegram-bot | f5a9cec3c430d46bab3f1e57faa4d62013f93fd1 | [
"MIT"
] | 37 | 2016-05-02T18:50:55.000Z | 2021-09-21T17:53:44.000Z | setup.py | michaelimfeld/private-telegram-bot | f5a9cec3c430d46bab3f1e57faa4d62013f93fd1 | [
"MIT"
] | 12 | 2016-04-23T21:58:42.000Z | 2020-03-25T18:31:29.000Z | setup.py | michaelimfeld/private-telegram-bot | f5a9cec3c430d46bab3f1e57faa4d62013f93fd1 | [
"MIT"
] | 8 | 2016-05-09T17:37:21.000Z | 2021-09-21T17:53:45.000Z | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from setuptools import setup
# Packaging metadata for the `ownbot` distribution (setuptools).
setup(
    name="ownbot",
    version="0.0.4",
    license="MIT",
    description="Python module to create private telegram bots.",
    author="Michael Imfeld",
    author_email="michaelimfeld@crooked.ch",
    maintainer="Michael Imfeld",
    maintainer_email="michaelimfeld@crooked.ch",
    platforms=["Linux", "Windows", "MAC OS X"],
    url="https://github.com/michaelimfeld/ownbot",
    download_url="https://github.com/michaelimfeld/ownbot",
    packages=["ownbot"],
    # Ship markdown files (README etc.) found alongside the package.
    package_data={"": ["*.md"]},
    # Runtime dependencies (unpinned).
    install_requires=[
        "python-telegram-bot",
        "PyYAML"
    ],
    include_package_data=True,
    keywords=[
        "ownbot", "python",
        "telegram", "bot"
    ],
    # Trove classifiers for PyPI.
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Topic :: Education :: Testing",
        "Topic :: Software Development",
    ]
)
| 30.977778 | 65 | 0.60043 |
from setuptools import setup
setup(
name="ownbot",
version="0.0.4",
license="MIT",
description="Python module to create private telegram bots.",
author="Michael Imfeld",
author_email="michaelimfeld@crooked.ch",
maintainer="Michael Imfeld",
maintainer_email="michaelimfeld@crooked.ch",
platforms=["Linux", "Windows", "MAC OS X"],
url="https://github.com/michaelimfeld/ownbot",
download_url="https://github.com/michaelimfeld/ownbot",
packages=["ownbot"],
package_data={"": ["*.md"]},
install_requires=[
"python-telegram-bot",
"PyYAML"
],
include_package_data=True,
keywords=[
"ownbot", "python",
"telegram", "bot"
],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Topic :: Education :: Testing",
"Topic :: Software Development",
]
)
| true | true |
f71bc8aff2afd70932bd36b53d5ab0c39172c419 | 2,379 | py | Python | hw1/run_expert.py | Sebastianvarv/rl-homework | b7526ac3c86cbaae6b796856c31fc4c671a32663 | [
"MIT"
] | 1 | 2018-12-06T18:16:58.000Z | 2018-12-06T18:16:58.000Z | hw1/run_expert.py | Sebastianvarv/rl-homework | b7526ac3c86cbaae6b796856c31fc4c671a32663 | [
"MIT"
] | null | null | null | hw1/run_expert.py | Sebastianvarv/rl-homework | b7526ac3c86cbaae6b796856c31fc4c671a32663 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Code to load an expert policy and generate roll-out data for behavioral cloning.
Example usage:
python run_expert.py experts/Humanoid-v1.pkl Humanoid-v1 --render \
--num_rollouts 20
Author of this script and included expert policies: Jonathan Ho (hoj@openai.com)
"""
import os
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
def main():
    """Roll out a pre-trained expert policy and collect (observation, action) pairs.

    Command line arguments:
        expert_policy_file: path to the pickled expert policy.
        envname: gym environment id to roll out in.
        --render: render the environment while rolling out.
        --max_timesteps: cap on steps per rollout (defaults to the env's limit).
        --num_rollouts: number of expert rollouts to perform (default 20).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--render', action='store_true')
    parser.add_argument("--max_timesteps", type=int)
    parser.add_argument('--num_rollouts', type=int, default=20,
                        help='Number of expert roll outs')
    args = parser.parse_args()
    print('loading and building expert policy')
    policy_fn = load_policy.load_policy(args.expert_policy_file)
    print('loaded and built')
    with tf.Session():
        tf_util.initialize()
        # gym is already imported at module level; the redundant in-function
        # import was removed.
        env = gym.make(args.envname)
        # NOTE(review): env.spec.timestep_limit is deprecated in newer gym
        # releases (use spec.max_episode_steps) -- kept for compatibility.
        max_steps = args.max_timesteps or env.spec.timestep_limit
        returns = []
        observations = []
        actions = []
        for i in range(args.num_rollouts):
            print('iter', i)
            obs = env.reset()
            done = False
            totalr = 0.
            steps = 0
            while not done:
                # Expert expects a batch dimension; add it with None-indexing.
                action = policy_fn(obs[None, :])
                observations.append(obs)
                actions.append(action)
                obs, r, done, _ = env.step(action)
                totalr += r
                steps += 1
                if args.render:
                    env.render()
                if steps % 100 == 0:
                    print("%i/%i" % (steps, max_steps))
                if steps >= max_steps:
                    break
            returns.append(totalr)
        print('returns', returns)
        print('mean return', np.mean(returns))
        print('std of return', np.std(returns))
        # Rollout data gathered for behavioral cloning; persist it here if needed
        # (e.g. pickle it to a file keyed by args.envname).
        expert_data = {'observations': np.array(observations),
                       'actions': np.array(actions)}
if __name__ == '__main__':
    main()
| 30.5 | 83 | 0.580076 |
import os
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_policy_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
parser.add_argument('--num_rollouts', type=int, default=20,
help='Number of expert roll outs')
args = parser.parse_args()
print('loading and building expert policy')
policy_fn = load_policy.load_policy(args.expert_policy_file)
print('loaded and built')
with tf.Session():
tf_util.initialize()
import gym
env = gym.make(args.envname)
max_steps = args.max_timesteps or env.spec.timestep_limit
returns = []
observations = []
actions = []
for i in range(args.num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn(obs[None,:])
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if args.render:
env.render()
if steps % 100 == 0: print("%i/%i"%(steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
expert_data = {'observations': np.array(observations),
'actions': np.array(actions)}
if __name__ == '__main__':
main()
| true | true |
f71bc95531a6537981172b3ab30077c9090d2668 | 669 | py | Python | Latest/venv/Lib/site-packages/envisage/unknown_extension.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/envisage/unknown_extension.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/envisage/unknown_extension.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2007-2019 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
""" The exception raised when an unknown extension is referenced. """
class UnknownExtension(Exception):
    """Raised when a reference is made to an extension that is not known."""
#### EOF ######################################################################
| 39.352941 | 79 | 0.678625 |
class UnknownExtension(Exception):
| true | true |
f71bc9ee1184e124fd8b7941320776f895fa5014 | 88,529 | py | Python | clearml/storage/helper.py | mmiller-max/clearml | fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8 | [
"Apache-2.0"
] | null | null | null | clearml/storage/helper.py | mmiller-max/clearml | fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8 | [
"Apache-2.0"
] | null | null | null | clearml/storage/helper.py | mmiller-max/clearml | fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8 | [
"Apache-2.0"
] | null | null | null | from __future__ import with_statement
import errno
import getpass
import itertools
import json
import os
import shutil
import sys
import threading
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from datetime import datetime
from multiprocessing.pool import ThreadPool
from tempfile import mktemp
from time import time
from types import GeneratorType
import requests
import six
from _socket import gethostname
from attr import attrs, attrib, asdict
from furl import furl
from pathlib2 import Path
from requests.exceptions import ConnectionError
from six import binary_type, StringIO
from six.moves.queue import Queue, Empty
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import url2pathname
from .callbacks import UploadProgressReport, DownloadProgressReport
from .util import quote_url
from ..backend_api.utils import get_http_session_with_retry
from ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations
from ..config import config, deferred_config
from ..debugging import get_logger
from ..errors import UsageError
class StorageError(Exception):
    """Generic error raised by the storage helpers (configuration/access)."""
class DownloadError(Exception):
    """Raised when downloading an object from remote storage fails."""
@six.add_metaclass(ABCMeta)
class _Driver(object):
    """Abstract interface every storage backend driver must implement.

    Concrete drivers adapt a specific backend SDK (HTTP, S3/boto3, Google
    Cloud Storage, Azure Blob, local file system) to this common surface so
    the rest of the helper code stays backend agnostic.
    """
    @classmethod
    def get_logger(cls):
        # All drivers share the single 'storage' logger.
        return get_logger('storage')
    @abstractmethod
    def get_container(self, container_name, config=None, **kwargs):
        """Return a backend-specific container/bucket handle."""
        pass
    @abstractmethod
    def test_upload(self, test_path, config, **kwargs):
        """Verify upload permissions for the given destination path."""
        pass
    @abstractmethod
    def upload_object_via_stream(self, iterator, container, object_name, extra, **kwargs):
        """Upload an object from a stream/iterator of bytes."""
        pass
    @abstractmethod
    def list_container_objects(self, container, ex_prefix, **kwargs):
        """List the objects stored in a container (optionally under a prefix)."""
        pass
    @abstractmethod
    def get_direct_access(self, remote_path, **kwargs):
        """Return a directly accessible local path if supported, else None."""
        pass
    @abstractmethod
    def download_object(self, obj, local_path, overwrite_existing, delete_on_failure, callback, **kwargs):
        """Download an object into a local file path."""
        pass
    @abstractmethod
    def download_object_as_stream(self, obj, chunk_size, **kwargs):
        """Return an iterator/stream over the object's content."""
        pass
    @abstractmethod
    def delete_object(self, obj, **kwargs):
        """Delete an object from its container."""
        pass
    @abstractmethod
    def upload_object(self, file_path, container, object_name, extra, **kwargs):
        """Upload a local file into the container."""
        pass
    @abstractmethod
    def get_object(self, container_name, object_name, **kwargs):
        """Return a handle to a stored object."""
        pass
class StorageHelper(object):
""" Storage helper.
Used by the entire system to download/upload files.
Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3)
"""
_temp_download_suffix = '.partially'
    @classmethod
    def _get_logger(cls):
        # Single shared 'storage' logger used by all helper instances.
        return get_logger('storage')
    @attrs
    class _PathSubstitutionRule(object):
        """A single registered-prefix -> local-prefix path substitution rule."""
        registered_prefix = attrib(type=str)  # prefix as the data was registered
        local_prefix = attrib(type=str)  # prefix as the data is reachable locally
        replace_windows_sep = attrib(type=bool)  # swap '\\' for os.sep past the prefix
        replace_linux_sep = attrib(type=bool)  # swap '/' for os.sep past the prefix
        # Configuration section the rules are loaded from.
        path_substitution_config = 'storage.path_substitution'
        @classmethod
        def load_list_from_config(cls):
            """Build the rule list from the 'storage.path_substitution' config.

            Malformed entries (missing prefixes, or both separator flags set)
            are skipped with a warning rather than raising.
            """
            rules_list = []
            for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())):
                rule = cls(
                    registered_prefix=sub_config.get('registered_prefix', None),
                    local_prefix=sub_config.get('local_prefix', None),
                    replace_windows_sep=sub_config.get('replace_windows_sep', False),
                    replace_linux_sep=sub_config.get('replace_linux_sep', False),
                )
                # Both prefixes are mandatory.
                if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)):
                    StorageHelper._get_logger().warning(
                        "Illegal substitution rule configuration '{}[{}]': {}".format(
                            cls.path_substitution_config,
                            index,
                            asdict(rule),
                        ))
                    continue
                # The two separator-replacement flags are mutually exclusive.
                if all((rule.replace_windows_sep, rule.replace_linux_sep)):
                    StorageHelper._get_logger().warning(
                        "Only one of replace_windows_sep and replace_linux_sep flags may be set."
                        "'{}[{}]': {}".format(
                            cls.path_substitution_config,
                            index,
                            asdict(rule),
                        ))
                    continue
                rules_list.append(rule)
            return rules_list
    class _UploadData(object):
        """Immutable bundle describing a single pending (async) upload."""
        @property
        def src_path(self):
            """Local source file path."""
            return self._src_path
        @property
        def dest_path(self):
            """Remote destination path."""
            return self._dest_path
        @property
        def extra(self):
            """Extra driver-specific parameters for the upload (or None)."""
            return self._extra
        @property
        def callback(self):
            """Callable invoked with the destination path on completion (or None)."""
            return self._callback
        @property
        def retries(self):
            """Number of upload attempts to make."""
            return self._retries
        def __init__(self, src_path, dest_path, extra, callback, retries):
            self._src_path = src_path
            self._dest_path = dest_path
            self._extra = extra
            self._callback = callback
            self._retries = retries
        def __str__(self):
            return "src=%s" % self.src_path
_helpers = {} # cache of helper instances
# global terminate event for async upload threads
_terminate = threading.Event()
_async_upload_threads = set()
_upload_pool = None
# collect all bucket credentials that aren't empty (ignore entries with an empty key or secret)
_s3_configurations = deferred_config('aws.s3', {}, transform=S3BucketConfigurations.from_config)
_gs_configurations = deferred_config('google.storage', {}, transform=GSBucketConfigurations.from_config)
_azure_configurations = deferred_config('azure.storage', {}, transform=AzureContainerConfigurations.from_config)
_path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config)
    @property
    def log(self):
        """Logger used by this helper instance."""
        return self._log
    @property
    def scheme(self):
        """URL scheme this helper was created for (e.g. s3/gs/azure/http/file)."""
        return self._scheme
    @property
    def secure(self):
        """Whether a secure connection is used (set from the S3 bucket config)."""
        return self._secure
    @property
    def base_url(self):
        """Resolved base URL (bucket/container root) for this helper."""
        return self._base_url
    @classmethod
    def get(cls, url, logger=None, **kwargs):
        """
        Get a storage helper instance for the given URL
        :return: A StorageHelper instance, or None if creation failed
            (the error is logged rather than raised).
        """
        # Handle URL substitution etc before locating the correct storage driver
        url = cls._canonize_url(url)
        # Get the credentials we should use for this url
        base_url = cls._resolve_base_url(url)
        # Helpers are cached per (base URL, thread id); pass __force_create=True
        # to bypass the cache.
        instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0)
        force_create = kwargs.pop('__force_create', False)
        if (instance_key in cls._helpers) and (not force_create):
            return cls._helpers[instance_key]
        # Don't canonize URL since we already did it
        try:
            instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs)
        except (StorageError, UsageError) as ex:
            cls._get_logger().error(str(ex))
            return None
        except Exception as ex:
            cls._get_logger().error("Failed creating storage object {} Reason: {}".format(
                base_url or url, ex))
            return None
        cls._helpers[instance_key] = instance
        return instance
    @classmethod
    def get_local_copy(cls, remote_url):
        """
        Download a file from remote URL to a local storage, and return path to local copy,
        :param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc.
        :return: Path to local copy of the downloaded file. None if error occurred.
        """
        helper = cls.get(remote_url)
        if not helper:
            return None
        # create temp file with the requested file name
        # NOTE(review): mktemp only reserves a name (race-prone by design here);
        # download_to_file expects the target file to not exist yet.
        file_name = '.' + remote_url.split('/')[-1].split(os.path.sep)[-1]
        local_path = mktemp(suffix=file_name)
        return helper.download_to_file(remote_url, local_path)
    def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5,
                 **kwargs):
        """Create a helper bound to the storage backend implied by the URL scheme.

        :param base_url: resolved base URL (bucket/container root)
        :param url: full destination URL, used to look up credentials
        :param key: optional S3 access key override
        :param secret: optional S3 secret override
        :param region: optional S3 region override
        :param verbose: emit progress log lines when True
        :param logger: logger to use (defaults to the shared storage logger)
        :param retries: retry count for transient failures (HTTP/S3)
        :raises StorageError: when Azure configuration is missing/incomplete
        :raises ValueError: when S3 key/secret are missing and no credentials chain is used
        """
        # Optionally override the storage logger level from configuration.
        level = config.get('storage.log.level', None)
        if level:
            try:
                self._get_logger().setLevel(level)
            except (TypeError, ValueError):
                self._get_logger().error('invalid storage log level in configuration: %s' % level)
        self._log = logger or self._get_logger()
        self._verbose = verbose
        self._retries = retries
        self._extra = {}
        self._base_url = base_url
        self._secure = True
        self._driver = None
        self._container = None
        self._conf = None
        # StorageHelper.get() passes canonize_url=False since it already canonized.
        if kwargs.get('canonize_url', True):
            url = self._canonize_url(url)
        parsed = urlparse(url)
        self._scheme = parsed.scheme
        if self._scheme == _AzureBlobServiceStorageDriver.scheme:
            self._conf = copy(self._azure_configurations.get_config_by_uri(url))
            if self._conf is None:
                raise StorageError("Missing Azure Blob Storage configuration for {}".format(url))
            if not self._conf.account_name or not self._conf.account_key:
                raise StorageError(
                    "Missing account name or key for Azure Blob Storage access for {}".format(base_url)
                )
            self._driver = _AzureBlobServiceStorageDriver()
            self._container = self._driver.get_container(config=self._conf)
        elif self._scheme == _Boto3Driver.scheme:
            self._conf = copy(self._s3_configurations.get_config_by_uri(url))
            self._secure = self._conf.secure
            # Explicit region argument wins over the configured one.
            final_region = region if region else self._conf.region
            if not final_region:
                final_region = None
            self._conf.update(
                key=key or self._conf.key,
                secret=secret or self._conf.secret,
                multipart=self._conf.multipart,
                region=final_region,
                use_credentials_chain=self._conf.use_credentials_chain
            )
            if not self._conf.use_credentials_chain:
                if not self._conf.key or not self._conf.secret:
                    raise ValueError(
                        "Missing key and secret for S3 storage access (%s)" % base_url
                    )
            self._driver = _Boto3Driver()
            self._container = self._driver.get_container(container_name=self._base_url, retries=retries,
                                                         config=self._conf)
        elif self._scheme == _GoogleCloudStorageDriver.scheme:
            self._conf = copy(self._gs_configurations.get_config_by_uri(url))
            self._driver = _GoogleCloudStorageDriver()
            self._container = self._driver.get_container(
                container_name=self._base_url,
                config=self._conf
            )
        elif self._scheme in _HttpDriver.schemes:
            self._driver = _HttpDriver(retries=retries)
            self._container = self._driver.get_container(container_name=self._base_url)
        else:  # elif self._scheme == 'file':
            # if this is not a known scheme assume local file
            # If the scheme is file, use only the path segment, If not, use the entire URL
            if self._scheme == 'file':
                url = parsed.path
            url = url.replace("\\", "/")
            # url2pathname is specifically intended to operate on (urlparse result).path
            # and returns a cross-platform compatible result
            driver_uri = url2pathname(url)
            path_driver_uri = Path(driver_uri)
            # if path_driver_uri.is_file():
            #     driver_uri = str(path_driver_uri.parent)
            # elif not path_driver_uri.exists():
            #     # assume a folder and create
            #     # Path(driver_uri).mkdir(parents=True, exist_ok=True)
            #     pass
            self._driver = _FileStorageDriver(str(path_driver_uri.root))
            self._container = None
@classmethod
def terminate_uploads(cls, force=True, timeout=2.0):
if force:
# since async uploaders are daemon threads, we can just return and let them close by themselves
return
# signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread)
cls._terminate.set()
remaining_timeout = timeout
for thread in cls._async_upload_threads:
t = time()
try:
thread.join(timeout=remaining_timeout)
except Exception:
pass
remaining_timeout -= (time() - t)
    @classmethod
    def get_configuration(cls, bucket_config):
        # Look up an existing S3 bucket configuration by bucket name + host.
        return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host)
    @classmethod
    def add_configuration(cls, bucket_config, log=None, _test_config=True):
        """Register (or reuse) an S3 bucket configuration.

        When `bucket_config` carries no key/secret, an already-registered
        configuration or the global defaults are tried instead. With
        _test_config=True the credentials are verified (raises on failure).
        """
        # Try to use existing configuration if we have no key and secret
        use_existing = not bucket_config.is_valid()
        # Get existing config anyway (we'll either try to use it or alert we're replacing it
        existing = cls.get_configuration(bucket_config)
        configs = cls._s3_configurations
        if not use_existing:
            # Test bucket config, fails if unsuccessful
            if _test_config:
                _Boto3Driver._test_bucket_config(bucket_config, log)
            if existing:
                if log:
                    log.warning('Overriding existing configuration for %s/%s'
                                % (existing.host or 'AWS', existing.bucket))
                configs.remove_config(existing)
        else:
            # Try to use existing configuration
            good_config = False
            if existing:
                if log:
                    log.info('Using existing credentials for bucket %s/%s'
                             % (bucket_config.host or 'AWS', bucket_config.bucket))
                good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False)
            if not good_config:
                # Try to use global key/secret
                configs.update_config_with_defaults(bucket_config)
                if log:
                    log.info('Using global credentials for bucket %s/%s'
                             % (bucket_config.host or 'AWS', bucket_config.bucket))
                if _test_config:
                    _Boto3Driver._test_bucket_config(bucket_config, log)
            else:
                # do not add anything, existing config is OK
                return
        configs.add_config(bucket_config)
@classmethod
def add_path_substitution(
cls,
registered_prefix,
local_prefix,
replace_windows_sep=False,
replace_linux_sep=False,
):
"""
Add a path substitution rule for storage paths.
Useful for case where the data was registered under some path, and that
path was later renamed. This may happen with local storage paths where
each machine is has different mounts or network drives configurations
:param registered_prefix: The prefix to search for and replace. This is
the prefix of the path the data is registered under. This should be the
exact url prefix, case sensitive, as the data is registered.
:param local_prefix: The prefix to replace 'registered_prefix' with. This
is the prefix of the path the data is actually saved under. This should be the
exact url prefix, case sensitive, as the data is saved under.
:param replace_windows_sep: If set to True, and the prefix matches, the rest
of the url has all of the windows path separators (backslash '\') replaced with
the native os path separator.
:param replace_linux_sep: If set to True, and the prefix matches, the rest
of the url has all of the linux/unix path separators (slash '/') replaced with
the native os path separator.
"""
if not registered_prefix or not local_prefix:
raise UsageError("Path substitution prefixes must be non empty strings")
if replace_windows_sep and replace_linux_sep:
raise UsageError("Only one of replace_windows_sep and replace_linux_sep may be set.")
rule = cls._PathSubstitutionRule(
registered_prefix=registered_prefix,
local_prefix=local_prefix,
replace_windows_sep=replace_windows_sep,
replace_linux_sep=replace_linux_sep,
)
cls._path_substitutions.append(rule)
@classmethod
def clear_path_substitutions(cls):
"""
Removes all path substitution rules, including ones from the configuration file.
"""
cls._path_substitutions = list()
    def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True):
        """
        Verify that this helper can upload files to a folder.
        An upload is possible iff:
            1. the destination folder is under the base uri of the url used to create the helper
            2. the helper has credentials to write to the destination folder
        :param folder_uri: The destination folder to test. Must be an absolute
            url that begins with the base uri of the url used to create the helper.
        :param raise_on_error: Raise an exception if an upload is not possible
        :param log_on_error: Log an error if an upload is not possible
        :return: The conformed folder uri (a truthy value) when an upload to
            folder_uri is possible.
        """
        folder_uri = self._canonize_url(folder_uri)
        folder_uri = self.conform_url(folder_uri, self._base_url)
        test_path = self._normalize_object_name(folder_uri)
        if self._scheme == _Boto3Driver.scheme:
            _Boto3Driver._test_bucket_config(
                self._conf,
                self._log,
                test_path=test_path,
                raise_on_error=raise_on_error,
                log_on_error=log_on_error,
            )
        elif self._scheme == _GoogleCloudStorageDriver.scheme:
            self._driver.test_upload(test_path, self._conf)
        elif self._scheme == 'file':
            # Check path exists
            Path(test_path).mkdir(parents=True, exist_ok=True)
            # check path permissions
            Path(test_path).touch(exist_ok=True)
        return folder_uri
    def upload_from_stream(self, stream, dest_path, extra=None, retries=1):
        """Upload the content of a file-like object to `dest_path`.

        Retries up to `retries` times, rewinding the stream between attempts
        when possible. Re-raises the last failure when all attempts fail.
        :return: the destination path (quoted for http/https destinations).
        """
        dest_path = self._canonize_url(dest_path)
        object_name = self._normalize_object_name(dest_path)
        extra = extra.copy() if extra else {}
        extra.update(self._extra)
        last_ex = None
        cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log)
        for i in range(max(1, retries)):
            try:
                self._driver.upload_object_via_stream(
                    iterator=stream,
                    container=self._container,
                    object_name=object_name,
                    callback=cb,
                    extra=extra)
                last_ex = None
                break
            except Exception as ex:
                last_ex = ex
                # seek to beginning if possible
                # noinspection PyBroadException
                try:
                    stream.seek(0)
                except Exception:
                    pass
        if last_ex:
            raise last_ex
        if self.scheme in _HttpDriver.schemes:
            # quote link
            dest_path = quote_url(dest_path)
        return dest_path
    def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None, retries=1):
        """Upload a local file, optionally asynchronously on the upload pool.

        :param src_path: local file to upload
        :param dest_path: remote destination (defaults to the source basename)
        :param extra: extra driver-specific parameters
        :param async_enable: when True return an AsyncResult instead of blocking
        :param cb: optional callback invoked with the final destination path
        :param retries: number of upload attempts
        :return: destination path (sync) or the pool's async result handle
        """
        if not dest_path:
            dest_path = os.path.basename(src_path)
        dest_path = self._canonize_url(dest_path)
        if cb and self.scheme in _HttpDriver.schemes:
            # store original callback
            a_cb = cb
            # quote link
            def callback(a_path):
                return a_cb(quote_url(a_path) if a_path else a_path)
            # replace callback with wrapper
            cb = callback
        if async_enable:
            data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb, retries=retries)
            StorageHelper._initialize_upload_pool()
            return StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,))
        else:
            res = self._do_upload(src_path, dest_path, extra, cb, verbose=False, retries=retries)
            if res:
                res = quote_url(res)
            return res
    def list(self, prefix=None):
        """
        List entries in the helper base path.
        Return a list of names inside this helper base path. The base path is
        determined at creation time and is specific for each storage medium.
        For Google Storage and S3 it is the bucket of the path.
        For local files it is the root directory.
        This operation is not supported for http and https protocols.
        :param prefix: If None, return the list as described above. If not, it
            must be a string - the path of a sub directory under the base path.
            the returned list will include only objects under that subdir.
        :return: The paths of all the objects in the storage base
            path under prefix. Listed relative to the base path.
        """
        if prefix:
            if prefix.startswith(self._base_url):
                prefix = prefix[len(self.base_url):].lstrip("/")
            # Older drivers don't accept ex_prefix -- fall back to a full list
            # and filter client-side.
            try:
                res = self._driver.list_container_objects(self._container, ex_prefix=prefix)
            except TypeError:
                res = self._driver.list_container_objects(self._container)
            return [
                obj.name
                for obj in res if
                obj.name.startswith(prefix) and obj.name != prefix
            ]
        else:
            return [obj.name for obj in self._driver.list_container_objects(self._container)]
    def download_to_file(self, remote_path, local_path, overwrite_existing=False, delete_on_failure=True, verbose=None):
        """Download a remote object into `local_path`.

        Downloads into a temporary '<local_path>_<ts>.partially' file first and
        renames on success, so an interrupted download never looks complete.
        :param remote_path: remote object url
        :param local_path: target local file path
        :param overwrite_existing: when False and local_path exists, skip the download
        :param delete_on_failure: remove the partial file when the download fails
        :param verbose: override the helper's verbosity for this call
        :return: local_path on success (or a direct-access path), None on failure
        """
        def next_chunk(astream):
            # Normalize the driver's return value: either a raw bytes blob or
            # an iterator of chunks. Returns (chunk, remaining_stream).
            if isinstance(astream, binary_type):
                chunk = astream
                astream = None
            elif astream:
                try:
                    chunk = next(astream)
                except StopIteration:
                    chunk = None
            else:
                chunk = None
            return chunk, astream
        remote_path = self._canonize_url(remote_path)
        verbose = self._verbose if verbose is None else verbose
        # Check if driver type supports direct access:
        direct_access_path = self._driver.get_direct_access(remote_path)
        if direct_access_path:
            return direct_access_path
        temp_local_path = None
        try:
            if verbose:
                self._log.info('Start downloading from %s' % remote_path)
            if not overwrite_existing and Path(local_path).is_file():
                self._log.warning(
                    'File {} already exists, no need to download, thread id = {}'.format(
                        local_path,
                        threading.current_thread().ident,
                    ),
                )
                return local_path
            # we download into temp_local_path so that if we accidentally stop in the middle,
            # we won't think we have the entire file
            temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix)
            obj = self._get_object(remote_path)
            if not obj:
                return None
            # object size in bytes
            total_size_mb = -1
            dl_total_mb = 0.
            download_reported = False
            # chunks size is ignored and always 5Mb
            chunk_size_mb = 5
            # make sure we have the destination folder
            # noinspection PyBroadException
            Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)
            # try to get file size
            try:
                if isinstance(self._driver, _HttpDriver) and obj:
                    obj = self._driver._get_download_object(obj)
                    total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024)
                elif hasattr(obj, 'size'):
                    size = obj.size
                    # Google storage has the option to reload the object to get the size
                    if size is None and hasattr(obj, 'reload'):
                        obj.reload()
                        size = obj.size
                    total_size_mb = 0 if size is None else float(size) / (1024 * 1024)
                elif hasattr(obj, 'content_length'):
                    total_size_mb = float(obj.content_length) / (1024 * 1024)
            except (ValueError, AttributeError, KeyError):
                pass
            # if driver supports download with callback, use it (it might be faster)
            if hasattr(self._driver, 'download_object'):
                # callback
                cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self._log)
                self._driver.download_object(obj, temp_local_path, callback=cb)
                download_reported = bool(cb.last_reported)
                dl_total_mb = cb.current_status_mb
            else:
                stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)
                if stream is None:
                    raise ValueError('Could not download %s' % remote_path)
                with open(temp_local_path, 'wb') as fd:
                    data, stream = next_chunk(stream)
                    while data:
                        fd.write(data)
                        data, stream = next_chunk(stream)
            if Path(temp_local_path).stat().st_size <= 0:
                raise Exception('downloaded a 0-sized file')
            # if we are on windows, we need to remove the target file before renaming
            # otherwise posix rename will overwrite the target
            if os.name != 'posix':
                try:
                    os.remove(local_path)
                except Exception:
                    pass
            # rename temp file to local_file
            # noinspection PyBroadException
            try:
                os.rename(temp_local_path, local_path)
            except Exception:
                # noinspection PyBroadException
                try:
                    os.unlink(temp_local_path)
                except Exception:
                    pass
                # file was downloaded by a parallel process, check we have the final output and delete the partial copy
                path_local_path = Path(local_path)
                if not path_local_path.is_file() or path_local_path.stat().st_size <= 0:
                    raise Exception('Failed renaming partial file, downloaded file exists and a 0-sized file')
            # report download if we are on the second chunk
            if verbose or download_reported:
                self._log.info(
                    'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path))
            return local_path
        except DownloadError:
            raise
        except Exception as e:
            self._log.error("Could not download {} , err: {} ".format(remote_path, e))
            if delete_on_failure:
                # noinspection PyBroadException
                try:
                    if temp_local_path:
                        os.remove(temp_local_path)
                except Exception:
                    pass
            return None
    def download_as_stream(self, remote_path, chunk_size=None):
        """Return a byte stream/iterator over the remote object's content.

        Returns None (and logs the error) on failure; DownloadError propagates.
        """
        remote_path = self._canonize_url(remote_path)
        try:
            obj = self._get_object(remote_path)
            return self._driver.download_object_as_stream(
                obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log
            )
        except DownloadError:
            raise
        except Exception as e:
            self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e)))
            return None
def download_as_nparray(self, remote_path, chunk_size=None):
try:
stream = self.download_as_stream(remote_path, chunk_size)
if stream is None:
return
# TODO: ugly py3 hack, please remove ASAP
if six.PY3 and not isinstance(stream, GeneratorType):
import numpy as np
return np.frombuffer(stream, dtype=np.uint8)
else:
import numpy as np
return np.asarray(bytearray(b''.join(stream)), dtype=np.uint8)
except Exception as e:
self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e)))
    def delete(self, path):
        """Delete the remote object at `path`; returns the driver's result."""
        return self._driver.delete_object(self._get_object(path))
    def check_write_permissions(self, dest_path=None):
        """Verify write access by uploading and deleting a tiny test object.

        http/https destinations are assumed writable and not probed.
        :raises ValueError: when the test upload/delete fails
        :return: True when writable
        """
        # create a temporary file, then delete it
        base_url = dest_path or self._base_url
        dest_path = base_url + '/.clearml.test'
        # do not check http/s connection permissions
        if dest_path.startswith('http'):
            return True
        try:
            self.upload_from_stream(stream=six.BytesIO(b'clearml'), dest_path=dest_path)
            self.delete(path=dest_path)
        except Exception:
            raise ValueError('Insufficient permissions for {}'.format(base_url))
        return True
    @classmethod
    def download_from_url(cls, remote_url, local_path, overwrite_existing=False):
        """
        Download a file from remote URL to a local storage
        :param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc.
        :param local_path: target location for downloaded file. Example: /tmp/image.jpg
        :param overwrite_existing: If True and local_path exists, it will overwrite it, otherwise print warning
        :return: local_path if download was successful.
        """
        helper = cls.get(remote_url)
        if not helper:
            return None
        return helper.download_to_file(remote_url, local_path, overwrite_existing=overwrite_existing)
    @classmethod
    def _canonize_url(cls, url):
        # Apply any registered/configured path substitution rules to the URL.
        return cls._apply_url_substitutions(url)
@classmethod
def _apply_url_substitutions(cls, url):
def replace_separator(_url, where, sep):
return _url[:where] + _url[where:].replace(sep, os.sep)
for index, rule in enumerate(cls._path_substitutions):
if url.startswith(rule.registered_prefix):
url = url.replace(
rule.registered_prefix,
rule.local_prefix,
1, # count. str.replace() does not support keyword arguments
)
if rule.replace_windows_sep:
url = replace_separator(url, len(rule.local_prefix), '\\')
if rule.replace_linux_sep:
url = replace_separator(url, len(rule.local_prefix), '/')
break
return url
    @classmethod
    def _resolve_base_url(cls, base_url):
        """Reduce a full URL to its credential-bearing base (bucket/container root).

        Unknown schemes fall through to 'file://'.
        """
        parsed = urlparse(base_url)
        if parsed.scheme == _Boto3Driver.scheme:
            conf = cls._s3_configurations.get_config_by_uri(base_url)
            bucket = conf.bucket
            if not bucket:
                # no configured bucket -- take the first path segment as the bucket
                parts = Path(parsed.path.strip('/')).parts
                if parts:
                    bucket = parts[0]
            return '/'.join(x for x in ('s3:/', conf.host, bucket) if x)
        elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme:
            conf = cls._azure_configurations.get_config_by_uri(base_url)
            if not conf:
                raise StorageError("Can't find azure configuration for {}".format(base_url))
            return str(furl(base_url).set(path=conf.container_name))
        elif parsed.scheme == _GoogleCloudStorageDriver.scheme:
            conf = cls._gs_configurations.get_config_by_uri(base_url)
            return str(furl(scheme=parsed.scheme, netloc=conf.bucket))
        elif parsed.scheme == 'http':
            return 'http://'
        elif parsed.scheme == 'https':
            return 'https://'
        else:  # if parsed.scheme == 'file':
            # if we do not know what it is, we assume file
            return 'file://'
@classmethod
def conform_url(cls, folder_uri, base_url=None):
if not folder_uri:
return folder_uri
_base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url
if not folder_uri.startswith(_base_url):
prev_folder_uri = folder_uri
if _base_url == 'file://':
folder_uri = str(Path(folder_uri).absolute())
if folder_uri.startswith('/'):
folder_uri = _base_url + folder_uri
else:
folder_uri = '/'.join((_base_url, folder_uri))
cls._get_logger().debug('Upload destination {} amended to {} for registration purposes'.format(
prev_folder_uri, folder_uri))
else:
raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, _base_url))
return folder_uri
def _absolute_object_name(self, path):
""" Returns absolute remote path, including any prefix that is handled by the container """
if not path.startswith(self.base_url):
return self.base_url.rstrip('/') + '///' + path.lstrip('/')
return path
def _normalize_object_name(self, path):
""" Normalize remote path. Remove any prefix that is already handled by the container """
if path.startswith(self.base_url):
path = path[len(self.base_url):]
if path.startswith('/') and os.name == 'nt':
path = path[1:]
if self.scheme in (_Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme,
_AzureBlobServiceStorageDriver.scheme):
path = path.lstrip('/')
return path
def _do_async_upload(self, data):
assert isinstance(data, self._UploadData)
return self._do_upload(data.src_path, data.dest_path, extra=data.extra, cb=data.callback,
verbose=True, retries=data.retries)
    def _upload_from_file(self, local_path, dest_path, extra=None):
        """
        Upload a local file to *dest_path* through the container driver.

        :param local_path: path of the local file to upload
        :param dest_path: destination path (absolute or container-relative)
        :param extra: optional driver-specific metadata, merged with self._extra
        :return: driver upload result (falsy on failure)
        """
        if not hasattr(self._driver, 'upload_object'):
            # driver only supports stream upload (e.g. plain http/https)
            with open(local_path, 'rb') as stream:
                res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra)
        else:
            object_name = self._normalize_object_name(dest_path)
            # copy so the caller's dict is not mutated by the update below
            extra = extra.copy() if extra else {}
            extra.update(self._extra)
            cb = UploadProgressReport.from_file(local_path, self._verbose, self._log)
            res = self._driver.upload_object(
                file_path=local_path,
                container=self._container,
                object_name=object_name,
                callback=cb,
                extra=extra)
        return res
    def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False, retries=1):
        """
        Upload *src_path* to *dest_path* with retries.

        Callback protocol: ``cb(None)`` on start, ``cb(False)`` on final failure,
        ``cb(dest_path)`` on success. Callback exceptions are logged, never raised.

        :param src_path: local source file path
        :param dest_path: destination path
        :param extra: optional driver-specific metadata
        :param cb: optional state-change callback (see protocol above)
        :param verbose: log start/finish messages
        :param retries: number of attempts (at least one is always made)
        :return: dest_path on success
        :raises: the last upload exception after exhausting all retries
        """
        object_name = self._normalize_object_name(dest_path)
        if cb:
            try:
                cb(None)
            except Exception as e:
                self._log.error("Calling upload callback when starting upload: %s" % str(e))
        if verbose:
            msg = 'Starting upload: {} => {}{}'.format(
                src_path,
                (self._container.name if self._container.name.endswith('/') else self._container.name + '/')
                if self._container and self._container.name else '', object_name)
            # local targets are logged at debug level only
            if object_name.startswith('file://') or object_name.startswith('/'):
                self._log.debug(msg)
            else:
                self._log.info(msg)
        last_ex = None
        for i in range(max(1, retries)):
            try:
                if not self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra):
                    # retry if failed
                    last_ex = ValueError("Upload failed")
                    continue
                last_ex = None
                break
            except Exception as e:
                last_ex = e

        if last_ex:
            self._log.error("Exception encountered while uploading %s" % str(last_ex))
            if cb:
                try:
                    cb(False)
                except Exception as e:
                    self._log.warning("Exception on upload callback: %s" % str(e))
            raise last_ex

        if verbose:
            self._log.debug("Finished upload: %s => %s" % (src_path, object_name))
        if cb:
            try:
                cb(dest_path)
            except Exception as e:
                self._log.warning("Exception on upload callback: %s" % str(e))

        return dest_path
    def _get_object(self, path):
        """
        Fetch the remote object handle for *path*.

        :param path: remote path (absolute or container-relative)
        :return: driver object handle, or None when the object is inaccessible
        :raises DownloadError: on connection failure
        """
        object_name = self._normalize_object_name(path)
        try:
            return self._driver.get_object(
                container_name=self._container.name if self._container else '', object_name=object_name)
        except ConnectionError:
            raise DownloadError
        except Exception as e:
            # NOTE(review): sibling methods use self._log; this assumes a `log`
            # property/attribute exists on the class - confirm it is defined elsewhere
            self.log.warning('Storage helper problem for {}: {}'.format(str(object_name), str(e)))
            return None
    @staticmethod
    def _initialize_upload_pool():
        """Lazily create the shared async-upload pool (single worker keeps uploads serialized)."""
        if not StorageHelper._upload_pool:
            StorageHelper._upload_pool = ThreadPool(processes=1)
@staticmethod
def close_async_threads():
if StorageHelper._upload_pool:
pool = StorageHelper._upload_pool
StorageHelper._upload_pool = None
# noinspection PyBroadException
try:
pool.terminate()
pool.join()
except Exception:
pass
class _HttpDriver(_Driver):
    """ LibCloud http/https adapter (simple, enough for now) """

    # (connect, read) timeouts in seconds
    timeout = (5.0, 30.)
    # assumed minimal upload speed, used to scale the upload timeout
    min_kbps_speed = 50

    schemes = ('http', 'https')

    class _Container(object):
        # backend session / files-server host are resolved once and shared
        _default_backend_session = None
        _default_files_server_host = None

        def __init__(self, name, retries=5, **kwargs):
            self.name = name
            self.session = get_http_session_with_retry(total=retries, connect=retries, read=retries, redirect=retries)

        def get_headers(self, url):
            """Return auth headers when *url* targets the default files server, else None."""
            if not self._default_backend_session:
                from ..backend_interface.base import InterfaceBase
                self._default_backend_session = InterfaceBase._get_default_session()
            if self._default_files_server_host is None:
                self._default_files_server_host = self._default_backend_session.get_files_server_host().rstrip('/')

            if url == self._default_files_server_host or url.startswith(self._default_files_server_host + '/'):
                return self._default_backend_session.add_auth_headers({})
            return None

    class _HttpSessionHandle(object):
        # lazy handle: the actual GET is issued only on download
        def __init__(self, url, is_stream, container_name, object_name):
            self.url, self.is_stream, self.container_name, self.object_name = \
                url, is_stream, container_name, object_name

    def __init__(self, retries=5):
        self._retries = retries
        self._containers = {}

    def get_container(self, container_name, config=None, **kwargs):
        # one cached container (with its retry-enabled session) per name
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)
        return self._containers[container_name]

    def upload_object_via_stream(self, iterator, container, object_name, extra=None, callback=None, **kwargs):
        # object_name is '<host-part>/<path>'; container.name holds the scheme prefix
        url = object_name[:object_name.index('/')]
        url_path = object_name[len(url) + 1:]
        full_url = container.name + url
        # when sending data in post, there is no connection timeout, just an entire upload timeout
        timeout = self.timeout[-1]
        stream_size = 0
        if hasattr(iterator, 'tell') and hasattr(iterator, 'seek'):
            # measure the stream size (seek to end and back) to scale the timeout
            pos = iterator.tell()
            iterator.seek(0, 2)
            stream_size = iterator.tell() - pos
            iterator.seek(pos, 0)
            timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))

        res = container.session.post(full_url, files={url_path: iterator}, timeout=timeout,
                                     headers=container.get_headers(full_url))
        if res.status_code != requests.codes.ok:
            raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text))

        # call back is useless because we are not calling it while uploading...

        # if callback and stream_size:
        #     try:
        #         callback(stream_size)
        #     except Exception as ex:
        #         log.debug('Exception raised when running callback function: %s' % ex)
        return res

    def list_container_objects(self, *args, **kwargs):
        raise NotImplementedError('List is not implemented for http protocol')

    def delete_object(self, obj, *args, **kwargs):
        """Issue an HTTP DELETE for the object; return True on success."""
        assert isinstance(obj, self._HttpSessionHandle)
        container = self._containers[obj.container_name]
        res = container.session.delete(obj.url, headers=container.get_headers(obj.url))
        if res.status_code != requests.codes.ok:
            self._get_logger().warning('Failed deleting object %s (%d): %s' % (
                obj.object_name, res.status_code, res.text))
            return False
        return True

    def get_object(self, container_name, object_name, *args, **kwargs):
        # no request here - just build the lazy session handle
        is_stream = kwargs.get('stream', True)
        url = ''.join((container_name, object_name.lstrip('/')))
        return self._HttpSessionHandle(url, is_stream, container_name, object_name)

    def _get_download_object(self, obj):
        # bypass for session result
        if not isinstance(obj, self._HttpSessionHandle):
            return obj

        container = self._containers[obj.container_name]
        # set stream flag before we send the request
        container.session.stream = obj.is_stream
        res = container.session.get(obj.url, timeout=self.timeout, headers=container.get_headers(obj.url))
        if res.status_code != requests.codes.ok:
            raise ValueError('Failed getting object %s (%d): %s' % (obj.object_name, res.status_code, res.text))
        return res

    def download_object_as_stream(self, obj, chunk_size=64 * 1024, **_):
        # return iterable object
        obj = self._get_download_object(obj)
        return obj.iter_content(chunk_size=chunk_size)

    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        """Download to *local_path*; returns the number of bytes written, or None when skipped."""
        obj = self._get_download_object(obj)
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        length = 0
        with p.open(mode='wb') as f:
            for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):
                # filter out keep-alive new chunks
                if not chunk:
                    continue
                chunk_size = len(chunk)
                f.write(chunk)
                length += chunk_size
                if callback:
                    callback(chunk_size)

        return length

    def get_direct_access(self, remote_path, **_):
        # http objects are never directly accessible on the local filesystem
        return None

    def test_upload(self, test_path, config, **kwargs):
        return True

    def upload_object(self, file_path, container, object_name, extra, callback=None, **kwargs):
        with open(file_path, 'rb') as stream:
            return self.upload_object_via_stream(iterator=stream, container=container,
                                                 object_name=object_name, extra=extra, callback=callback, **kwargs)
class _Stream(object):
    # File-like adapter bridging a producer/consumer Queue (or an input
    # iterator) to the read()/write() interface expected by storage SDKs.
    encoding = None
    mode = 'rw'
    name = ''
    newlines = '\n'
    softspace = False

    def __init__(self, input_iterator=None):
        # when input_iterator is given, next()/read() pull from it;
        # otherwise chunks come from the internal write() queue
        self.closed = False
        self._buffer = Queue()
        self._input_iterator = input_iterator
        self._leftover = None  # excess bytes from a sized read(), served first next time

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def close(self):
        self.closed = True

    def flush(self):
        pass

    def fileno(self):
        # dummy descriptor; some SDKs only require fileno() to exist
        return 87

    def isatty(self):
        return False

    def next(self):
        """Return the next chunk; raise StopIteration when closed and drained."""
        while not self.closed or not self._buffer.empty():
            # input stream
            if self._input_iterator:
                try:
                    chunck = next(self._input_iterator)
                    return chunck
                except StopIteration:
                    self.closed = True
                    raise StopIteration()
                except Exception as ex:
                    _Driver.get_logger().error('Failed downloading: %s' % ex)
            else:
                # in/out stream
                try:
                    # 1s timeout lets the loop re-check `closed` periodically
                    return self._buffer.get(block=True, timeout=1.)
                except Empty:
                    pass

        raise StopIteration()

    def read(self, size=None):
        """Read up to *size* bytes (all remaining when size is None)."""
        try:
            # serve previously buffered excess before pulling new chunks
            data = self.next() if self._leftover is None else self._leftover
        except StopIteration:
            return six.b('')

        self._leftover = None
        try:
            # accumulate chunks until at least `size` bytes (or end of stream)
            while size is None or not data or len(data) < size:
                chunk = self.next()
                if chunk is not None:
                    if data is not None:
                        data += chunk
                    else:
                        data = chunk
        except StopIteration:
            pass

        if size is not None and data and len(data) > size:
            # keep the excess for the next read() call
            self._leftover = data[size:]
            return data[:size]

        return data

    def readline(self, size=None):
        # NOTE(review): does not actually split on newlines - returns a sized read
        return self.read(size)

    def readlines(self, sizehint=None):
        pass

    def truncate(self, size=None):
        pass

    def write(self, bytes):
        self._buffer.put(bytes, block=True)

    def writelines(self, sequence):
        for s in sequence:
            self.write(s)
class _Boto3Driver(_Driver):
    """ Boto3 storage adapter (simple, enough for now) """
    _min_pool_connections = 512
    _max_multipart_concurrency = deferred_config('aws.boto3.max_multipart_concurrency', 16)
    _pool_connections = deferred_config('aws.boto3.pool_connections', 512)

    _stream_download_pool_connections = 128
    _stream_download_pool = None

    # cache of _Container instances, keyed by full 's3://...' name (class-level, shared)
    _containers = {}

    scheme = 's3'
    scheme_prefix = str(furl(scheme=scheme, netloc=''))

    # buckets for which a get-location failure was already reported (avoid log spam)
    _bucket_location_failure_reported = set()

    class _Container(object):
        _creation_lock = threading.Lock()

        def __init__(self, name, cfg):
            try:
                import boto3
                import botocore.client
                from botocore.exceptions import ClientError  # noqa: F401
            except ImportError:
                raise UsageError(
                    'AWS S3 storage driver (boto3) not found. '
                    'Please install driver using: pip install \"boto3>=1.9\"'
                )

            # skip 's3://'
            self.name = name[5:]
            endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None

            # boto3 client creation isn't thread-safe (client itself is)
            with self._creation_lock:
                boto_kwargs = {
                    "endpoint_url": endpoint,
                    "use_ssl": cfg.secure,
                    "verify": cfg.verify,
                    "config": botocore.client.Config(
                        max_pool_connections=max(
                            _Boto3Driver._min_pool_connections,
                            _Boto3Driver._pool_connections)
                    )
                }
                if not cfg.use_credentials_chain:
                    # explicit credentials; otherwise boto3 falls back to its default chain
                    boto_kwargs["aws_access_key_id"] = cfg.key
                    boto_kwargs["aws_secret_access_key"] = cfg.secret

                self.resource = boto3.resource(
                    's3',
                    **boto_kwargs
                )

            self.config = cfg
            # strip the host prefix from the name to get the bare bucket name
            bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name
            self.bucket = self.resource.Bucket(bucket_name)

    @attrs
    class ListResult(object):
        # minimal listing record (only the object key/name)
        name = attrib(default=None)

    def __init__(self):
        pass

    def _get_stream_download_pool(self):
        # lazily created thread pool for async stream downloads
        if self._stream_download_pool is None:
            self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)
        return self._stream_download_pool

    def get_container(self, container_name, config=None, **kwargs):
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, cfg=config)
        self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]

    def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):
        """Upload from an iterator/stream; returns True on success, False on failure."""
        import boto3.s3.transfer
        stream = _Stream(iterator)
        try:
            container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(
                use_threads=container.config.multipart,
                max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                num_download_attempts=container.config.retries),
                Callback=callback,
            )
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True

    def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):
        """Upload a local file; returns True on success, False on failure."""
        import boto3.s3.transfer
        try:
            container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig(
                use_threads=container.config.multipart,
                max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                num_download_attempts=container.config.retries),
                Callback=callback)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True

    def list_container_objects(self, container, ex_prefix=None, **kwargs):
        """Yield ListResult records for all (optionally prefix-filtered) objects."""
        if ex_prefix:
            res = container.bucket.objects.filter(Prefix=ex_prefix)
        else:
            res = container.bucket.objects.all()
        for res in res:
            yield self.ListResult(name=res.key)

    def delete_object(self, object, **kwargs):
        from botocore.exceptions import ClientError
        object.delete()
        try:
            # Try loading the file to verify deletion
            object.load()
            return False
        except ClientError as e:
            # 404 after delete means the object is really gone
            return int(e.response['Error']['Code']) == 404

    def get_object(self, container_name, object_name, *args, **kwargs):
        full_container_name = 's3://' + container_name
        container = self._containers[full_container_name]
        obj = container.resource.Object(container.bucket.name, object_name)
        # remember the container key so download methods can find the config
        obj.container_name = full_container_name
        return obj

    def download_object_as_stream(self, obj, chunk_size=64 * 1024, verbose=None, log=None, **_):
        """Return a _Stream fed by an async download running on the pool."""
        def async_download(a_obj, a_stream, cb, cfg):
            try:
                a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)
            except Exception as ex:
                (log or self.get_logger()).error('Failed downloading: %s' % ex)
            a_stream.close()

        import boto3.s3.transfer
        # return iterable object
        stream = _Stream()
        container = self._containers[obj.container_name]
        config = boto3.s3.transfer.TransferConfig(
            use_threads=container.config.multipart,
            max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
            num_download_attempts=container.config.retries)
        total_size_mb = obj.content_length / (1024. * 1024.)
        remote_path = os.path.join(obj.container_name, obj.key)
        cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)
        self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)

        return stream

    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        import boto3.s3.transfer
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        container = self._containers[obj.container_name]
        obj.download_file(str(p),
                          Callback=callback,
                          Config=boto3.s3.transfer.TransferConfig(
                              use_threads=container.config.multipart,
                              max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                              num_download_attempts=container.config.retries))

    @classmethod
    def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True):
        """Write a small test object to the bucket to verify the configuration; True on success."""
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError:
            return False

        if not conf.bucket:
            return False
        try:
            if not conf.is_valid():
                raise Exception('Missing credentials')

            fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__)
            bucket_name = str(fullname.path.segments[0])
            filename = str(furl(path=fullname.path.segments[1:]))

            data = {
                'user': getpass.getuser(),
                'machine': gethostname(),
                'time': datetime.utcnow().isoformat()
            }

            boto_session = boto3.Session(conf.key, conf.secret)
            boto_resource = boto_session.resource('s3', conf.region)
            bucket = boto_resource.Bucket(bucket_name)
            bucket.put_object(Key=filename, Body=six.b(json.dumps(data)))

            region = cls._get_bucket_region(conf=conf, log=log, report_info=True)

            # verify configured region matches the actual one (default is us-east-1)
            if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')):
                msg = "incorrect region specified for bucket %s (detected region %s)" % (conf.bucket, region)
            else:
                return True

        except ClientError as ex:
            msg = ex.response['Error']['Message']
            if log_on_error and log:
                log.error(msg)

            if raise_on_error:
                raise

        except Exception as ex:
            msg = str(ex)
            if log_on_error and log:
                log.error(msg)

            if raise_on_error:
                raise

        msg = ("Failed testing access to bucket %s: " % conf.bucket) + msg

        if log_on_error and log:
            log.error(msg)

        if raise_on_error:
            raise StorageError(msg)

        return False

    @classmethod
    def _get_bucket_region(cls, conf, log=None, report_info=False):
        """Return the bucket's region name, or None when it cannot be determined."""
        import boto3
        from botocore.exceptions import ClientError

        if not conf.bucket:
            return None

        def report(msg):
            # report each bucket host at most once to avoid log spam
            if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:
                if report_info:
                    log.debug(msg)
                else:
                    log.warning(msg)
            cls._bucket_location_failure_reported.add(conf.get_bucket_host())

        try:
            boto_session = boto3.Session(conf.key, conf.secret)
            boto_resource = boto_session.resource('s3')
            return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)["LocationConstraint"]

        except ClientError as ex:
            report("Failed getting bucket location (region) for bucket "
                   "%s: %s (%s, access_key=%s). Default region will be used. "
                   "This is normal if you do not have GET_BUCKET_LOCATION permission"
                   % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key))
        except Exception as ex:
            report("Failed getting bucket location (region) for bucket %s: %s. Default region will be used."
                   % (conf.bucket, str(ex)))

        return None

    def get_direct_access(self, remote_path, **_):
        # s3 objects are never directly accessible on the local filesystem
        return None

    def test_upload(self, test_path, config, **_):
        return True
class _GoogleCloudStorageDriver(_Driver):
    """Storage driver for google cloud storage"""

    _stream_download_pool_connections = 128
    _stream_download_pool = None

    # cache of _Container instances, keyed by full 'gs://...' name (class-level, shared)
    _containers = {}

    scheme = 'gs'
    scheme_prefix = str(furl(scheme=scheme, netloc=''))

    class _Container(object):
        def __init__(self, name, cfg):
            try:
                from google.cloud import storage
                from google.oauth2 import service_account
            except ImportError:
                raise UsageError(
                    'Google cloud driver not found. '
                    'Please install driver using: pip install \"google-cloud-storage>=1.13.2\"'
                )

            # strip the 'gs://' prefix, keep the bucket name
            self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):]

            if cfg.credentials_json:
                credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json)
            else:
                # fall back to application-default credentials
                credentials = None

            self.client = storage.Client(project=cfg.project, credentials=credentials)
            # tune the underlying HTTP adapters' pool sizes when configured
            for adapter in self.client._http.adapters.values():
                if cfg.pool_connections:
                    adapter._pool_connections = cfg.pool_connections
                if cfg.pool_maxsize:
                    adapter._pool_maxsize = cfg.pool_maxsize

            self.config = cfg
            self.bucket = self.client.bucket(self.name)

    def _get_stream_download_pool(self):
        # lazily created thread pool for async downloads
        if self._stream_download_pool is None:
            self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)
        return self._stream_download_pool

    def get_container(self, container_name, config=None, **kwargs):
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, cfg=config)
        self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]

    def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):
        """Upload from a file-like iterator; returns True on success, False on failure."""
        try:
            blob = container.bucket.blob(object_name)
            blob.upload_from_file(iterator)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True

    def upload_object(self, file_path, container, object_name, extra=None, **kwargs):
        """Upload a local file; returns True on success, False on failure."""
        try:
            blob = container.bucket.blob(object_name)
            blob.upload_from_filename(file_path)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True

    def list_container_objects(self, container, **kwargs):
        return list(container.bucket.list_blobs())

    def delete_object(self, object, **kwargs):
        """Delete a blob; returns True only when the blob no longer exists."""
        try:
            object.delete()
        except Exception as ex:
            try:
                from google.cloud.exceptions import NotFound
                if isinstance(ex, NotFound):
                    return False
            except ImportError:
                pass
            name = getattr(object, "name", "")
            self.get_logger().warning("Failed deleting object {}: {}".format(name, ex))
            return False

        return not object.exists()

    def get_object(self, container_name, object_name, *args, **kwargs):
        full_container_name = str(furl(scheme=self.scheme, netloc=container_name))
        container = self._containers[full_container_name]
        obj = container.bucket.blob(object_name)
        # remember the container key so download methods can find the config
        obj.container_name = full_container_name
        return obj

    def download_object_as_stream(self, obj, chunk_size=256 * 1024, **_):
        # streaming download is not supported by this driver
        # (the original implementation contained unreachable code after this raise)
        raise NotImplementedError('Unsupported for google storage')

    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        obj.download_to_filename(str(p))

    def test_upload(self, test_path, config, **_):
        """Check get/update IAM permissions on the bucket (or a specific blob under test_path)."""
        bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir))
        bucket = self.get_container(container_name=bucket_url, config=config).bucket

        test_obj = bucket

        if test_path:
            if not test_path.endswith('/'):
                test_path += '/'

            blob = bucket.blob(test_path)

            if blob.exists():
                test_obj = blob

        permissions_to_test = ('storage.objects.get', 'storage.objects.update')
        return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test)

    def get_direct_access(self, remote_path, **_):
        # gs objects are never directly accessible on the local filesystem
        return None
class _AzureBlobServiceStorageDriver(_Driver):
    """Storage driver for Azure blob storage (legacy azure.storage.blob<=2.1.0 SDK)."""
    scheme = 'azure'

    # cache of _Container instances, keyed by container name (class-level, shared)
    _containers = {}

    class _Container(object):
        def __init__(self, name, config):
            try:
                from azure.common import AzureHttpError  # noqa: F401
                from azure.storage.blob import BlockBlobService
            except ImportError:
                raise UsageError(
                    'Azure blob storage driver not found. '
                    'Please install driver using: pip install \"azure.storage.blob<=2.1.0\"'
                )

            self.name = name
            self.config = config
            self.blob_service = BlockBlobService(
                account_name=config.account_name,
                account_key=config.account_key,
            )

    @attrs
    class _Object(object):
        # lightweight handle returned by get_object()
        container = attrib()
        blob_name = attrib()
        content_length = attrib()

    def get_container(self, container_name=None, config=None, **kwargs):
        container_name = container_name or config.container_name
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, config=config)
        # self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]

    def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):
        """Upload bytes/stream content; returns True on success, False on failure."""
        from azure.common import AzureHttpError  # noqa

        # NOTE(review): blob_name is computed but object_name is used below - confirm intended
        blob_name = self._blob_name_from_object_path(object_name, container.name)  # noqa: F841
        try:
            container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024
            container.blob_service.socket_timeout = (300, 2000)
            container.blob_service.create_blob_from_bytes(
                container.name,
                object_name,
                iterator.read() if hasattr(iterator, "read") else bytes(iterator),
                # timeout=300,
                max_connections=2,
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error('Failed uploading (Azure error): %s' % ex)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
        return False

    def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):
        """Upload a local file; returns True on success, False on failure."""
        from azure.common import AzureHttpError  # noqa

        blob_name = self._blob_name_from_object_path(object_name, container.name)
        try:
            from azure.storage.blob import ContentSettings  # noqa
            from mimetypes import guess_type
            container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024
            container.blob_service.socket_timeout = (300, 2000)
            container.blob_service.create_blob_from_path(
                container.name,
                blob_name,
                file_path,
                # timeout=300,
                max_connections=2,
                # guess_type() returns a (type, encoding) tuple; only the type is a valid content_type
                content_settings=ContentSettings(content_type=guess_type(file_path)[0]),
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error('Failed uploading (Azure error): %s' % ex)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
        return False

    def list_container_objects(self, container, ex_prefix=None, **kwargs):
        return list(container.blob_service.list_blobs(container_name=container.name, prefix=ex_prefix))

    def delete_object(self, object, **kwargs):
        """Delete a blob; returns True only when the blob no longer exists."""
        container = object.container
        container.blob_service.delete_blob(
            container.name,
            object.blob_name,
        )
        return not object.container.blob_service.exists(container.name, object.blob_name)

    def get_object(self, container_name, object_name, *args, **kwargs):
        container = self._containers.get(container_name)
        if not container:
            raise StorageError("Container `{}` not found for object {}".format(container_name, object_name))

        # blob_name = self._blob_name_from_object_path(object_name, container_name)
        blob = container.blob_service.get_blob_properties(container.name, object_name)

        return self._Object(container=container, blob_name=blob.name, content_length=blob.properties.content_length)

    def download_object_as_stream(self, obj, verbose, *_, **__):
        """Download the whole blob and return its bytes (not a true stream)."""
        container = obj.container
        total_size_mb = obj.content_length / (1024. * 1024.)
        remote_path = os.path.join(
            "{}://".format(self.scheme),
            container.config.account_name,
            container.name,
            obj.blob_name
        )
        cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())
        blob = container.blob_service.get_blob_to_bytes(
            container.name,
            obj.blob_name,
            progress_callback=cb,
        )
        return blob.content

    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return

        download_done = threading.Event()
        download_done.counter = 0

        def callback_func(current, total):
            if callback:
                # report only the delta since the previous progress call
                chunk = current - download_done.counter
                download_done.counter += chunk
                callback(chunk)
            if current >= total:
                download_done.set()

        container = obj.container
        container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024
        _ = container.blob_service.get_blob_to_path(
            container.name,
            obj.blob_name,
            local_path,
            max_connections=10,
            progress_callback=callback_func,
        )
        download_done.wait()

    def test_upload(self, test_path, config, **_):
        container = self.get_container(config=config)
        try:
            container.blob_service.get_container_properties(container.name)
        except Exception:
            return False
        else:
            # Using the account Key, we can always upload...
            return True

    @classmethod
    def _blob_name_from_object_path(cls, name, container_name):
        """
        Extract the blob name from *name*.

        When *name* is a full 'azure://container/...' URL, validate the container
        and return a ``(container_name, blob_path)`` tuple; otherwise return
        *name* unchanged.

        :raises StorageError: on scheme/container mismatch or missing path
        """
        scheme = urlparse(name).scheme
        if scheme:
            if scheme != cls.scheme:
                raise StorageError(
                    "When using a URL, only the `{}` scheme is supported for Azure storage: {}",
                    cls.scheme,
                    name,
                )

            f = furl(name)

            if not f.path.segments:
                raise StorageError(
                    "Missing container name in URL {}",
                    name,
                )

            parsed_container_name = f.path.segments[0]

            if parsed_container_name != container_name:
                raise StorageError(
                    "Container name mismatch (expected {}, found {}) in {}",
                    container_name,
                    parsed_container_name,
                    name,
                )

            if len(f.path.segments) == 1:
                raise StorageError(
                    "No path found following container name {} in {}",
                    container_name,
                    name,
                )

            return f.path.segments[0], os.path.join(*f.path.segments[1:])

        return name

    def get_direct_access(self, remote_path, **_):
        # azure objects are never directly accessible on the local filesystem
        return None
class _FileStorageDriver(_Driver):
    """
    A base StorageDriver to derive from.
    """

    scheme = "file"
    CHUNK_SIZE = 8096
    # bookkeeping folders that must never be listed as containers/objects
    IGNORE_FOLDERS = ['.lock', '.hash']
    # immutable object record returned by the listing/get APIs
    Object = namedtuple("Object", ['name', 'size', 'extra', 'driver', 'container', 'hash', 'meta_data'])

    class _Container(object):
        # lightweight handle for a directory directly under base_path
        def __init__(self, name, extra, driver):
            self.name = name
            self.extra = extra
            self.driver = driver
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        """
        :param key: root directory of the storage; remaining parameters are
            accepted for driver-interface compatibility and are unused here
        """
        # Use the key as the path to the storage
        self.base_path = key
def _make_path(self, path, ignore_existing=True):
"""
Create a path by checking if it already exists
"""
try:
os.makedirs(path)
except OSError:
exp = sys.exc_info()[1]
if exp.errno == errno.EEXIST and not ignore_existing:
raise exp
def _check_container_name(self, container_name):
"""
Check if the container name is valid
:param container_name: Container name
:type container_name: ``str``
"""
if '/' in container_name or '\\' in container_name:
raise ValueError("Container name \"{}\" cannot contain \\ or / ".format(container_name))
    def _make_container(self, container_name):
        """
        Create a container instance

        :param container_name: Container name.
        :type container_name: ``str``

        :return: A Container instance.
        :raises ValueError: when the name contains path separators
        :raises OSError: when the directory is missing or inaccessible
        """
        container_name = container_name or '.'
        self._check_container_name(container_name)

        full_path = os.path.realpath(os.path.join(self.base_path, container_name))

        try:
            stat = os.stat(full_path)
            if not os.path.isdir(full_path):
                # converted below into the generic "not accessible" OSError
                raise OSError("Target path \"{}\" is not a directory".format(full_path))
        except OSError:
            raise OSError("Target path \"{}\" is not accessible or does not exist".format(full_path))

        extra = {
            'creation_time': stat.st_ctime,
            'access_time': stat.st_atime,
            'modify_time': stat.st_mtime,
        }

        return self._Container(name=container_name, extra=extra, driver=self)
    def _make_object(self, container, object_name):
        """
        Create an object instance

        :param container: Container.
        :type container: :class:`Container`

        :param object_name: Object name.
        :type object_name: ``str``

        :return: A Object instance.
        :raises ValueError: when the path is a directory or cannot be accessed
        """
        full_path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.', object_name))

        if os.path.isdir(full_path):
            raise ValueError("Target path \"{}\" already exist".format(full_path))

        try:
            stat = os.stat(full_path)
        except Exception:
            raise ValueError("Cannot access target path \"{}\"".format(full_path))

        extra = {
            'creation_time': stat.st_ctime,
            'access_time': stat.st_atime,
            'modify_time': stat.st_mtime,
        }

        return self.Object(name=object_name, size=stat.st_size, extra=extra,
                           driver=self, container=container, hash=None, meta_data=None)
def iterate_containers(self):
"""
Return a generator of containers.
:return: A generator of Container instances.
"""
for container_name in os.listdir(self.base_path):
full_path = os.path.join(self.base_path, container_name)
if not os.path.isdir(full_path):
continue
yield self._make_container(container_name)
def _get_objects(self, container):
    """
    Walk the container's directory tree and yield an Object per file,
    skipping any sub-folder listed in IGNORE_FOLDERS.
    """
    root = self.get_container_cdn_url(container, check=True)
    for folder, subfolders, files in os.walk(root, topdown=True):
        # Prune unwanted folders in-place so os.walk never descends into them.
        subfolders[:] = [d for d in subfolders if d not in self.IGNORE_FOLDERS]
        for filename in files:
            rel_name = os.path.relpath(os.path.join(folder, filename), start=root)
            yield self._make_object(container, rel_name)
def iterate_container_objects(self, container):
    """
    Return a generator of objects for the given container.

    :param container: Container instance
    :type container: :class:`Container`
    :return: A generator of Object instances.
    """
    # Delegate to the recursive file-system walker.
    return self._get_objects(container)
def get_container(self, container_name, **_):
    """
    Return a container instance by name.

    :param container_name: Container name.
    :type container_name: ``str``
    :return: A Container instance.
    """
    return self._make_container(container_name)
def get_container_cdn_url(self, container, check=False):
    """
    Return the absolute file-system path backing a container.

    :param container: Container instance (None maps to base_path itself)
    :type container: :class:`Container`
    :param check: When True, raise if the path is not an existing directory
    :type check: ``bool``
    :return: A CDN URL (local absolute path) for this container.
    """
    name = container.name if container else '.'
    path = os.path.realpath(os.path.join(self.base_path, name))
    if check and not os.path.isdir(path):
        raise ValueError("Target path \"{}\" does not exist".format(path))
    return path
def get_object(self, container_name, object_name, **_):
    """
    Return an object instance located inside the named container.

    :param container_name: Container name.
    :type container_name: ``str``
    :param object_name: Object name.
    :type object_name: ``str``
    :return: An Object instance.
    """
    return self._make_object(self._make_container(container_name), object_name)
def get_object_cdn_url(self, obj):
    """
    Return the absolute file-system path of the given object.

    :param obj: Object instance
    :type obj: :class:`Object`
    :return: A CDN URL (local absolute path) for this object.
    """
    parts = (self.base_path, obj.container.name, obj.name)
    return os.path.realpath(os.path.join(*parts))
def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True, **_):
    """
    Download an object to the specified destination path.

    :param obj: Object instance.
    :type obj: :class:`Object`
    :param destination_path: Full path to a file or a directory where the
        incoming file will be saved.
    :type destination_path: ``str``
    :param overwrite_existing: True to overwrite an existing file,
        defaults to False.
    :type overwrite_existing: ``bool``
    :param delete_on_failure: True to delete a partially downloaded file if
        the download was not successful (hash mismatch / file size).
    :type delete_on_failure: ``bool``
    :return: True, if an object has been successfully downloaded, False, otherwise.
    :raises ValueError: If the destination does not exist or would be overwritten
        while overwrite_existing is False.
    """
    obj_path = self.get_object_cdn_url(obj)
    # Empty basename means destination_path is a directory path (trailing separator).
    base_name = os.path.basename(destination_path)
    if not base_name and not os.path.exists(destination_path):
        raise ValueError('Path \"{}\" does not exist'.format(destination_path))
    if not base_name:
        # Destination is a directory: keep the object's own name inside it.
        file_path = os.path.join(destination_path, obj.name)
    else:
        file_path = destination_path
    if os.path.exists(file_path) and not overwrite_existing:
        raise ValueError('File \"{}\" already exists, but overwrite_existing=False'.format(file_path))
    try:
        shutil.copy(obj_path, file_path)
    except IOError:
        if delete_on_failure:
            # Best effort: remove the partial copy; ignore cleanup failures.
            # noinspection PyBroadException
            try:
                os.unlink(file_path)
            except Exception:
                pass
        return False
    return True
def download_object_as_stream(self, obj, chunk_size=None, **_):
    """
    Yield the object's content as a sequence of binary chunks.

    :param obj: Object instance
    :type obj: :class:`Object`
    :param chunk_size: Optional chunk size (in bytes).
    :type chunk_size: ``int``
    :return: A stream of binary chunks of data.
    """
    source_path = self.get_object_cdn_url(obj)
    with open(source_path, 'rb') as stream:
        for chunk in self._read_in_chunks(stream, chunk_size=chunk_size):
            yield chunk
def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, **_):
    """
    Upload an object currently located on a disk.

    :param file_path: Path to the object on disk.
    :type file_path: ``str``
    :param container: Destination container.
    :type container: :class:`Container`
    :param object_name: Object name.
    :type object_name: ``str``
    :param verify_hash: Verify hash (accepted for interface compatibility;
        not used by this local file-system driver).
    :type verify_hash: ``bool``
    :param extra: (optional) Extra attributes (driver specific).
    :type extra: ``dict``
    :return: The newly created Object instance.
    """
    path = self.get_container_cdn_url(container, check=True)
    obj_path = os.path.join(path, object_name)
    # object_name may contain sub-folders -- make sure they exist first.
    base_path = os.path.dirname(obj_path)
    self._make_path(base_path)
    shutil.copy(file_path, obj_path)
    # 0o664: owner/group read-write, world read.
    os.chmod(obj_path, int('664', 8))
    return self._make_object(container, object_name)
def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):
    """
    Upload an object using an iterator or file-like source.

    NOTE(review): unlike what the generic driver contract describes, this
    local implementation writes the whole payload in a single call -- the
    source is fully consumed/buffered in memory. If a file is located on
    disk you are advised to use upload_object instead.

    :type iterator: ``object``
    :param iterator: An object which implements the iterator
        interface and yields binary chunks of data, or exposes read().
    :type container: :class:`Container`
    :param container: Destination container.
    :type object_name: ``str``
    :param object_name: Object name.
    :type extra: ``dict``
    :param extra: (optional) Extra attributes (driver specific).
    :return: The newly created Object instance.
    """
    path = self.get_container_cdn_url(container, check=True)
    obj_path = os.path.join(path, object_name)
    base_path = os.path.dirname(obj_path)
    self._make_path(base_path)
    obj_path = os.path.realpath(obj_path)
    # Text mode for StringIO sources, binary otherwise.
    with open(obj_path, 'wb' if not isinstance(iterator, StringIO) else 'wt') as obj_file:
        # NOTE(review): bytes(iterator) presumably expects a bytes-like object;
        # a true generator of chunks would raise here -- confirm against callers.
        obj_file.write(iterator.read() if hasattr(iterator, 'read') else bytes(iterator))
    os.chmod(obj_path, int('664', 8))
    return self._make_object(container, object_name)
def delete_object(self, obj, **_):
    """
    Delete an object's backing file.

    :type obj: :class:`Object`
    :param obj: Object instance.
    :return: True on success, False if the file could not be removed.
    """
    target = self.get_object_cdn_url(obj)
    # Best-effort removal: any failure (missing file, permissions) reports False.
    try:
        os.unlink(target)
    except Exception:
        return False
    # Empty parent folders are intentionally left in place.
    return True
def create_container(self, container_name):
    """
    Create a new container (a directory under base_path).

    :type container_name: ``str``
    :param container_name: Container name (empty/None maps to '.').
    :return: A Container instance on success.
    :raises ValueError: If the name is illegal, already exists, or the
        directory cannot be created.
    """
    container_name = container_name or '.'
    self._check_container_name(container_name)
    path = os.path.join(self.base_path, container_name)
    try:
        self._make_path(path, ignore_existing=False)
    except OSError as exp:
        # Idiom fix: `except OSError as exp` instead of the dated
        # sys.exc_info()[1] retrieval (works on both py2 and py3).
        if exp.errno == errno.EEXIST:
            raise ValueError('Container \"{}\" with this name already exists. The name '
                             'must be unique among all the containers in the '
                             'system'.format(container_name))
        else:
            raise ValueError('Error creating container \"{}\"'.format(container_name))
    except Exception:
        raise ValueError('Error creating container \"{}\"'.format(container_name))
    return self._make_container(container_name)
def delete_container(self, container):
    """
    Delete a container directory; the container must be empty.

    :type container: :class:`Container`
    :param container: Container instance
    :return: True on success, False otherwise.
    :raises ValueError: If the container still holds any object.
    """
    # Refuse to delete a container that contains at least one object.
    if next(iter(self._get_objects(container)), None) is not None:
        raise ValueError('Container \"{}\" is not empty'.format(container.name))
    target = self.get_container_cdn_url(container, check=True)
    # Best-effort removal of the directory tree.
    # noinspection PyBroadException
    try:
        shutil.rmtree(target)
    except Exception:
        return False
    return True
def list_container_objects(self, container, **kwargs):
    """Return every object in the container as a list (eager version of iterate_container_objects)."""
    return list(self.iterate_container_objects(container))
@staticmethod
def _read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):
    """
    Return a generator which yields data in chunks.

    :param iterator: An object which implements an iterator interface
        or a File like object with read method.
    :type iterator: :class:`object` which implements iterator interface.
    :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
    :type chunk_size: ``int``
    :param fill_size: If True, make sure chunks are exactly chunk_size in
        length (except for last chunk).
    :type fill_size: ``bool``
    :param yield_empty: If true and iterator returned no data, only yield empty
        bytes object
    :type yield_empty: ``bool``
    """
    chunk_size = chunk_size or _FileStorageDriver.CHUNK_SIZE

    # Raw FileIO objects are read in fixed-size chunks; anything else
    # (including buffered readers and plain iterators) is consumed via next().
    # io.FileIO exists on py2 and py3, so the six.PY3 guard is unnecessary.
    from io import FileIO
    if isinstance(iterator, FileIO):
        get_data = iterator.read
        args = (chunk_size,)
    else:
        get_data = next
        args = (iterator,)

    # Bug fix: bytes('') raises TypeError on Python 3
    # ("string argument without an encoding") -- use a bytes literal.
    data = b''
    empty = False

    while not empty or len(data) > 0:
        if not empty:
            try:
                chunk = bytes(get_data(*args))
                if len(chunk) > 0:
                    data += chunk
                else:
                    empty = True
            except StopIteration:
                empty = True

        if len(data) == 0:
            if empty and yield_empty:
                yield b''
            return

        if fill_size:
            # Emit exact chunk_size pieces; flush the remainder once the
            # source is exhausted.
            if empty or len(data) >= chunk_size:
                yield data[:chunk_size]
                data = data[chunk_size:]
        else:
            yield data
            data = b''
def get_direct_access(self, remote_path, **_):
    """
    Return the local file-system path behind a file:// URL.

    :param remote_path: Remote path (normalized to a canonical file:// URL first).
    :return: POSIX-style local path.
    :raises ValueError: If the path does not exist.
    """
    # Conform to a full "file://..." URL, then drop the 7-character scheme prefix.
    local = Path(StorageHelper.conform_url(remote_path)[7:])
    if not local.exists():
        raise ValueError("Requested path does not exist: {}".format(local))
    return local.as_posix()
def test_upload(self, test_path, config, **kwargs):
return True
# Collect every URL scheme any driver subclass declares, either as a single
# `scheme` attribute or as a `schemes` list; falsy entries are dropped.
driver_schemes = {
    scheme
    for cls in _Driver.__subclasses__()
    for scheme in [getattr(cls, "scheme", None)] + list(getattr(cls, "schemes", []))
    if scheme
}

# All remote schemes, i.e. everything except the local "file" driver.
remote_driver_schemes = driver_schemes - {_FileStorageDriver.scheme}
from __future__ import with_statement
import errno
import getpass
import itertools
import json
import os
import shutil
import sys
import threading
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from datetime import datetime
from multiprocessing.pool import ThreadPool
from tempfile import mktemp
from time import time
from types import GeneratorType
import requests
import six
from _socket import gethostname
from attr import attrs, attrib, asdict
from furl import furl
from pathlib2 import Path
from requests.exceptions import ConnectionError
from six import binary_type, StringIO
from six.moves.queue import Queue, Empty
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import url2pathname
from .callbacks import UploadProgressReport, DownloadProgressReport
from .util import quote_url
from ..backend_api.utils import get_http_session_with_retry
from ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations
from ..config import config, deferred_config
from ..debugging import get_logger
from ..errors import UsageError
class StorageError(Exception):
    """Base error raised for storage configuration and access failures."""
class DownloadError(Exception):
    """Raised when a download fails in a way callers should not retry silently."""
@six.add_metaclass(ABCMeta)
class _Driver(object):
    """Abstract interface every storage backend driver must implement."""

    @classmethod
    def get_logger(cls):
        return get_logger('storage')

    @abstractmethod
    def get_container(self, container_name, config=None, **kwargs):
        """Return a handle to the named container/bucket."""
        pass

    @abstractmethod
    def test_upload(self, test_path, config, **kwargs):
        """Verify that uploads to test_path are possible."""
        pass

    @abstractmethod
    def upload_object_via_stream(self, iterator, container, object_name, extra, **kwargs):
        """Upload data from an iterator/stream into container/object_name."""
        pass

    @abstractmethod
    def list_container_objects(self, container, ex_prefix, **kwargs):
        """List objects in a container, optionally filtered by prefix."""
        pass

    @abstractmethod
    def get_direct_access(self, remote_path, **kwargs):
        """Return a direct local path for remote_path, or None when not locally accessible."""
        pass

    @abstractmethod
    def download_object(self, obj, local_path, overwrite_existing, delete_on_failure, callback, **kwargs):
        """Download obj into local_path, reporting progress through callback."""
        pass

    @abstractmethod
    def download_object_as_stream(self, obj, chunk_size, **kwargs):
        """Return an iterator of binary chunks for obj."""
        pass

    @abstractmethod
    def delete_object(self, obj, **kwargs):
        """Delete obj; return truthy on success."""
        pass

    @abstractmethod
    def upload_object(self, file_path, container, object_name, extra, **kwargs):
        """Upload a local file into container/object_name."""
        pass

    @abstractmethod
    def get_object(self, container_name, object_name, **kwargs):
        """Return a handle for an existing stored object."""
        pass
class StorageHelper(object):
    """High-level storage facade: resolves a URL to the matching backend driver
    and exposes upload/download/list/delete operations over it."""

    # Suffix appended to in-flight downloads; the file is renamed to its final
    # name only after the download completes successfully.
    _temp_download_suffix = '.partially'

    @classmethod
    def _get_logger(cls):
        return get_logger('storage')
@attrs
class _PathSubstitutionRule(object):
    """A single registered-prefix -> local-prefix URL rewrite rule."""

    registered_prefix = attrib(type=str)  # prefix as registered in the backend
    local_prefix = attrib(type=str)  # prefix to substitute locally
    replace_windows_sep = attrib(type=bool)  # rewrite '\' separators after the prefix
    replace_linux_sep = attrib(type=bool)  # rewrite '/' separators after the prefix

    # Configuration key holding the list of substitution rules.
    path_substitution_config = 'storage.path_substitution'

    @classmethod
    def load_list_from_config(cls):
        """Build the rule list from configuration, logging and skipping invalid entries."""
        rules_list = []
        for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())):
            rule = cls(
                registered_prefix=sub_config.get('registered_prefix', None),
                local_prefix=sub_config.get('local_prefix', None),
                replace_windows_sep=sub_config.get('replace_windows_sep', False),
                replace_linux_sep=sub_config.get('replace_linux_sep', False),
            )
            # Both prefixes are mandatory.
            if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)):
                StorageHelper._get_logger().warning(
                    "Illegal substitution rule configuration '{}[{}]': {}".format(
                        cls.path_substitution_config,
                        index,
                        asdict(rule),
                    ))
                continue
            # The two separator-replacement flags are mutually exclusive.
            if all((rule.replace_windows_sep, rule.replace_linux_sep)):
                StorageHelper._get_logger().warning(
                    "Only one of replace_windows_sep and replace_linux_sep flags may be set."
                    "'{}[{}]': {}".format(
                        cls.path_substitution_config,
                        index,
                        asdict(rule),
                    ))
                continue
            rules_list.append(rule)
        return rules_list
class _UploadData(object):
    """Immutable bundle describing one pending (possibly asynchronous) upload."""

    @property
    def src_path(self):
        return self._src_path

    @property
    def dest_path(self):
        return self._dest_path

    @property
    def extra(self):
        return self._extra

    @property
    def callback(self):
        return self._callback

    @property
    def retries(self):
        return self._retries

    def __init__(self, src_path, dest_path, extra, callback, retries):
        self._src_path = src_path
        self._dest_path = dest_path
        self._extra = extra
        self._callback = callback
        self._retries = retries

    def __str__(self):
        return "src=%s" % self.src_path

# Helper-instance cache keyed by (base_url, thread ident) -- see StorageHelper.get().
_helpers = {}
# Event used to signal async upload threads to stop.
_terminate = threading.Event()
# Live async-upload worker threads (joined by terminate_uploads).
_async_upload_threads = set()
_upload_pool = None

# Lazily-parsed credential configurations per backend.
_s3_configurations = deferred_config('aws.s3', {}, transform=S3BucketConfigurations.from_config)
_gs_configurations = deferred_config('google.storage', {}, transform=GSBucketConfigurations.from_config)
_azure_configurations = deferred_config('azure.storage', {}, transform=AzureContainerConfigurations.from_config)
_path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config)
@property
def log(self):
    """Logger used by this helper instance."""
    return self._log

@property
def scheme(self):
    """URL scheme this helper was created for (e.g. 's3', 'gs', 'azure', 'file')."""
    return self._scheme

@property
def secure(self):
    """Whether the backend connection is secure (driver/configuration dependent)."""
    return self._secure

@property
def base_url(self):
    """Resolved base URL all object names are relative to."""
    return self._base_url
@classmethod
def get(cls, url, logger=None, **kwargs):
    """
    Return a cached StorageHelper for *url* (one instance per base-URL per
    thread), creating it on first use. Returns None when construction fails.
    """
    # Handle URL substitution etc before locating the correct storage driver
    url = cls._canonize_url(url)

    # Get the credentials we should use for this url
    base_url = cls._resolve_base_url(url)

    instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0)

    # '__force_create' bypasses the per-thread instance cache.
    force_create = kwargs.pop('__force_create', False)
    if (instance_key in cls._helpers) and (not force_create):
        return cls._helpers[instance_key]

    # Don't canonize URL since we already did it
    try:
        instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs)
    except (StorageError, UsageError) as ex:
        cls._get_logger().error(str(ex))
        return None
    except Exception as ex:
        cls._get_logger().error("Failed creating storage object {} Reason: {}".format(
            base_url or url, ex))
        return None

    cls._helpers[instance_key] = instance
    return instance
@classmethod
def get_local_copy(cls, remote_url):
    """
    Download a remote object into a fresh temp file and return its path.

    :param remote_url: Remote URL to fetch.
    :return: Local file path, or None when no helper matches the URL.
    """
    helper = cls.get(remote_url)
    if helper is None:
        return None
    # Keep the original file name (last path component) as the temp suffix.
    suffix = '.' + remote_url.split('/')[-1].split(os.path.sep)[-1]
    # NOTE(review): mktemp is race-prone; kept because download_to_file
    # refuses to overwrite an existing file, so pre-creating it would break.
    target = mktemp(suffix=suffix)
    return helper.download_to_file(remote_url, target)
def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5,
             **kwargs):
    """
    Build a helper bound to the storage driver matching *url*'s scheme.

    :param base_url: Resolved base URL (bucket/container root or scheme prefix).
    :param url: Full destination URL.
    :param key: Optional S3 access key override.
    :param secret: Optional S3 secret override.
    :param region: Optional S3 region override.
    :param verbose: Verbose logging for transfers.
    :param logger: Optional logger (defaults to the storage logger).
    :param retries: Retry count passed to drivers that support it.
    :raises StorageError: When required cloud credentials/configuration are missing.
    :raises ValueError: When S3 key/secret are missing and no credentials chain is used.
    """
    level = config.get('storage.log.level', None)
    if level:
        try:
            self._get_logger().setLevel(level)
        except (TypeError, ValueError):
            self._get_logger().error('invalid storage log level in configuration: %s' % level)
    self._log = logger or self._get_logger()
    self._verbose = verbose
    self._retries = retries
    self._extra = {}
    self._base_url = base_url
    self._secure = True
    self._driver = None
    self._container = None
    self._conf = None

    if kwargs.get('canonize_url', True):
        url = self._canonize_url(url)

    parsed = urlparse(url)
    self._scheme = parsed.scheme

    if self._scheme == _AzureBlobServiceStorageDriver.scheme:
        self._conf = copy(self._azure_configurations.get_config_by_uri(url))
        if self._conf is None:
            raise StorageError("Missing Azure Blob Storage configuration for {}".format(url))
        if not self._conf.account_name or not self._conf.account_key:
            raise StorageError(
                "Missing account name or key for Azure Blob Storage access for {}".format(base_url)
            )
        self._driver = _AzureBlobServiceStorageDriver()
        self._container = self._driver.get_container(config=self._conf)

    elif self._scheme == _Boto3Driver.scheme:
        self._conf = copy(self._s3_configurations.get_config_by_uri(url))
        self._secure = self._conf.secure

        final_region = region if region else self._conf.region
        if not final_region:
            final_region = None

        self._conf.update(
            key=key or self._conf.key,
            secret=secret or self._conf.secret,
            multipart=self._conf.multipart,
            region=final_region,
            use_credentials_chain=self._conf.use_credentials_chain
        )

        if not self._conf.use_credentials_chain:
            if not self._conf.key or not self._conf.secret:
                raise ValueError(
                    "Missing key and secret for S3 storage access (%s)" % base_url
                )

        self._driver = _Boto3Driver()
        self._container = self._driver.get_container(container_name=self._base_url, retries=retries,
                                                     config=self._conf)

    elif self._scheme == _GoogleCloudStorageDriver.scheme:
        self._conf = copy(self._gs_configurations.get_config_by_uri(url))
        self._driver = _GoogleCloudStorageDriver()
        self._container = self._driver.get_container(
            container_name=self._base_url,
            config=self._conf
        )

    elif self._scheme in _HttpDriver.schemes:
        self._driver = _HttpDriver(retries=retries)
        self._container = self._driver.get_container(container_name=self._base_url)

    else:
        # Fall back to the local file-system driver.
        if self._scheme == 'file':
            # url2pathname operates on the urlparse(...).path component.
            url = parsed.path
        url = url.replace("\\", "/")
        driver_uri = url2pathname(url)
        path_driver_uri = Path(driver_uri)
        # FIXED(review): this line was corrupted in the source
        # ("h_driver_uri.root))"); restored to anchor the local driver at
        # the path's filesystem root -- confirm against upstream history.
        self._driver = _FileStorageDriver(str(path_driver_uri.root))
        self._container = None
@classmethod
def terminate_uploads(cls, force=True, timeout=2.0):
    """
    Stop outstanding asynchronous uploads.

    :param force: When True, return immediately without signalling or
        joining the upload threads (they are simply abandoned).
    :param timeout: Total seconds to spread across joining all threads
        when force=False.
    """
    if force:
        # NOTE(review): force=True skips the graceful shutdown below --
        # this inversion-looking behavior appears intentional; confirm.
        return
    cls._terminate.set()
    remaining_timeout = timeout
    for thread in cls._async_upload_threads:
        t = time()
        # noinspection PyBroadException
        try:
            thread.join(timeout=remaining_timeout)
        except Exception:
            pass
        # Budget the remaining wait time across the rest of the threads.
        remaining_timeout -= (time() - t)
@classmethod
def get_configuration(cls, bucket_config):
    """Look up the stored S3 configuration matching the given bucket/host pair."""
    return cls._s3_configurations.get_config_by_bucket(
        bucket_config.bucket, bucket_config.host)
@classmethod
def add_configuration(cls, bucket_config, log=None, _test_config=True):
    """
    Register (or reuse) S3 credentials for a bucket.

    A valid bucket_config replaces any existing entry for the same bucket;
    an invalid one falls back to existing credentials or global defaults.

    :param bucket_config: Bucket configuration to register.
    :param log: Optional logger for informational/warning messages.
    :param _test_config: When True, probe the bucket before registering.
    """
    # A config without key/secret means we should reuse what we already have.
    use_existing = not bucket_config.is_valid()

    existing = cls.get_configuration(bucket_config)

    configs = cls._s3_configurations

    if not use_existing:
        # New credentials provided: optionally test, then replace any old entry.
        if _test_config:
            _Boto3Driver._test_bucket_config(bucket_config, log)

        if existing:
            if log:
                log.warning('Overriding existing configuration for %s/%s'
                            % (existing.host or 'AWS', existing.bucket))
            configs.remove_config(existing)
    else:
        # Reuse existing credentials if they still work.
        good_config = False
        if existing:
            if log:
                log.info('Using existing credentials for bucket %s/%s'
                         % (bucket_config.host or 'AWS', bucket_config.bucket))
            good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False)

        if not good_config:
            # Fall back to global defaults for this bucket.
            configs.update_config_with_defaults(bucket_config)

            if log:
                log.info('Using global credentials for bucket %s/%s'
                         % (bucket_config.host or 'AWS', bucket_config.bucket))
            if _test_config:
                _Boto3Driver._test_bucket_config(bucket_config, log)
        else:
            # Existing configuration is fine -- nothing to register.
            return

    configs.add_config(bucket_config)
@classmethod
def add_path_substitution(
        cls,
        registered_prefix,
        local_prefix,
        replace_windows_sep=False,
        replace_linux_sep=False,
):
    """
    Register a runtime URL-prefix substitution rule.

    :param registered_prefix: Prefix as registered in the backend.
    :param local_prefix: Prefix to substitute locally.
    :param replace_windows_sep: Also rewrite '\\' separators after the prefix.
    :param replace_linux_sep: Also rewrite '/' separators after the prefix.
    :raises UsageError: On empty prefixes or when both separator flags are set.
    """
    if not (registered_prefix and local_prefix):
        raise UsageError("Path substitution prefixes must be non empty strings")
    if replace_windows_sep and replace_linux_sep:
        raise UsageError("Only one of replace_windows_sep and replace_linux_sep may be set.")
    cls._path_substitutions.append(
        cls._PathSubstitutionRule(
            registered_prefix=registered_prefix,
            local_prefix=local_prefix,
            replace_windows_sep=replace_windows_sep,
            replace_linux_sep=replace_linux_sep,
        )
    )
@classmethod
def clear_path_substitutions(cls):
    """Remove every registered path-substitution rule."""
    cls._path_substitutions = list()
def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True):
    """
    Verify that the destination folder URI is writable; returns the
    conformed (base-url-prefixed) folder URI.

    :param folder_uri: Destination folder URI (may be relative to base URL).
    :param raise_on_error: Forwarded to the S3 bucket probe.
    :param log_on_error: Forwarded to the S3 bucket probe.
    :return: The conformed folder URI.
    """
    folder_uri = self._canonize_url(folder_uri)
    folder_uri = self.conform_url(folder_uri, self._base_url)

    test_path = self._normalize_object_name(folder_uri)

    if self._scheme == _Boto3Driver.scheme:
        _Boto3Driver._test_bucket_config(
            self._conf,
            self._log,
            test_path=test_path,
            raise_on_error=raise_on_error,
            log_on_error=log_on_error,
        )
    elif self._scheme == _GoogleCloudStorageDriver.scheme:
        self._driver.test_upload(test_path, self._conf)
    elif self._scheme == 'file':
        # Local path: create the folder, then touch it to confirm writability.
        # NOTE(review): touch() targets the directory path itself -- confirm
        # this is the intended writability probe.
        Path(test_path).mkdir(parents=True, exist_ok=True)
        Path(test_path).touch(exist_ok=True)

    return folder_uri
def upload_from_stream(self, stream, dest_path, extra=None, retries=1):
    """
    Upload a stream to dest_path with up to *retries* attempts.

    :param stream: Source stream/iterator of binary data.
    :param dest_path: Destination path/URL.
    :param extra: Driver-specific extra attributes (merged with self._extra).
    :param retries: Number of attempts; the stream is rewound between tries
        when it supports seek().
    :return: The destination path (URL-quoted for http/s schemes).
    :raises Exception: Re-raises the last upload error when all retries fail.
    """
    dest_path = self._canonize_url(dest_path)
    object_name = self._normalize_object_name(dest_path)
    extra = extra.copy() if extra else {}
    extra.update(self._extra)
    last_ex = None
    cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log)
    for i in range(max(1, retries)):
        try:
            self._driver.upload_object_via_stream(
                iterator=stream,
                container=self._container,
                object_name=object_name,
                callback=cb,
                extra=extra)
            last_ex = None
            break
        except Exception as ex:
            last_ex = ex
            # Rewind the stream (when possible) before retrying.
            # noinspection PyBroadException
            try:
                stream.seek(0)
            except Exception:
                pass
    if last_ex:
        raise last_ex

    if self.scheme in _HttpDriver.schemes:
        # Quote the destination link for http/https schemes.
        dest_path = quote_url(dest_path)

    return dest_path
def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None, retries=1):
    """
    Upload a local file, synchronously or via the async upload pool.

    :param src_path: Local source file path.
    :param dest_path: Destination path/URL (defaults to the source file name).
    :param extra: Driver-specific extra attributes.
    :param async_enable: When True, queue the upload and return an AsyncResult.
    :param cb: Optional completion callback; receives the destination path
        (URL-quoted for http/s) or a falsy value on failure.
    :param retries: Upload retry count.
    :return: Destination path (sync) or an AsyncResult (async).
    """
    if not dest_path:
        dest_path = os.path.basename(src_path)

    dest_path = self._canonize_url(dest_path)

    if cb and self.scheme in _HttpDriver.schemes:
        # Wrap the callback so http/s destinations are reported URL-quoted.
        a_cb = cb

        def callback(a_path):
            return a_cb(quote_url(a_path) if a_path else a_path)
        cb = callback

    if async_enable:
        data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb, retries=retries)
        StorageHelper._initialize_upload_pool()
        return StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,))
    else:
        res = self._do_upload(src_path, dest_path, extra, cb, verbose=False, retries=retries)
        if res:
            res = quote_url(res)
        return res
def list(self, prefix=None):
    """
    Return object names under this helper's base URL.

    :param prefix: Optional prefix filter; may include the base URL itself,
        in which case it is stripped before querying the driver.
    :return: List of object names (the exact prefix itself is excluded).
    """
    if prefix:
        if prefix.startswith(self._base_url):
            prefix = prefix[len(self.base_url):].lstrip("/")
        try:
            res = self._driver.list_container_objects(self._container, ex_prefix=prefix)
        except TypeError:
            # Driver does not accept ex_prefix -- list everything and filter below.
            res = self._driver.list_container_objects(self._container)
        return [
            obj.name
            for obj in res if
            obj.name.startswith(prefix) and obj.name != prefix
        ]
    else:
        return [obj.name for obj in self._driver.list_container_objects(self._container)]
def download_to_file(self, remote_path, local_path, overwrite_existing=False, delete_on_failure=True, verbose=None):
    """
    Download a remote object into a local file.

    Downloads into a unique ".partially" temp file first and renames it on
    success so concurrent downloads do not clobber each other.

    :param remote_path: Remote object URL (substitution rules applied first).
    :param local_path: Destination file path.
    :param overwrite_existing: When False and local_path already exists,
        skip the download and return local_path as-is.
    :param delete_on_failure: Remove the partial temp file when the download fails.
    :param verbose: Override the helper's verbosity for this call.
    :return: local_path on success (or a direct-access path for local URLs),
        None on failure.
    """
    def next_chunk(astream):
        # Pull the next chunk from a source that may be raw bytes, an
        # iterator of chunks, or already exhausted (None).
        if isinstance(astream, binary_type):
            chunk = astream
            astream = None
        elif astream:
            try:
                chunk = next(astream)
            except StopIteration:
                chunk = None
        else:
            chunk = None
        return chunk, astream

    remote_path = self._canonize_url(remote_path)
    verbose = self._verbose if verbose is None else verbose

    # Locally-accessible objects need no copy at all.
    direct_access_path = self._driver.get_direct_access(remote_path)
    if direct_access_path:
        return direct_access_path

    temp_local_path = None
    try:
        if verbose:
            self._log.info('Start downloading from %s' % remote_path)
        if not overwrite_existing and Path(local_path).is_file():
            self._log.warning(
                'File {} already exists, no need to download, thread id = {}'.format(
                    local_path,
                    threading.current_thread().ident,
                ),
            )
            return local_path
        # Unique temp name so parallel downloads of the same target never collide.
        temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix)
        obj = self._get_object(remote_path)
        if not obj:
            return None

        # object size in bytes
        total_size_mb = -1
        dl_total_mb = 0.
        download_reported = False
        # chunks size is ignored and always 5Mb
        chunk_size_mb = 5

        # make sure we have the destination folder
        # noinspection PyBroadException
        Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)

        # try to get file size
        try:
            if isinstance(self._driver, _HttpDriver) and obj:
                obj = self._driver._get_download_object(obj)
                total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024)
            elif hasattr(obj, 'size'):
                size = obj.size
                # Google storage has the option to reload the object to get the size
                if size is None and hasattr(obj, 'reload'):
                    obj.reload()
                    size = obj.size
                total_size_mb = 0 if size is None else float(size) / (1024 * 1024)
            elif hasattr(obj, 'content_length'):
                total_size_mb = float(obj.content_length) / (1024 * 1024)
        except (ValueError, AttributeError, KeyError):
            pass

        # if driver supports download with callback, use it (it might be faster)
        if hasattr(self._driver, 'download_object'):
            # callback
            cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self._log)
            self._driver.download_object(obj, temp_local_path, callback=cb)
            download_reported = bool(cb.last_reported)
            dl_total_mb = cb.current_status_mb
        else:
            stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)
            if stream is None:
                raise ValueError('Could not download %s' % remote_path)
            with open(temp_local_path, 'wb') as fd:
                data, stream = next_chunk(stream)
                while data:
                    fd.write(data)
                    data, stream = next_chunk(stream)

        if Path(temp_local_path).stat().st_size <= 0:
            raise Exception('downloaded a 0-sized file')

        # if we are on windows, we need to remove the target file before renaming
        # otherwise posix rename will overwrite the target
        if os.name != 'posix':
            # noinspection PyBroadException
            try:
                os.remove(local_path)
            except Exception:
                pass

        # rename temp file to local_file
        # noinspection PyBroadException
        try:
            os.rename(temp_local_path, local_path)
        except Exception:
            # noinspection PyBroadException
            try:
                os.unlink(temp_local_path)
            except Exception:
                pass
            # file was downloaded by a parallel process, check we have the final output and delete the partial copy
            path_local_path = Path(local_path)
            if not path_local_path.is_file() or path_local_path.stat().st_size <= 0:
                raise Exception('Failed renaming partial file, downloaded file exists and a 0-sized file')

        # report download if we are on the second chunk
        if verbose or download_reported:
            self._log.info(
                'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path))
        return local_path
    except DownloadError:
        raise
    except Exception as e:
        self._log.error("Could not download {} , err: {} ".format(remote_path, e))
        if delete_on_failure:
            # noinspection PyBroadException
            try:
                if temp_local_path:
                    os.remove(temp_local_path)
            except Exception:
                pass
        return None
def download_as_stream(self, remote_path, chunk_size=None):
    """
    Return a chunked byte stream of the remote object, or None on failure.

    :param remote_path: Remote object URL (substitution rules applied first).
    :param chunk_size: Optional chunk size forwarded to the driver.
    """
    remote_path = self._canonize_url(remote_path)
    try:
        remote_obj = self._get_object(remote_path)
        return self._driver.download_object_as_stream(
            remote_obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log
        )
    except DownloadError:
        # Explicit download failures propagate untouched.
        raise
    except Exception as e:
        self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e)))
        return None
def download_as_nparray(self, remote_path, chunk_size=None):
    """
    Download a remote object and return its raw bytes as a uint8 numpy array.

    :param remote_path: Remote object URL.
    :param chunk_size: Optional chunk size forwarded to download_as_stream.
    :return: numpy uint8 array of the raw content, or None on failure.
    """
    try:
        stream = self.download_as_stream(remote_path, chunk_size)
        if stream is None:
            return

        # TODO: ugly py3 hack, please remove ASAP
        if six.PY3 and not isinstance(stream, GeneratorType):
            import numpy as np
            return np.frombuffer(stream, dtype=np.uint8)
        else:
            import numpy as np
            # Generator of chunks: join into one bytes object first.
            return np.asarray(bytearray(b''.join(stream)), dtype=np.uint8)
    except Exception as e:
        self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e)))
def delete(self, path):
    """Delete the remote object at *path*; returns the driver's result."""
    obj = self._get_object(path)
    return self._driver.delete_object(obj)
def check_write_permissions(self, dest_path=None):
    """
    Verify the destination is writable by uploading and deleting a probe file.

    :param dest_path: Destination to probe (defaults to the helper's base URL).
    :return: True when writable; http/https destinations are assumed writable.
    :raises ValueError: If the probe round-trip fails.
    """
    base_url = dest_path or self._base_url
    probe_path = base_url + '/.clearml.test'
    # http/https destinations are not probed.
    if probe_path.startswith('http'):
        return True
    try:
        self.upload_from_stream(stream=six.BytesIO(b'clearml'), dest_path=probe_path)
        self.delete(path=probe_path)
    except Exception:
        raise ValueError('Insufficient permissions for {}'.format(base_url))
    return True
@classmethod
def download_from_url(cls, remote_url, local_path, overwrite_existing=False):
    """
    Download a remote URL straight to *local_path*.

    :return: The local path on success, None when no storage helper matches.
    """
    helper = cls.get(remote_url)
    if helper is None:
        return None
    return helper.download_to_file(remote_url, local_path, overwrite_existing=overwrite_existing)
@classmethod
def _canonize_url(cls, url):
    """Apply every registered path-substitution rule to *url*."""
    return cls._apply_url_substitutions(url)
@classmethod
def _apply_url_substitutions(cls, url):
    """
    Rewrite *url* using the first matching substitution rule: swap the
    registered prefix for the local one, optionally normalizing separators
    in the remainder of the path.
    """
    def _swap_sep(candidate, start, sep):
        # Replace separators only after the substituted prefix.
        return candidate[:start] + candidate[start:].replace(sep, os.sep)

    for rule in cls._path_substitutions:
        if not url.startswith(rule.registered_prefix):
            continue
        # Replace only the first occurrence of the registered prefix.
        url = url.replace(rule.registered_prefix, rule.local_prefix, 1)
        if rule.replace_windows_sep:
            url = _swap_sep(url, len(rule.local_prefix), '\\')
        if rule.replace_linux_sep:
            url = _swap_sep(url, len(rule.local_prefix), '/')
        break

    return url
@classmethod
def _resolve_base_url(cls, base_url):
    """
    Map a full URL to its canonical base: bucket/container root for cloud
    schemes, bare scheme prefix for http(s), and 'file://' for anything else.
    """
    parsed = urlparse(base_url)
    if parsed.scheme == _Boto3Driver.scheme:
        conf = cls._s3_configurations.get_config_by_uri(base_url)
        bucket = conf.bucket
        if not bucket:
            # No configured bucket: take the first URL path component.
            parts = Path(parsed.path.strip('/')).parts
            if parts:
                bucket = parts[0]
        return '/'.join(x for x in ('s3:/', conf.host, bucket) if x)
    elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme:
        conf = cls._azure_configurations.get_config_by_uri(base_url)
        if not conf:
            raise StorageError("Can't find azure configuration for {}".format(base_url))
        return str(furl(base_url).set(path=conf.container_name))
    elif parsed.scheme == _GoogleCloudStorageDriver.scheme:
        conf = cls._gs_configurations.get_config_by_uri(base_url)
        return str(furl(scheme=parsed.scheme, netloc=conf.bucket))
    elif parsed.scheme == 'http':
        return 'http://'
    elif parsed.scheme == 'https':
        return 'https://'
    else:
        # Unknown scheme: treat as a local file path.
        return 'file://'
@classmethod
def conform_url(cls, folder_uri, base_url=None):
    """
    Ensure *folder_uri* carries its base-URL prefix. Only file:// URIs are
    amended automatically; any other mismatching base raises.

    :raises ValueError: If folder_uri does not start with a non-file base URL.
    """
    if not folder_uri:
        return folder_uri
    effective_base = base_url or cls._resolve_base_url(folder_uri)
    if folder_uri.startswith(effective_base):
        return folder_uri
    if effective_base != 'file://':
        raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, effective_base))
    # Local path: absolutize and graft the file:// scheme on.
    original_uri = folder_uri
    folder_uri = str(Path(folder_uri).absolute())
    if folder_uri.startswith('/'):
        folder_uri = effective_base + folder_uri
    else:
        folder_uri = '/'.join((effective_base, folder_uri))
    cls._get_logger().debug('Upload destination {} amended to {} for registration purposes'.format(
        original_uri, folder_uri))
    return folder_uri
def _absolute_object_name(self, path):
    """Prefix *path* with the helper's base URL unless it already carries it."""
    if path.startswith(self.base_url):
        return path
    return self.base_url.rstrip('/') + '///' + path.lstrip('/')
def _normalize_object_name(self, path):
    """Strip the base URL (and scheme-specific leading slashes) from *path*."""
    if path.startswith(self.base_url):
        path = path[len(self.base_url):]
        # A leading separator breaks local paths on Windows.
        if path.startswith('/') and os.name == 'nt':
            path = path[1:]
    cloud_schemes = (_Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme,
                     _AzureBlobServiceStorageDriver.scheme)
    if self.scheme in cloud_schemes:
        path = path.lstrip('/')
    return path
def _do_async_upload(self, data):
    """Async-pool entry point; *data* is an _UploadData bundle."""
    assert isinstance(data, self._UploadData)
    return self._do_upload(
        data.src_path, data.dest_path, extra=data.extra, cb=data.callback,
        verbose=True, retries=data.retries)
def _upload_from_file(self, local_path, dest_path, extra=None):
if not hasattr(self._driver, 'upload_object'):
with open(local_path, 'rb') as stream:
res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra)
else:
object_name = self._normalize_object_name(dest_path)
extra = extra.copy() if extra else {}
extra.update(self._extra)
cb = UploadProgressReport.from_file(local_path, self._verbose, self._log)
res = self._driver.upload_object(
file_path=local_path,
container=self._container,
object_name=object_name,
callback=cb,
extra=extra)
return res
    def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False, retries=1):
        """Upload a local file to *dest_path* with retries and progress callbacks.

        Callback protocol: cb(None) before the upload starts, cb(False) on
        failure, cb(dest_path) on success; callback errors are logged but
        never propagated.

        :return: dest_path on success
        :raises: the last upload exception (or ValueError) after all retries fail
        """
        object_name = self._normalize_object_name(dest_path)
        if cb:
            try:
                cb(None)
            except Exception as e:
                self._log.error("Calling upload callback when starting upload: %s" % str(e))
        if verbose:
            msg = 'Starting upload: {} => {}{}'.format(
                src_path,
                (self._container.name if self._container.name.endswith('/') else self._container.name + '/')
                if self._container and self._container.name else '', object_name)
            # local/file destinations are logged quietly; remote ones at info level
            if object_name.startswith('file://') or object_name.startswith('/'):
                self._log.debug(msg)
            else:
                self._log.info(msg)
        last_ex = None
        for i in range(max(1, retries)):
            try:
                # a falsy result counts as a failed attempt and triggers a retry
                if not self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra):
                    last_ex = ValueError("Upload failed")
                    continue
                last_ex = None
                break
            except Exception as e:
                last_ex = e
        if last_ex:
            self._log.error("Exception encountered while uploading %s" % str(last_ex))
            if cb:
                try:
                    cb(False)
                except Exception as e:
                    self._log.warning("Exception on upload callback: %s" % str(e))
            raise last_ex
        if verbose:
            self._log.debug("Finished upload: %s => %s" % (src_path, object_name))
        if cb:
            try:
                cb(dest_path)
            except Exception as e:
                self._log.warning("Exception on upload callback: %s" % str(e))
        return dest_path
    def _get_object(self, path):
        """Fetch the driver object for *path*; returns None (with a warning) on non-connection errors."""
        object_name = self._normalize_object_name(path)
        try:
            return self._driver.get_object(
                container_name=self._container.name if self._container else '', object_name=object_name)
        except ConnectionError:
            raise DownloadError
        except Exception as e:
            # NOTE(review): sibling methods use self._log; confirm a `log` property/attribute exists
            self.log.warning('Storage helper problem for {}: {}'.format(str(object_name), str(e)))
            return None
    @staticmethod
    def _initialize_upload_pool():
        """Lazily create the shared single-worker pool used for async uploads."""
        if not StorageHelper._upload_pool:
            StorageHelper._upload_pool = ThreadPool(processes=1)
@staticmethod
def close_async_threads():
if StorageHelper._upload_pool:
pool = StorageHelper._upload_pool
StorageHelper._upload_pool = None
try:
pool.terminate()
pool.join()
except Exception:
pass
class _HttpDriver(_Driver):
    """Storage driver that uploads/downloads objects over plain HTTP(S)."""
    # (connect, read) timeouts in seconds for requests calls
    timeout = (5.0, 30.)
    # assumed minimal upload speed (KB/s), used to scale the upload timeout to stream size
    min_kbps_speed = 50
    schemes = ('http', 'https')
    class _Container(object):
        """A base URL plus a retrying requests session; one per host prefix."""
        _default_backend_session = None
        _default_files_server_host = None
        def __init__(self, name, retries=5, **kwargs):
            self.name = name
            self.session = get_http_session_with_retry(total=retries, connect=retries, read=retries, redirect=retries)
        def get_headers(self, url):
            """Return auth headers for requests against the default files server, else None."""
            if not self._default_backend_session:
                from ..backend_interface.base import InterfaceBase
                self._default_backend_session = InterfaceBase._get_default_session()
            if self._default_files_server_host is None:
                self._default_files_server_host = self._default_backend_session.get_files_server_host().rstrip('/')
            # only attach credentials when the target is our own files server
            if url == self._default_files_server_host or url.startswith(self._default_files_server_host + '/'):
                return self._default_backend_session.add_auth_headers({})
            return None
    class _HttpSessionHandle(object):
        """Lazy handle describing a remote object; no request is made until download."""
        def __init__(self, url, is_stream, container_name, object_name):
            self.url, self.is_stream, self.container_name, self.object_name = \
                url, is_stream, container_name, object_name
    def __init__(self, retries=5):
        self._retries = retries
        self._containers = {}
    def get_container(self, container_name, config=None, **kwargs):
        """Return (and cache) the _Container for *container_name*."""
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)
        return self._containers[container_name]
    def upload_object_via_stream(self, iterator, container, object_name, extra=None, callback=None, **kwargs):
        """POST *iterator* as a multipart file upload; raises ValueError on non-200 response."""
        # object_name is '<segment>/<path>': the first segment is appended to the container URL
        url = object_name[:object_name.index('/')]
        url_path = object_name[len(url) + 1:]
        full_url = container.name + url
        # when the stream is seekable, scale the timeout to its size assuming min_kbps_speed
        timeout = self.timeout[-1]
        stream_size = 0
        if hasattr(iterator, 'tell') and hasattr(iterator, 'seek'):
            pos = iterator.tell()
            iterator.seek(0, 2)
            stream_size = iterator.tell() - pos
            iterator.seek(pos, 0)
            timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))
        res = container.session.post(full_url, files={url_path: iterator}, timeout=timeout,
                                     headers=container.get_headers(full_url))
        if res.status_code != requests.codes.ok:
            raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text))
        return res
    def list_container_objects(self, *args, **kwargs):
        raise NotImplementedError('List is not implemented for http protocol')
    def delete_object(self, obj, *args, **kwargs):
        """HTTP DELETE the object; returns True on success, False (with a warning) otherwise."""
        assert isinstance(obj, self._HttpSessionHandle)
        container = self._containers[obj.container_name]
        res = container.session.delete(obj.url, headers=container.get_headers(obj.url))
        if res.status_code != requests.codes.ok:
            self._get_logger().warning('Failed deleting object %s (%d): %s' % (
                obj.object_name, res.status_code, res.text))
            return False
        return True
    def get_object(self, container_name, object_name, *args, **kwargs):
        """Return a lazy handle for the object (no request is performed here)."""
        is_stream = kwargs.get('stream', True)
        url = ''.join((container_name, object_name.lstrip('/')))
        return self._HttpSessionHandle(url, is_stream, container_name, object_name)
    def _get_download_object(self, obj):
        """Resolve a session handle into an actual GET response (non-handles pass through)."""
        if not isinstance(obj, self._HttpSessionHandle):
            return obj
        container = self._containers[obj.container_name]
        container.session.stream = obj.is_stream
        res = container.session.get(obj.url, timeout=self.timeout, headers=container.get_headers(obj.url))
        if res.status_code != requests.codes.ok:
            raise ValueError('Failed getting object %s (%d): %s' % (obj.object_name, res.status_code, res.text))
        return res
    def download_object_as_stream(self, obj, chunk_size=64 * 1024, **_):
        """Return an iterator over the object's content in *chunk_size* chunks."""
        obj = self._get_download_object(obj)
        return obj.iter_content(chunk_size=chunk_size)
    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        """Stream the object to *local_path*; returns the total number of bytes written."""
        obj = self._get_download_object(obj)
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        length = 0
        with p.open(mode='wb') as f:
            for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):
                if not chunk:
                    continue
                chunk_size = len(chunk)
                f.write(chunk)
                length += chunk_size
                if callback:
                    callback(chunk_size)
        return length
    def get_direct_access(self, remote_path, **_):
        # HTTP objects are never directly accessible on the local filesystem
        return None
    def test_upload(self, test_path, config, **kwargs):
        return True
    def upload_object(self, file_path, container, object_name, extra, callback=None, **kwargs):
        """Upload a local file by streaming it through upload_object_via_stream()."""
        with open(file_path, 'rb') as stream:
            return self.upload_object_via_stream(iterator=stream, container=container,
                                                 object_name=object_name, extra=extra, callback=callback, **kwargs)
class _Stream(object):
    """Minimal file-like adapter bridging producers and consumers.

    Two modes, chosen at construction:
    - with *input_iterator*: next()/read() pull chunks from the iterator
    - without: write() pushes chunks into an internal queue that next()/read()
      drain until close() is called
    """
    encoding = None
    mode = 'rw'
    name = ''
    newlines = '\n'
    softspace = False
    def __init__(self, input_iterator=None):
        self.closed = False
        self._buffer = Queue()
        self._input_iterator = input_iterator
        self._leftover = None
    def __iter__(self):
        return self
    def __next__(self):
        return self.next()
    def close(self):
        self.closed = True
    def flush(self):
        pass
    def fileno(self):
        # arbitrary fake descriptor number - callers only need *some* int
        return 87
    def isatty(self):
        return False
    def next(self):
        """Return the next chunk; raises StopIteration when the stream is closed and drained."""
        while not self.closed or not self._buffer.empty():
            if self._input_iterator:
                try:
                    chunck = next(self._input_iterator)
                    return chunck
                except StopIteration:
                    self.closed = True
                    raise StopIteration()
                except Exception as ex:
                    _Driver.get_logger().error('Failed downloading: %s' % ex)
            else:
                try:
                    # poll with a timeout so a concurrent close() is noticed
                    return self._buffer.get(block=True, timeout=1.)
                except Empty:
                    pass
        raise StopIteration()
    def read(self, size=None):
        """Read up to *size* bytes (all remaining when size is None), keeping any excess chunk tail."""
        try:
            data = self.next() if self._leftover is None else self._leftover
        except StopIteration:
            return six.b('')
        self._leftover = None
        try:
            while size is None or not data or len(data) < size:
                chunk = self.next()
                if chunk is not None:
                    if data is not None:
                        data += chunk
                    else:
                        data = chunk
        except StopIteration:
            pass
        if size is not None and data and len(data) > size:
            # keep the tail of the last chunk for the next read() call
            self._leftover = data[size:]
            return data[:size]
        return data
    def readline(self, size=None):
        # NOTE(review): delegates to read() - does not actually stop at newlines
        return self.read(size)
    def readlines(self, sizehint=None):
        pass
    def truncate(self, size=None):
        pass
    def write(self, bytes):
        # note: the parameter name shadows the builtin `bytes`
        self._buffer.put(bytes, block=True)
    def writelines(self, sequence):
        for s in sequence:
            self.write(s)
class _Boto3Driver(_Driver):
    """Amazon S3 storage driver backed by boto3."""
    _min_pool_connections = 512
    _max_multipart_concurrency = deferred_config('aws.boto3.max_multipart_concurrency', 16)
    _pool_connections = deferred_config('aws.boto3.pool_connections', 512)
    _stream_download_pool_connections = 128
    _stream_download_pool = None
    # cache of container name -> _Container, shared across driver instances
    _containers = {}
    scheme = 's3'
    scheme_prefix = str(furl(scheme=scheme, netloc=''))
    # bucket hosts whose get-location failure was already logged (log once per host)
    _bucket_location_failure_reported = set()
    class _Container(object):
        """A boto3 resource + Bucket bound to one configured S3 endpoint."""
        _creation_lock = threading.Lock()
        def __init__(self, name, cfg):
            try:
                import boto3
                import botocore.client
                from botocore.exceptions import ClientError  # noqa: F401
            except ImportError:
                raise UsageError(
                    'AWS S3 storage driver (boto3) not found. '
                    'Please install driver using: pip install \"boto3>=1.9\"'
                )
            # strip the 's3://' scheme prefix (5 characters)
            self.name = name[5:]
            endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None
            # serialize resource creation; boto3 resource construction is not thread-safe
            with self._creation_lock:
                boto_kwargs = {
                    "endpoint_url": endpoint,
                    "use_ssl": cfg.secure,
                    "verify": cfg.verify,
                    "config": botocore.client.Config(
                        max_pool_connections=max(
                            _Boto3Driver._min_pool_connections,
                            _Boto3Driver._pool_connections)
                    )
                }
                if not cfg.use_credentials_chain:
                    # explicit credentials; otherwise boto3's default credential chain is used
                    boto_kwargs["aws_access_key_id"] = cfg.key
                    boto_kwargs["aws_secret_access_key"] = cfg.secret
                self.resource = boto3.resource(
                    's3',
                    **boto_kwargs
                )
            self.config = cfg
            # when a custom host is configured, the bucket name follows the host segment
            bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name
            self.bucket = self.resource.Bucket(bucket_name)
    @attrs
    class ListResult(object):
        # minimal result record returned by list_container_objects()
        name = attrib(default=None)
    def __init__(self):
        pass
    def _get_stream_download_pool(self):
        """Lazily create the thread pool used for streamed downloads."""
        if self._stream_download_pool is None:
            self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)
        return self._stream_download_pool
    def get_container(self, container_name, config=None, **kwargs):
        """Return (and cache) the _Container for *container_name*; refreshes its retry count."""
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, cfg=config)
        self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]
    def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):
        """Upload from an iterator through a file-object adapter; returns True on success."""
        import boto3.s3.transfer
        stream = _Stream(iterator)
        try:
            container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(
                use_threads=container.config.multipart,
                max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                num_download_attempts=container.config.retries),
                Callback=callback,
            )
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True
    def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):
        """Upload a local file; returns True on success."""
        import boto3.s3.transfer
        try:
            container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig(
                use_threads=container.config.multipart,
                max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                num_download_attempts=container.config.retries),
                Callback=callback)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True
    def list_container_objects(self, container, ex_prefix=None, **kwargs):
        """Yield ListResult records for the bucket's objects (optionally prefix-filtered)."""
        if ex_prefix:
            res = container.bucket.objects.filter(Prefix=ex_prefix)
        else:
            res = container.bucket.objects.all()
        # note: the loop variable deliberately rebinds `res` to each object summary
        for res in res:
            yield self.ListResult(name=res.key)
    def delete_object(self, object, **kwargs):
        """Delete the object; returns True only when a follow-up load yields a 404."""
        from botocore.exceptions import ClientError
        object.delete()
        try:
            # Try loading the file to verify deletion
            object.load()
            return False
        except ClientError as e:
            return int(e.response['Error']['Code']) == 404
    def get_object(self, container_name, object_name, *args, **kwargs):
        """Return a boto3 Object handle tagged with its originating container name."""
        full_container_name = 's3://' + container_name
        container = self._containers[full_container_name]
        obj = container.resource.Object(container.bucket.name, object_name)
        obj.container_name = full_container_name
        return obj
    def download_object_as_stream(self, obj, chunk_size=64 * 1024, verbose=None, log=None, **_):
        """Return a _Stream fed asynchronously by a background download thread."""
        def async_download(a_obj, a_stream, cb, cfg):
            try:
                a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)
            except Exception as ex:
                (log or self.get_logger()).error('Failed downloading: %s' % ex)
            a_stream.close()
        import boto3.s3.transfer
        # return iterable object
        stream = _Stream()
        container = self._containers[obj.container_name]
        config = boto3.s3.transfer.TransferConfig(
            use_threads=container.config.multipart,
            max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
            num_download_attempts=container.config.retries)
        total_size_mb = obj.content_length / (1024. * 1024.)
        remote_path = os.path.join(obj.container_name, obj.key)
        cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)
        self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)
        return stream
    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        """Download the object to *local_path* (no-op with a warning when the file exists and overwrite is off)."""
        import boto3.s3.transfer
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        container = self._containers[obj.container_name]
        obj.download_file(str(p),
                          Callback=callback,
                          Config=boto3.s3.transfer.TransferConfig(
                              use_threads=container.config.multipart,
                              max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
                              num_download_attempts=container.config.retries))
    @classmethod
    def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True):
        """Verify credentials/region by writing a small test object into the bucket.

        :return: True when the upload succeeds and the detected region matches;
            False (or raises, per *raise_on_error*) otherwise.
        """
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError:
            return False
        if not conf.bucket:
            return False
        try:
            if not conf.is_valid():
                raise Exception('Missing credentials')
            fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__)
            bucket_name = str(fullname.path.segments[0])
            filename = str(furl(path=fullname.path.segments[1:]))
            data = {
                'user': getpass.getuser(),
                'machine': gethostname(),
                'time': datetime.utcnow().isoformat()
            }
            boto_session = boto3.Session(conf.key, conf.secret)
            boto_resource = boto_session.resource('s3', conf.region)
            bucket = boto_resource.Bucket(bucket_name)
            bucket.put_object(Key=filename, Body=six.b(json.dumps(data)))
            region = cls._get_bucket_region(conf=conf, log=log, report_info=True)
            # detected region must match the configured one ('us-east-1' when unconfigured)
            if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')):
                msg = "incorrect region specified for bucket %s (detected region %s)" % (conf.bucket, region)
            else:
                return True
        except ClientError as ex:
            msg = ex.response['Error']['Message']
            if log_on_error and log:
                log.error(msg)
            if raise_on_error:
                raise
        except Exception as ex:
            msg = str(ex)
            if log_on_error and log:
                log.error(msg)
            if raise_on_error:
                raise
        msg = ("Failed testing access to bucket %s: " % conf.bucket) + msg
        if log_on_error and log:
            log.error(msg)
        if raise_on_error:
            raise StorageError(msg)
        return False
    @classmethod
    def _get_bucket_region(cls, conf, log=None, report_info=False):
        """Return the bucket's region (LocationConstraint), or None when it cannot be determined."""
        import boto3
        from botocore.exceptions import ClientError
        if not conf.bucket:
            return None
        def report(msg):
            # log each failing bucket host only once
            if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:
                if report_info:
                    log.debug(msg)
                else:
                    log.warning(msg)
                cls._bucket_location_failure_reported.add(conf.get_bucket_host())
        try:
            boto_session = boto3.Session(conf.key, conf.secret)
            boto_resource = boto_session.resource('s3')
            return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)["LocationConstraint"]
        except ClientError as ex:
            report("Failed getting bucket location (region) for bucket "
                   "%s: %s (%s, access_key=%s). Default region will be used. "
                   "This is normal if you do not have GET_BUCKET_LOCATION permission"
                   % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key))
        except Exception as ex:
            report("Failed getting bucket location (region) for bucket %s: %s. Default region will be used."
                   % (conf.bucket, str(ex)))
        return None
    def get_direct_access(self, remote_path, **_):
        # S3 objects are never directly accessible on the local filesystem
        return None
    def test_upload(self, test_path, config, **_):
        return True
class _GoogleCloudStorageDriver(_Driver):
    """Google Cloud Storage driver backed by google-cloud-storage."""
    _stream_download_pool_connections = 128
    _stream_download_pool = None
    # cache of container name -> _Container, shared across driver instances
    _containers = {}
    scheme = 'gs'
    scheme_prefix = str(furl(scheme=scheme, netloc=''))
    class _Container(object):
        """A GCS client + bucket bound to one configured project/credentials set."""
        def __init__(self, name, cfg):
            try:
                from google.cloud import storage
                from google.oauth2 import service_account
            except ImportError:
                raise UsageError(
                    'Google cloud driver not found. '
                    'Please install driver using: pip install \"google-cloud-storage>=1.13.2\"'
                )
            # strip the 'gs://' scheme prefix
            self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):]
            if cfg.credentials_json:
                credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json)
            else:
                credentials = None
            self.client = storage.Client(project=cfg.project, credentials=credentials)
            # tune the underlying HTTP adapters' connection pooling (private API)
            for adapter in self.client._http.adapters.values():
                if cfg.pool_connections:
                    adapter._pool_connections = cfg.pool_connections
                if cfg.pool_maxsize:
                    adapter._pool_maxsize = cfg.pool_maxsize
            self.config = cfg
            self.bucket = self.client.bucket(self.name)
    def _get_stream_download_pool(self):
        """Lazily create the thread pool used for streamed downloads."""
        if self._stream_download_pool is None:
            self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)
        return self._stream_download_pool
    def get_container(self, container_name, config=None, **kwargs):
        """Return (and cache) the _Container for *container_name*; refreshes its retry count."""
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, cfg=config)
        self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]
    def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):
        """Upload from a file-like object; returns True on success."""
        try:
            blob = container.bucket.blob(object_name)
            blob.upload_from_file(iterator)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True
    def upload_object(self, file_path, container, object_name, extra=None, **kwargs):
        """Upload a local file; returns True on success."""
        try:
            blob = container.bucket.blob(object_name)
            blob.upload_from_filename(file_path)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
            return False
        return True
    def list_container_objects(self, container, **kwargs):
        return list(container.bucket.list_blobs())
    def delete_object(self, object, **kwargs):
        """Delete the blob; returns True only when it no longer exists afterwards."""
        try:
            object.delete()
        except Exception as ex:
            try:
                from google.cloud.exceptions import NotFound
                if isinstance(ex, NotFound):
                    return False
            except ImportError:
                pass
            name = getattr(object, "name", "")
            self.get_logger().warning("Failed deleting object {}: {}".format(name, ex))
            return False
        return not object.exists()
    def get_object(self, container_name, object_name, *args, **kwargs):
        """Return a blob handle tagged with its originating container name."""
        full_container_name = str(furl(scheme=self.scheme, netloc=container_name))
        container = self._containers[full_container_name]
        obj = container.bucket.blob(object_name)
        obj.container_name = full_container_name
        return obj
    def download_object_as_stream(self, obj, chunk_size=256 * 1024, **_):
        raise NotImplementedError('Unsupported for google storage')
        # NOTE(review): everything below the raise is unreachable dead code,
        # apparently left over from an older implementation
        def async_download(a_obj, a_stream):
            try:
                a_obj.download_to_file(a_stream)
            except Exception as ex:
                self.get_logger().error('Failed downloading: %s' % ex)
            a_stream.close()
        # return iterable object
        stream = _Stream()
        obj.chunk_size = chunk_size
        self._get_stream_download_pool().submit(async_download, obj, stream)
        return stream
    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        """Download the blob to *local_path* (no-op with a warning when the file exists and overwrite is off)."""
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        obj.download_to_filename(str(p))
    def test_upload(self, test_path, config, **_):
        """Check get/update IAM permissions on the target bucket (or test-path blob)."""
        bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir))
        bucket = self.get_container(container_name=bucket_url, config=config).bucket
        test_obj = bucket
        if test_path:
            if not test_path.endswith('/'):
                test_path += '/'
            blob = bucket.blob(test_path)
            if blob.exists():
                test_obj = blob
        permissions_to_test = ('storage.objects.get', 'storage.objects.update')
        return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test)
    def get_direct_access(self, remote_path, **_):
        # GCS objects are never directly accessible on the local filesystem
        return None
class _AzureBlobServiceStorageDriver(_Driver):
    """Azure Blob Storage driver backed by azure.storage.blob (SDK <= 2.1.0)."""
    scheme = 'azure'
    # cache of container name -> _Container, shared across driver instances
    _containers = {}
    class _Container(object):
        """A BlockBlobService bound to one storage-account configuration."""
        def __init__(self, name, config):
            try:
                from azure.common import AzureHttpError  # noqa: F401
                from azure.storage.blob import BlockBlobService
            except ImportError:
                raise UsageError(
                    'Azure blob storage driver not found. '
                    'Please install driver using: pip install \"azure.storage.blob<=2.1.0\"'
                )
            self.name = name
            self.config = config
            self.blob_service = BlockBlobService(
                account_name=config.account_name,
                account_key=config.account_key,
            )
    @attrs
    class _Object(object):
        # minimal record describing a blob: its container, name, and size in bytes
        container = attrib()
        blob_name = attrib()
        content_length = attrib()
    def get_container(self, container_name=None, config=None, **kwargs):
        """Return (and cache) the _Container for *container_name* (falls back to the configured one)."""
        container_name = container_name or config.container_name
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(name=container_name, config=config)
        # self._containers[container_name].config.retries = kwargs.get('retries', 5)
        return self._containers[container_name]
    def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):
        """Upload in-memory/stream content as a block blob; returns True on success, False on failure."""
        from azure.common import AzureHttpError  # noqa
        # NOTE(review): blob_name is computed but unused below - the upload targets
        # object_name as-is (hence noqa: F841); confirm this is intended
        blob_name = self._blob_name_from_object_path(object_name, container.name)  # noqa: F841
        try:
            container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024
            container.blob_service.socket_timeout = (300, 2000)
            container.blob_service.create_blob_from_bytes(
                container.name,
                object_name,
                iterator.read() if hasattr(iterator, "read") else bytes(iterator),
                # timeout=300,
                max_connections=2,
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error('Failed uploading (Azure error): %s' % ex)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
        return False
    def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):
        """Upload a local file as a block blob; returns True on success, None on failure."""
        from azure.common import AzureHttpError  # noqa
        blob_name = self._blob_name_from_object_path(object_name, container.name)
        stream = None
        try:
            from azure.storage.blob import ContentSettings  # noqa
            from mimetypes import guess_type
            container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024
            container.blob_service.socket_timeout = (300, 2000)
            container.blob_service.create_blob_from_path(
                container.name,
                blob_name,
                file_path,
                # timeout=300,
                max_connections=2,
                content_settings=ContentSettings(content_type=guess_type(file_path)),
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error('Failed uploading (Azure error): %s' % ex)
        except Exception as ex:
            self.get_logger().error('Failed uploading: %s' % ex)
        finally:
            if stream:
                stream.close()
    def list_container_objects(self, container, ex_prefix=None, **kwargs):
        return list(container.blob_service.list_blobs(container_name=container.name, prefix=ex_prefix))
    def delete_object(self, object, **kwargs):
        """Delete the blob; returns True when it no longer exists afterwards."""
        container = object.container
        container.blob_service.delete_blob(
            container.name,
            object.blob_name,
        )
        return not object.container.blob_service.exists(container.name, object.blob_name)
    def get_object(self, container_name, object_name, *args, **kwargs):
        """Return an _Object record (with content length) for an existing blob."""
        container = self._containers.get(container_name)
        if not container:
            raise StorageError("Container `{}` not found for object {}".format(container_name, object_name))
        # blob_name = self._blob_name_from_object_path(object_name, container_name)
        blob = container.blob_service.get_blob_properties(container.name, object_name)
        return self._Object(container=container, blob_name=blob.name, content_length=blob.properties.content_length)
    def download_object_as_stream(self, obj, verbose, *_, **__):
        """Download the whole blob into memory and return its bytes content (not a true stream)."""
        container = obj.container
        total_size_mb = obj.content_length / (1024. * 1024.)
        remote_path = os.path.join(
            "{}://".format(self.scheme),
            container.config.account_name,
            container.name,
            obj.blob_name
        )
        cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())
        blob = container.blob_service.get_blob_to_bytes(
            container.name,
            obj.blob_name,
            progress_callback=cb,
        )
        return blob.content
    def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):
        """Download the blob to *local_path*, forwarding incremental progress to *callback*."""
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))
            return
        download_done = threading.Event()
        download_done.counter = 0
        def callback_func(current, total):
            # forward only the delta downloaded since the previous report
            if callback:
                chunk = current - download_done.counter
                download_done.counter += chunk
                callback(chunk)
            if current >= total:
                download_done.set()
        container = obj.container
        container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024
        _ = container.blob_service.get_blob_to_path(
            container.name,
            obj.blob_name,
            local_path,
            max_connections=10,
            progress_callback=callback_func,
        )
        # NOTE(review): wait() has no timeout - relies on the progress callback
        # eventually reporting current >= total; confirm this always happens
        download_done.wait()
    def test_upload(self, test_path, config, **_):
        """Return True when the configured container is reachable with the account key."""
        container = self.get_container(config=config)
        try:
            container.blob_service.get_container_properties(container.name)
        except Exception:
            return False
        else:
            # Using the account Key, we can always upload...
            return True
    @classmethod
    def _blob_name_from_object_path(cls, name, container_name):
        """Validate *name* against *container_name* and extract the blob name.

        NOTE(review): for URL inputs this returns a (container, path) tuple,
        while plain names are returned unchanged as a string - confirm callers
        expect both shapes.

        :raises StorageError: on scheme mismatch, missing container, container
            mismatch, or missing blob path.
        """
        scheme = urlparse(name).scheme
        if scheme:
            if scheme != cls.scheme:
                raise StorageError(
                    "When using a URL, only the `{}` scheme is supported for Azure storage: {}",
                    cls.scheme,
                    name,
                )
            f = furl(name)
            if not f.path.segments:
                raise StorageError(
                    "Missing container name in URL {}",
                    name,
                )
            parsed_container_name = f.path.segments[0]
            if parsed_container_name != container_name:
                raise StorageError(
                    "Container name mismatch (expected {}, found {}) in {}",
                    container_name,
                    parsed_container_name,
                    name,
                )
            if len(f.path.segments) == 1:
                raise StorageError(
                    "No path found following container name {} in {}",
                    container_name,
                    name,
                )
            return f.path.segments[0], os.path.join(*f.path.segments[1:])
        return name
    def get_direct_access(self, remote_path, **_):
        # Azure blobs are never directly accessible on the local filesystem
        return None
class _FileStorageDriver(_Driver):
    """Local-filesystem storage driver ('file' scheme); containers are directories under base_path."""
    scheme = "file"
    # default chunk size (bytes) for chunked reads
    CHUNK_SIZE = 8096
    # bookkeeping folders that must never be listed as stored objects
    IGNORE_FOLDERS = ['.lock', '.hash']
    # lightweight record describing a stored file
    Object = namedtuple("Object", ['name', 'size', 'extra', 'driver', 'container', 'hash', 'meta_data'])
    class _Container(object):
        """Simple record tying a container name to its metadata and owning driver."""
        def __init__(self, name, extra, driver):
            self.name = name
            self.extra = extra
            self.driver = driver
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        """Initialize the local-file driver; *key* is the root storage path (other parameters are unused here)."""
        # Use the key as the path to the storage
        self.base_path = key
def _make_path(self, path, ignore_existing=True):
try:
os.makedirs(path)
except OSError:
exp = sys.exc_info()[1]
if exp.errno == errno.EEXIST and not ignore_existing:
raise exp
def _check_container_name(self, container_name):
if '/' in container_name or '\\' in container_name:
raise ValueError("Container name \"{}\" cannot contain \\ or / ".format(container_name))
def _make_container(self, container_name):
container_name = container_name or '.'
self._check_container_name(container_name)
full_path = os.path.realpath(os.path.join(self.base_path, container_name))
try:
stat = os.stat(full_path)
if not os.path.isdir(full_path):
raise OSError("Target path \"{}\" is not a directory".format(full_path))
except OSError:
raise OSError("Target path \"{}\" is not accessible or does not exist".format(full_path))
extra = {
'creation_time': stat.st_ctime,
'access_time': stat.st_atime,
'modify_time': stat.st_mtime,
}
return self._Container(name=container_name, extra=extra, driver=self)
def _make_object(self, container, object_name):
full_path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.', object_name))
if os.path.isdir(full_path):
raise ValueError("Target path \"{}\" already exist".format(full_path))
try:
stat = os.stat(full_path)
except Exception:
raise ValueError("Cannot access target path \"{}\"".format(full_path))
extra = {
'creation_time': stat.st_ctime,
'access_time': stat.st_atime,
'modify_time': stat.st_mtime,
}
return self.Object(name=object_name, size=stat.st_size, extra=extra,
driver=self, container=container, hash=None, meta_data=None)
def iterate_containers(self):
for container_name in os.listdir(self.base_path):
full_path = os.path.join(self.base_path, container_name)
if not os.path.isdir(full_path):
continue
yield self._make_container(container_name)
def _get_objects(self, container):
cpath = self.get_container_cdn_url(container, check=True)
for folder, subfolders, files in os.walk(cpath, topdown=True):
# Remove unwanted subfolders
for subf in self.IGNORE_FOLDERS:
if subf in subfolders:
subfolders.remove(subf)
for name in files:
full_path = os.path.join(folder, name)
object_name = os.path.relpath(full_path, start=cpath)
yield self._make_object(container, object_name)
    def iterate_container_objects(self, container):
        """Yield an Object record for every file stored under *container*."""
        return self._get_objects(container)
    def get_container(self, container_name, **_):
        """Return a container wrapper for an existing directory named *container_name*."""
        return self._make_container(container_name)
def get_container_cdn_url(self, container, check=False):
path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.'))
if check and not os.path.isdir(path):
raise ValueError("Target path \"{}\" does not exist".format(path))
return path
    def get_object(self, container_name, object_name, **_):
        """Return the Object record for *object_name* inside *container_name*."""
        container = self._make_container(container_name)
        return self._make_object(container, object_name)
    def get_object_cdn_url(self, obj):
        """Return the absolute filesystem path backing *obj*."""
        return os.path.realpath(os.path.join(self.base_path, obj.container.name, obj.name))
def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True, **_):
obj_path = self.get_object_cdn_url(obj)
base_name = os.path.basename(destination_path)
if not base_name and not os.path.exists(destination_path):
raise ValueError('Path \"{}\" does not exist'.format(destination_path))
if not base_name:
file_path = os.path.join(destination_path, obj.name)
else:
file_path = destination_path
if os.path.exists(file_path) and not overwrite_existing:
raise ValueError('File \"{}\" already exists, but overwrite_existing=False'.format(file_path))
try:
shutil.copy(obj_path, file_path)
except IOError:
if delete_on_failure:
# noinspection PyBroadException
try:
os.unlink(file_path)
except Exception:
pass
return False
return True
    def download_object_as_stream(self, obj, chunk_size=None, **_):
        """Yield the object's content in chunks (generator; the file closes when exhausted)."""
        path = self.get_object_cdn_url(obj)
        with open(path, 'rb') as obj_file:
            for data in self._read_in_chunks(obj_file, chunk_size=chunk_size):
                yield data
def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, **_):
path = self.get_container_cdn_url(container, check=True)
obj_path = os.path.join(path, object_name)
base_path = os.path.dirname(obj_path)
self._make_path(base_path)
shutil.copy(file_path, obj_path)
os.chmod(obj_path, int('664', 8))
return self._make_object(container, object_name)
def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):
path = self.get_container_cdn_url(container, check=True)
obj_path = os.path.join(path, object_name)
base_path = os.path.dirname(obj_path)
self._make_path(base_path)
obj_path = os.path.realpath(obj_path)
with open(obj_path, 'wb' if not isinstance(iterator, StringIO) else 'wt') as obj_file:
obj_file.write(iterator.read() if hasattr(iterator, 'read') else bytes(iterator))
os.chmod(obj_path, int('664', 8))
return self._make_object(container, object_name)
def delete_object(self, obj, **_):
path = self.get_object_cdn_url(obj)
try:
os.unlink(path)
except Exception:
return False
# # Check and delete all the empty parent folders
# path = os.path.dirname(path)
# container_url = obj.container.get_cdn_url()
#
# # Delete the empty parent folders till the container's level
return True
def create_container(self, container_name):
container_name = container_name or '.'
self._check_container_name(container_name)
path = os.path.join(self.base_path, container_name)
try:
self._make_path(path, ignore_existing=False)
except OSError:
exp = sys.exc_info()[1]
if exp.errno == errno.EEXIST:
raise ValueError('Container \"{}\" with this name already exists. The name '
'must be unique among all the containers in the '
'system'.format(container_name))
else:
raise ValueError('Error creating container \"{}\"'.format(container_name))
except Exception:
raise ValueError('Error creating container \"{}\"'.format(container_name))
return self._make_container(container_name)
def delete_container(self, container):
for obj in self._get_objects(container):
raise ValueError('Container \"{}\" is not empty'.format(container.name))
path = self.get_container_cdn_url(container, check=True)
try:
shutil.rmtree(path)
except Exception:
return False
return True
def list_container_objects(self, container, **kwargs):
return list(self.iterate_container_objects(container))
@staticmethod
def _read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):
chunk_size = chunk_size or _FileStorageDriver.CHUNK_SIZE
if six.PY3:
from io import FileIO as file
if isinstance(iterator, (file)):
get_data = iterator.read
args = (chunk_size,)
else:
get_data = next
args = (iterator,)
data = bytes('')
empty = False
while not empty or len(data) > 0:
if not empty:
try:
chunk = bytes(get_data(*args))
if len(chunk) > 0:
data += chunk
else:
empty = True
except StopIteration:
empty = True
if len(data) == 0:
if empty and yield_empty:
yield bytes('')
return
if fill_size:
if empty or len(data) >= chunk_size:
yield data[:chunk_size]
data = data[chunk_size:]
else:
yield data
data = bytes('')
def get_direct_access(self, remote_path, **_):
full_url = StorageHelper.conform_url(remote_path)
path = Path(full_url[7:])
if not path.exists():
raise ValueError("Requested path does not exist: {}".format(path))
return path.as_posix()
def test_upload(self, test_path, config, **kwargs):
return True
driver_schemes = set(
filter(
None,
itertools.chain(
(getattr(cls, "scheme", None) for cls in _Driver.__subclasses__()),
*(getattr(cls, "schemes", []) for cls in _Driver.__subclasses__())
)
)
)
remote_driver_schemes = driver_schemes - {_FileStorageDriver.scheme}
| true | true |
f71bca0add55e9dbed05726a9b8f1b5d8f31a0fe | 544 | py | Python | loja.py | Felipe-Gs/Exerciccios-Python3 | bdbd49e7daa48df336b83ef3a2e36e42ede297ab | [
"MIT"
] | 3 | 2021-06-01T14:11:58.000Z | 2022-03-20T02:30:13.000Z | loja.py | Felipe-Gs/Exercicios-Python3 | bdbd49e7daa48df336b83ef3a2e36e42ede297ab | [
"MIT"
] | null | null | null | loja.py | Felipe-Gs/Exercicios-Python3 | bdbd49e7daa48df336b83ef3a2e36e42ede297ab | [
"MIT"
] | null | null | null | '''quantidade = int(input('quantos produtos o senhor comprou?'))
preco = quantidade * 1.99
print(preco)'''
print("====TABELA DE PREÇOS====")
for c in range(1, 51):
print(c, "- R$", c * 1.99)
quantidade = int(input('quantos produtos o senhor comprou?'))
preco = quantidade * 1.99
print(preco, " é o preço que voce tem que pagar!")
dinheiro = float(input('com quanto dinheiro vc pretende pagar?'))
if dinheiro <= preco:
print('dinheiro insuficiente')
else:
troco = dinheiro - preco
print("obrigado, esse é seu troco:",troco )
| 32 | 66 | 0.665441 | print("====TABELA DE PREÇOS====")
for c in range(1, 51):
print(c, "- R$", c * 1.99)
quantidade = int(input('quantos produtos o senhor comprou?'))
preco = quantidade * 1.99
print(preco, " é o preço que voce tem que pagar!")
dinheiro = float(input('com quanto dinheiro vc pretende pagar?'))
if dinheiro <= preco:
print('dinheiro insuficiente')
else:
troco = dinheiro - preco
print("obrigado, esse é seu troco:",troco )
| true | true |
f71bca13cf7f1910de2246a9b822851a12529735 | 11,981 | py | Python | downstream/tinypersons/mmdet/datasets/pipelines/formating.py | bwconrad/solo-learn | ec510d803a4428d7d8803b90fa1484c42cb9cb52 | [
"MIT"
] | 271 | 2021-06-19T08:41:11.000Z | 2022-03-31T05:42:42.000Z | downstream/tinypersons/mmdet/datasets/pipelines/formating.py | bwconrad/solo-learn | ec510d803a4428d7d8803b90fa1484c42cb9cb52 | [
"MIT"
] | 48 | 2021-07-06T07:17:12.000Z | 2022-03-14T11:38:36.000Z | downstream/tinypersons/mmdet/datasets/pipelines/formating.py | bwconrad/solo-learn | ec510d803a4428d7d8803b90fa1484c42cb9cb52 | [
"MIT"
] | 54 | 2021-07-07T08:40:49.000Z | 2022-03-16T05:02:35.000Z | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of input image is (H, W, C). The pipeline will convert
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
(1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to transpose the channel order of data in results.
Args:
results (dict): Result dict contains the data to transpose.
Returns:
dict: The result dict contains the data transposed to \
``self.order``.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer:
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
Args:
fields (Sequence[dict]): Each field is a dict like
``dict(key='xxx', **kwargs)``. The ``key`` in result will
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))``.
"""
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
"""Call function to convert data in results to
:obj:`mmcv.DataContainer`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted to \
:obj:`mmcv.DataContainer`.
"""
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with \
default bundle.
"""
if 'img' in results:
img = results['img']
# add default meta keys
results = self._add_default_meta_keys(results)
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
return results
def _add_default_meta_keys(self, results):
"""Add default meta keys.
We set default meta keys including `pad_shape`, `scale_factor` and
`img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
`Pad` are implemented during the whole pipeline.
Args:
results (dict): Result dict contains the data to convert.
Returns:
results (dict): Updated result dict contains the data to convert.
"""
img = results['img']
results.setdefault('pad_shape', img.shape)
results.setdefault('scale_factor', 1.0)
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results.setdefault(
'img_norm_cfg',
dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False))
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "proposals", "gt_bboxes",
"gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple \
(h, w, c). Note that images may be zero padded on the \
bottom/right if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg')``
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function to collect keys in results. The keys in ``meta_keys``
will be converted to :obj:mmcv.DataContainer.
Args:
results (dict): Result dict contains the data to collect.
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas``
"""
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
@PIPELINES.register_module()
class WrapFieldsToLists:
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='Pad', size_divisor=32),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
>>> dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
"""Call function to wrap fields into lists.
Args:
results (dict): Result dict contains the data to wrap.
Returns:
dict: The result dict where value of ``self.keys`` are wrapped \
into list.
"""
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| 32.824658 | 79 | 0.591687 | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer:
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
def __call__(self, results):
if 'img' in results:
img = results['img']
results = self._add_default_meta_keys(results)
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
return results
def _add_default_meta_keys(self, results):
img = results['img']
results.setdefault('pad_shape', img.shape)
results.setdefault('scale_factor', 1.0)
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results.setdefault(
'img_norm_cfg',
dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False))
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module()
class Collect:
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
@PIPELINES.register_module()
class WrapFieldsToLists:
def __call__(self, results):
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| true | true |
f71bcae3ada7450e03517099c8f34cabe1579cb5 | 1,307 | py | Python | trust_simple_three/tests.py | gerhardriener/cherry_picking_code | 5dbcd32ad2e9929eac785eb06e7d44c60b1ffc18 | [
"MIT"
] | null | null | null | trust_simple_three/tests.py | gerhardriener/cherry_picking_code | 5dbcd32ad2e9929eac785eb06e7d44c60b1ffc18 | [
"MIT"
] | null | null | null | trust_simple_three/tests.py | gerhardriener/cherry_picking_code | 5dbcd32ad2e9929eac785eb06e7d44c60b1ffc18 | [
"MIT"
] | null | null | null | from otree.api import Currency as c, currency_range, SubmissionMustFail
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
cases = [
{'offer': 1, 'return_1': 1, 'return_2': 1,
'return_2_A': 1, 'return_2_B': 1,
'p1_payoff': 5, 'p2_payoff': 5, 'p3_payoff': 5},
]
def play_round(self):
case = self.case
if self.player.id_in_group == 1:
yield (pages.Send, {"sent_amount": case['offer']})
elif self.player.id_in_group == 2:
yield (pages.SendBack1, {"sent_back_amount_1": case['return_1']})
else:
for invalid_return in [-1, case['offer']
* Constants.multiplier + 1]:
yield SubmissionMustFail(pages.SendBack,
{'sent_back_amount_1':
invalid_return})
yield (pages.SendBack, {'sent_back_amount_1': case['return']})
if self.player.id_in_group == 1:
expected_payoff = case['p1_payoff']
elif self.player.id_in_group == 2:
expected_payoff = case['p2_payoff']
else:
expected_payoff = case['p3_payoff']
assert self.player.payoff == expected_payoff
| 34.394737 | 77 | 0.55394 | from otree.api import Currency as c, currency_range, SubmissionMustFail
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
cases = [
{'offer': 1, 'return_1': 1, 'return_2': 1,
'return_2_A': 1, 'return_2_B': 1,
'p1_payoff': 5, 'p2_payoff': 5, 'p3_payoff': 5},
]
def play_round(self):
case = self.case
if self.player.id_in_group == 1:
yield (pages.Send, {"sent_amount": case['offer']})
elif self.player.id_in_group == 2:
yield (pages.SendBack1, {"sent_back_amount_1": case['return_1']})
else:
for invalid_return in [-1, case['offer']
* Constants.multiplier + 1]:
yield SubmissionMustFail(pages.SendBack,
{'sent_back_amount_1':
invalid_return})
yield (pages.SendBack, {'sent_back_amount_1': case['return']})
if self.player.id_in_group == 1:
expected_payoff = case['p1_payoff']
elif self.player.id_in_group == 2:
expected_payoff = case['p2_payoff']
else:
expected_payoff = case['p3_payoff']
assert self.player.payoff == expected_payoff
| true | true |
f71bcbceeb060b7a31ed8e3353c036d8c37f27b4 | 621 | py | Python | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import collections
from supriya import CalculationRate
from supriya.ugens.Filter import Filter
class BRF(Filter):
"""
A 2nd order Butterworth band-reject filter.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> b_r_f =supriya.ugens.BRF.ar(source=source)
>>> b_r_f
BRF.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Filter UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 440.0), ("reciprocal_of_q", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| 21.413793 | 79 | 0.645733 | import collections
from supriya import CalculationRate
from supriya.ugens.Filter import Filter
class BRF(Filter):
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 440.0), ("reciprocal_of_q", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| true | true |
f71bcc0ec5e7d7ea7b83b7093e62b94f604e0b6e | 1,989 | py | Python | visualizer.py | shaandesai1/transfer_diffeq | 29ab4f3ff16a58bc7b1751428e540a3bb135778c | [
"MIT"
] | null | null | null | visualizer.py | shaandesai1/transfer_diffeq | 29ab4f3ff16a58bc7b1751428e540a3bb135778c | [
"MIT"
] | null | null | null | visualizer.py | shaandesai1/transfer_diffeq | 29ab4f3ff16a58bc7b1751428e540a3bb135778c | [
"MIT"
] | null | null | null |
import numpy as np
import torch
import matplotlib.pyplot as plt
from neurodiffeq import diff # the differentiation operation
from neurodiffeq.conditions import IVP # the initial condition
from neurodiffeq.networks import FCNN # fully-connect neural network
from neurodiffeq.solvers import Solver1D
from neurodiffeq.callbacks import WeightCallback
from neurodiffeq.callbacks import WeightCallback1, SolutionCallback, SaddleCallback
from neurodiffeq.callbacks import PeriodLocal
from sklearn.metrics import mean_squared_error
# from sklearn.metrics.pairwise import cosine_similarity
import copy
import matplotlib.pyplot as plt
DIFFEQS_TRAIN = {
'exp': lambda u, t: [diff(u, t) + u],
'exp1': lambda u, t: [diff(u, t) - u],
'tanh': lambda u, t: [diff(u, t) + u ** 2 - 1],
'psig': lambda u, t: [diff(u, t) - 3 * u + u ** 2],
'r1': lambda u, t: [diff(u, t) - u + u ** 2 + u ** 3],
'r2': lambda u, t: [diff(u, t) + u + u ** 2],
'r3': lambda u, t: [diff(u, t) + u ** 2],
'r4': lambda u, t: [diff(u, t) - u ** 2],
'q1': lambda u, t: [diff(u, t) - u + u ** 2],
'q2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3],
'q3': lambda u, t: [diff(u, t) + u ** 2 + u ** 4],
'q4': lambda u, t: [diff(u, t) - u ** 2 - u ** 4],
'high_order1': lambda u, t: [diff(u, t) + u - u ** 2 + u ** 3 - u ** 4 + u ** 5],
'high_order2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3 + u ** 4 - u ** 5],
'baseline': lambda u, t: [diff(u,t)]
}
solsa = np.load('data/q3_train_solution/3000.npy')
solsb = np.load('data/baseline_train_solution/3000.npy')
analytical =np.load('data/q3_gt_test_solution/3000.npy')
# pre1 =np.load('data/q2_q2_pretrain_500_solution/500.npy')
# pre2 =np.load('data/baseline_q2_pretrain_500_solution/500.npy')
plt.figure()
plt.plot(solsa,label='q2')
plt.plot(solsb,label='high_order_2')
plt.plot(analytical,label='analytical_q2')
# plt.plot(pre1,label='pre_q2_q2')
# plt.plot(pre2,label='pre_baseline_q2')
plt.legend()
plt.show() | 40.591837 | 85 | 0.64002 |
import numpy as np
import torch
import matplotlib.pyplot as plt
from neurodiffeq import diff
from neurodiffeq.conditions import IVP
from neurodiffeq.networks import FCNN
from neurodiffeq.solvers import Solver1D
from neurodiffeq.callbacks import WeightCallback
from neurodiffeq.callbacks import WeightCallback1, SolutionCallback, SaddleCallback
from neurodiffeq.callbacks import PeriodLocal
from sklearn.metrics import mean_squared_error
import copy
import matplotlib.pyplot as plt
DIFFEQS_TRAIN = {
'exp': lambda u, t: [diff(u, t) + u],
'exp1': lambda u, t: [diff(u, t) - u],
'tanh': lambda u, t: [diff(u, t) + u ** 2 - 1],
'psig': lambda u, t: [diff(u, t) - 3 * u + u ** 2],
'r1': lambda u, t: [diff(u, t) - u + u ** 2 + u ** 3],
'r2': lambda u, t: [diff(u, t) + u + u ** 2],
'r3': lambda u, t: [diff(u, t) + u ** 2],
'r4': lambda u, t: [diff(u, t) - u ** 2],
'q1': lambda u, t: [diff(u, t) - u + u ** 2],
'q2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3],
'q3': lambda u, t: [diff(u, t) + u ** 2 + u ** 4],
'q4': lambda u, t: [diff(u, t) - u ** 2 - u ** 4],
'high_order1': lambda u, t: [diff(u, t) + u - u ** 2 + u ** 3 - u ** 4 + u ** 5],
'high_order2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3 + u ** 4 - u ** 5],
'baseline': lambda u, t: [diff(u,t)]
}
solsa = np.load('data/q3_train_solution/3000.npy')
solsb = np.load('data/baseline_train_solution/3000.npy')
analytical =np.load('data/q3_gt_test_solution/3000.npy')
plt.figure()
plt.plot(solsa,label='q2')
plt.plot(solsb,label='high_order_2')
plt.plot(analytical,label='analytical_q2')
plt.legend()
plt.show() | true | true |
f71bcd5fb6a9e9c5a061b59f9eb248ca4c085954 | 5,479 | py | Python | create_7segment_dataset.py | Kazuhito00/7seg-image-generator | 2ab059814348800b289c033f839c7a255b72a1ac | [
"Apache-2.0"
] | null | null | null | create_7segment_dataset.py | Kazuhito00/7seg-image-generator | 2ab059814348800b289c033f839c7a255b72a1ac | [
"Apache-2.0"
] | null | null | null | create_7segment_dataset.py | Kazuhito00/7seg-image-generator | 2ab059814348800b289c033f839c7a255b72a1ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import argparse
import cv2 as cv
import numpy as np
from tqdm import tqdm
from create_7segment_image import create_7segment_image
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--width", help='image width', type=int, default=96)
parser.add_argument("--height", help='image height', type=int, default=96)
parser.add_argument("--number_width_min", type=float, default=0.1)
parser.add_argument("--number_width_max", type=float, default=0.9)
parser.add_argument("--number_height_min", type=float, default=0.4)
parser.add_argument("--number_height_max", type=float, default=0.9)
parser.add_argument("--thickness_min", type=float, default=0.01)
parser.add_argument("--thickness_max", type=float, default=0.25)
parser.add_argument("--blank_ratio_min", type=float, default=0.0)
parser.add_argument("--blank_ratio_max", type=float, default=0.1)
parser.add_argument("--shear_x_min", type=int, default=-10)
parser.add_argument("--shear_x_max", type=int, default=30)
parser.add_argument("--shift_x_min", type=int, default=-10)
parser.add_argument("--shift_x_max", type=int, default=10)
parser.add_argument("--shift_y_min", type=int, default=-10)
parser.add_argument("--shift_y_max", type=int, default=10)
parser.add_argument("--steps", help='create steps', type=int, default=3000)
parser.add_argument('--erase_debug_window', action='store_true')
parser.add_argument("--seed", help='random seed', type=int, default=42)
parser.add_argument("--start_count", type=int, default=0)
args = parser.parse_args()
return args
def main():
# 引数解析 #################################################################
args = get_args()
image_width = args.width
image_height = args.height
number_width_min = args.number_width_min
number_width_max = args.number_width_max
number_height_min = args.number_height_min
number_height_max = args.number_height_max
thickness_min = args.thickness_min
thickness_max = args.thickness_max
blank_ratio_min = args.blank_ratio_min
blank_ratio_max = args.blank_ratio_max
shear_x_min = args.shear_x_min
shear_x_max = args.shear_x_max
shift_x_min = args.shift_x_min
shift_x_max = args.shift_x_max
shift_y_min = args.shift_y_min
shift_y_max = args.shift_y_max
steps = args.steps
erase_debug_window = args.erase_debug_window
seed = args.seed
image_count = args.start_count
random.seed(seed)
# 格納ディレクトリ作成
dataset_dir = 'dataset/'
for number in range(12):
os.makedirs(dataset_dir + '{:02}'.format(number), exist_ok=True)
# カラーセット
color_set_list = [
# bg_color, line_color, line_bg_color
[(110, 120, 120), (10, 20, 20), (90, 100, 100)],
[(113, 167, 154), (0, 6, 0), (104, 139, 129)],
[(2, 5, 19), (246, 247, 247), (17, 20, 35)],
[(242, 242, 242), (2, 2, 2), (222, 222, 222)],
[(3, 0, 12), (39, 87, 211), (68, 71, 72)],
[(3, 0, 12), (234, 157, 9), (68, 71, 72)],
[(3, 1, 29), (6, 0, 105), (49, 56, 63)],
[(14, 123, 0), (235, 235, 235), (14, 123, 0)],
[(2, 197, 147), (37, 86, 70), (2, 197, 147)],
[(200, 219, 211), (55, 55, 55), (147, 165, 158)],
[(64, 64, 64), (35, 233, 155), (64, 64, 64)],
[(30, 27, 85), (235, 240, 237), (32, 23, 183)],
[(34, 15, 49), (247, 247, 240), (164, 131, 121)],
[(7, 0, 3), (0, 215, 238), (66, 68, 68)],
[(0, 161, 255), (21, 98, 195), (0, 161, 255)],
[(253, 146, 64), (238, 9, 5), (253, 146, 64)],
]
for _ in tqdm(range(steps)):
# 画像生成設定
number_width = random.uniform(number_width_min, number_width_max)
number_height = random.uniform(number_height_min, number_height_max)
thickness = random.uniform(thickness_min, thickness_max)
blank_ratio = random.uniform(blank_ratio_min, blank_ratio_max)
shear_x = random.uniform(shear_x_min, shear_x_max)
shift_x = random.uniform(shift_x_min, shift_x_max)
shift_y = random.uniform(shift_y_min, shift_y_max)
color_index = int(random.uniform(0, len(color_set_list)))
for number_id in range(12):
# 画像生成
image = create_7segment_image(
number=number_id,
image_size=(image_width, image_height),
bg_color=color_set_list[color_index][0],
line_color=color_set_list[color_index][1],
line_bg_color=color_set_list[color_index][2],
number_width=number_width,
number_height=number_height,
thickness=thickness,
blank_ratio=blank_ratio,
shear_x=shear_x,
shift=(shift_x, shift_y),
)
# 描画
if not erase_debug_window:
cv.imshow('7seg generator', image)
cv.waitKey(10)
# 画像保存
save_path = os.path.join(dataset_dir, '{:02}'.format(number_id),
'{:08}.png'.format(image_count))
cv.imwrite(save_path, image)
image_count += 1
cv.destroyAllWindows()
if __name__ == '__main__':
main() | 38.048611 | 80 | 0.592626 |
import os
import random
import argparse
import cv2 as cv
import numpy as np
from tqdm import tqdm
from create_7segment_image import create_7segment_image
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--width", help='image width', type=int, default=96)
parser.add_argument("--height", help='image height', type=int, default=96)
parser.add_argument("--number_width_min", type=float, default=0.1)
parser.add_argument("--number_width_max", type=float, default=0.9)
parser.add_argument("--number_height_min", type=float, default=0.4)
parser.add_argument("--number_height_max", type=float, default=0.9)
parser.add_argument("--thickness_min", type=float, default=0.01)
parser.add_argument("--thickness_max", type=float, default=0.25)
parser.add_argument("--blank_ratio_min", type=float, default=0.0)
parser.add_argument("--blank_ratio_max", type=float, default=0.1)
parser.add_argument("--shear_x_min", type=int, default=-10)
parser.add_argument("--shear_x_max", type=int, default=30)
parser.add_argument("--shift_x_min", type=int, default=-10)
parser.add_argument("--shift_x_max", type=int, default=10)
parser.add_argument("--shift_y_min", type=int, default=-10)
parser.add_argument("--shift_y_max", type=int, default=10)
parser.add_argument("--steps", help='create steps', type=int, default=3000)
parser.add_argument('--erase_debug_window', action='store_true')
parser.add_argument("--seed", help='random seed', type=int, default=42)
parser.add_argument("--start_count", type=int, default=0)
args = parser.parse_args()
return args
def main():
blank_ratio_max)
shear_x = random.uniform(shear_x_min, shear_x_max)
shift_x = random.uniform(shift_x_min, shift_x_max)
shift_y = random.uniform(shift_y_min, shift_y_max)
color_index = int(random.uniform(0, len(color_set_list)))
for number_id in range(12):
image = create_7segment_image(
number=number_id,
image_size=(image_width, image_height),
bg_color=color_set_list[color_index][0],
line_color=color_set_list[color_index][1],
line_bg_color=color_set_list[color_index][2],
number_width=number_width,
number_height=number_height,
thickness=thickness,
blank_ratio=blank_ratio,
shear_x=shear_x,
shift=(shift_x, shift_y),
)
if not erase_debug_window:
cv.imshow('7seg generator', image)
cv.waitKey(10)
save_path = os.path.join(dataset_dir, '{:02}'.format(number_id),
'{:08}.png'.format(image_count))
cv.imwrite(save_path, image)
image_count += 1
cv.destroyAllWindows()
if __name__ == '__main__':
main() | true | true |
f71bcdce19046a219d824e7ae538b1d15a34fb6e | 27,412 | py | Python | sklearn/preprocessing/tests/test_polynomial.py | talahajeer/scikit-learn | d66b42708a5912039740cd08f747229433e579b5 | [
"BSD-3-Clause"
] | 1 | 2021-12-28T09:33:38.000Z | 2021-12-28T09:33:38.000Z | sklearn/preprocessing/tests/test_polynomial.py | talahajeer/scikit-learn | d66b42708a5912039740cd08f747229433e579b5 | [
"BSD-3-Clause"
] | null | null | null | sklearn/preprocessing/tests/test_polynomial.py | talahajeer/scikit-learn | d66b42708a5912039740cd08f747229433e579b5 | [
"BSD-3-Clause"
] | 2 | 2017-01-16T17:53:31.000Z | 2017-04-22T06:13:07.000Z | import numpy as np
import pytest
from scipy import sparse
from scipy.sparse import random as sparse_random
from sklearn.utils._testing import assert_array_almost_equal
from numpy.testing import assert_allclose, assert_array_equal
from scipy.interpolate import BSpline
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
KBinsDiscretizer,
PolynomialFeatures,
SplineTransformer,
)
from sklearn.utils.fixes import linspace, sp_version, parse_version
@pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer))
def test_polynomial_and_spline_array_order(est):
    """Output arrays must honor the requested memory layout (``order``)."""
    X = np.arange(10).reshape(5, 2)

    def c_contiguous(arr):
        # An array is C-contiguous iff its transpose is Fortran-ordered.
        return np.isfortran(arr.T)

    default_out = est().fit_transform(X)
    c_out = est(order="C").fit_transform(X)
    f_out = est(order="F").fit_transform(X)
    assert c_contiguous(default_out)
    assert c_contiguous(c_out)
    assert np.isfortran(f_out)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"degree": -1}, "degree must be a non-negative integer"),
        ({"degree": 2.5}, "degree must be a non-negative integer"),
        ({"degree": "string"}, "degree must be a non-negative integer"),
        # The n_knots=1 case was previously listed twice; duplicate removed.
        ({"n_knots": 1}, "n_knots must be a positive integer >= 2."),
        ({"n_knots": 2.5}, "n_knots must be a positive integer >= 2."),
        ({"n_knots": "string"}, "n_knots must be a positive integer >= 2."),
        ({"knots": 1}, "Expected 2D array, got scalar array instead:"),
        ({"knots": [1, 2]}, "Expected 2D array, got 1D array instead:"),
        (
            {"knots": [[1]]},
            r"Number of knots, knots.shape\[0\], must be >= 2.",
        ),
        (
            {"knots": [[1, 5], [2, 6]]},
            r"knots.shape\[1\] == n_features is violated.",
        ),
        (
            {"knots": [[1], [1], [2]]},
            "knots must be sorted without duplicates.",
        ),
        ({"knots": [[2], [1]]}, "knots must be sorted without duplicates."),
        (
            {"extrapolation": None},
            "extrapolation must be one of 'error', 'constant', 'linear', "
            "'continue' or 'periodic'.",
        ),
        (
            {"extrapolation": 1},
            "extrapolation must be one of 'error', 'constant', 'linear', "
            "'continue' or 'periodic'.",
        ),
        (
            {"extrapolation": "string"},
            "extrapolation must be one of 'error', 'constant', 'linear', "
            "'continue' or 'periodic'.",
        ),
        ({"include_bias": None}, "include_bias must be bool."),
        ({"include_bias": 1}, "include_bias must be bool."),
        ({"include_bias": "string"}, "include_bias must be bool."),
        (
            {"extrapolation": "periodic", "n_knots": 3, "degree": 3},
            "Periodic splines require degree < n_knots. Got n_knots=3 and degree=3.",
        ),
        (
            {"extrapolation": "periodic", "knots": [[0], [1]], "degree": 2},
            "Periodic splines require degree < n_knots. Got n_knots=2 and degree=2.",
        ),
    ],
)
def test_spline_transformer_input_validation(params, err_msg):
    """Invalid SplineTransformer parameters must raise a matching ValueError."""
    X = [[1], [2]]
    with pytest.raises(ValueError, match=err_msg):
        SplineTransformer(**params).fit(X)
def test_spline_transformer_manual_knot_input():
    """Knots passed as a nested list or as an ndarray must be equivalent."""
    X = np.arange(20).reshape(10, 2)
    knot_list = [[0.5, 1], [1.5, 2], [5, 10]]
    from_list = SplineTransformer(degree=3, knots=knot_list, n_knots=None).fit(X)
    from_array = SplineTransformer(
        degree=3, knots=np.asarray(knot_list), n_knots=None
    ).fit(X)
    # The fitted per-feature knot vectors must be identical.
    for spline_a, spline_b in zip(from_list.bsplines_, from_array.bsplines_):
        assert_allclose(spline_a.t, spline_b.t)
@pytest.mark.parametrize("extrapolation", ["continue", "periodic"])
def test_spline_transformer_integer_knots(extrapolation):
    """Integer-valued knot positions must be accepted without error."""
    X = np.arange(20).reshape(10, 2)
    int_knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]
    transformer = SplineTransformer(
        degree=3, knots=int_knots, extrapolation=extrapolation
    )
    # Only checks that fitting and transforming do not raise.
    transformer.fit_transform(X)
def test_spline_transformer_feature_names():
    """Test that SplineTransformer generates correct features name."""
    X = np.arange(20).reshape(10, 2)
    splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)
    feature_names = splt.get_feature_names()
    # With include_bias=True there are 5 spline features per input column,
    # named "<input>_sp_<index>".
    assert_array_equal(
        feature_names,
        [
            "x0_sp_0",
            "x0_sp_1",
            "x0_sp_2",
            "x0_sp_3",
            "x0_sp_4",
            "x1_sp_0",
            "x1_sp_1",
            "x1_sp_2",
            "x1_sp_3",
            "x1_sp_4",
        ],
    )
    # Without the bias term one feature per column is dropped (4 remain),
    # and caller-supplied input names are used as prefixes.
    splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)
    feature_names = splt.get_feature_names(["a", "b"])
    assert_array_equal(
        feature_names,
        [
            "a_sp_0",
            "a_sp_1",
            "a_sp_2",
            "a_sp_3",
            "b_sp_0",
            "b_sp_1",
            "b_sp_2",
            "b_sp_3",
        ],
    )
@pytest.mark.parametrize("degree", range(1, 5))
@pytest.mark.parametrize("n_knots", range(3, 5))
@pytest.mark.parametrize("knots", ["uniform", "quantile"])
@pytest.mark.parametrize("extrapolation", ["constant", "periodic"])
def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):
    """B-spline basis functions must sum to one inside the boundaries.

    Verified on both the training points and held-out interior points.
    """
    grid = np.linspace(0, 1, 100)[:, None]
    # Make sure both boundaries (0 and 1) are part of the training set.
    X_train = np.r_[[[0]], grid[::2, :], [[1]]]
    X_test = grid[1::2, :]
    if extrapolation == "periodic":
        # Periodic splines require degree < n_knots.
        n_knots = n_knots + degree
    transformer = SplineTransformer(
        n_knots=n_knots,
        degree=degree,
        knots=knots,
        include_bias=True,
        extrapolation=extrapolation,
    )
    transformer.fit(X_train)
    for data in (X_train, X_test):
        assert_allclose(transformer.transform(data).sum(axis=1), 1)
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
def test_spline_transformer_linear_regression(bias, intercept):
    """A spline + OLS pipeline must fit a sine curve closely."""
    X = np.linspace(0, 10, 100)[:, None]
    # Shift by 2 so that no target value is 0 (assert_allclose uses rtol).
    y = np.sin(X[:, 0]) + 2
    spline = SplineTransformer(
        n_knots=15,
        degree=3,
        include_bias=bias,
        extrapolation="constant",
    )
    model = Pipeline(
        steps=[
            ("spline", spline),
            ("ols", LinearRegression(fit_intercept=intercept)),
        ]
    )
    model.fit(X, y)
    assert_allclose(model.predict(X), y, rtol=1e-3)
@pytest.mark.parametrize(
    "knots, n_knots, degree",
    [
        ("uniform", 5, 3),
        ("uniform", 12, 8),
        (
            # Explicit per-feature knot positions; n_knots is then unused.
            [[-1.0, 0.0], [0, 1.0], [0.1, 2.0], [0.2, 3.0], [0.3, 4.0], [1, 5.0]],
            None,
            3,
        ),
    ],
)
def test_spline_transformer_periodicity_of_extrapolation(knots, n_knots, degree):
    """Test that the SplineTransformer is periodic for multiple features."""
    # X_2 equals X_1 shifted by each feature's full span (2 for the first
    # feature, 5 for the second) — i.e. by one period per feature.
    X_1 = linspace((-1, 0), (1, 5), 10)
    X_2 = linspace((1, 5), (3, 10), 10)
    splt = SplineTransformer(
        knots=knots, n_knots=n_knots, degree=degree, extrapolation="periodic"
    )
    splt.fit(X_1)
    # Periodic extrapolation must reproduce identical basis values.
    assert_allclose(splt.transform(X_1), splt.transform(X_2))
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
def test_spline_transformer_periodic_linear_regression(bias, intercept):
    """Periodic splines + OLS must fit and extrapolate a periodic curve."""

    def target(x):
        # "+ 3" keeps all values away from 0 for assert_allclose.
        return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3

    X = np.linspace(0, 1, 101)[:, None]
    model = Pipeline(
        steps=[
            (
                "spline",
                SplineTransformer(
                    n_knots=20,
                    degree=3,
                    include_bias=bias,
                    extrapolation="periodic",
                ),
            ),
            ("ols", LinearRegression(fit_intercept=intercept)),
        ]
    )
    model.fit(X, target(X[:, 0]))
    # Predict on a wider grid to exercise periodic extrapolation.
    X_wide = np.linspace(-1, 2, 301)[:, None]
    y_pred = model.predict(X_wide)
    assert_allclose(y_pred, target(X_wide[:, 0]), atol=0.01, rtol=0.01)
    # Points one full period apart must give (nearly) identical predictions.
    assert_allclose(y_pred[0:100], y_pred[100:200], rtol=1e-3)
@pytest.mark.skipif(
    sp_version < parse_version("1.0.0"),
    reason="Periodic extrapolation not yet implemented for BSpline.",
)
def test_spline_transformer_periodic_spline_backport():
    """Test that the backport of extrapolate="periodic" works correctly"""
    X = np.linspace(-2, 3.5, 10)[:, None]
    degree = 2
    # Use periodic extrapolation backport in SplineTransformer
    transformer = SplineTransformer(
        degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]]
    )
    Xt = transformer.fit_transform(X)
    # Use periodic extrapolation in BSpline
    # NOTE(review): knot vector -3..3 appears to be the fitted knots [-1, 0, 1]
    # periodically extended on both sides — confirm against the backport code.
    coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
    spl = BSpline(np.arange(-3, 4), coef, degree, "periodic")
    Xspl = spl(X[:, 0])
    # Both implementations must agree, including outside [-1, 1].
    assert_allclose(Xt, Xspl)
def test_spline_transformer_periodic_splines_periodicity():
    """Shifting the periodic knot sequence only permutes the output columns."""
    X = np.linspace(0, 10, 101)[:, None]
    knots_a = [[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]]
    # Same knots rotated by one position (with 9.0 = 1.0 + period 8).
    knots_b = [[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]]
    out_a = SplineTransformer(
        degree=3, extrapolation="periodic", knots=knots_a
    ).fit_transform(X)
    out_b = SplineTransformer(
        degree=3, extrapolation="periodic", knots=knots_b
    ).fit_transform(X)
    # Column 4 of the shifted basis corresponds to column 0 of the original.
    assert_allclose(out_a, out_b[:, [4, 0, 1, 2, 3]])
@pytest.mark.parametrize("degree", [3, 5])
def test_spline_transformer_periodic_splines_smoothness(degree):
    """Test that spline transformation is smooth at first / last knot."""
    X = np.linspace(-2, 10, 10_000)[:, None]
    transformer = SplineTransformer(
        degree=degree,
        extrapolation="periodic",
        knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
    )
    Xt = transformer.fit_transform(X)
    # `delta` is the sampling step; `tol` scales with it so the continuity
    # check is independent of the grid resolution.
    delta = (X.max() - X.min()) / len(X)
    tol = 10 * delta
    dXt = Xt
    # We expect splines of degree `degree` to be (`degree`-1) times
    # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th
    # derivative should be continuous. This is the case if the (d+1)-th
    # numerical derivative is reasonably small (smaller than `tol` in absolute
    # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`
    # and compare them to `tol`.
    #
    # Note that the 0-th derivative is the function itself, such that we are
    # also checking its continuity.
    for d in range(1, degree + 1):
        # Check continuity of the (d-1)-th derivative
        diff = np.diff(dXt, axis=0)
        assert np.abs(diff).max() < tol
        # Compute d-th numeric derivative
        dXt = diff / delta
    # As degree `degree` splines are not `degree` times continuously
    # differentiable at the knots, the `degree + 1`-th numeric derivative
    # should have spikes at the knots.
    diff = np.diff(dXt, axis=0)
    assert np.abs(diff).max() > 1
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5])
def test_spline_transformer_extrapolation(bias, intercept, degree):
    """Check 'constant', 'linear' and 'error' extrapolation on a line."""
    X = np.linspace(-1, 1, 100)[:, None]
    y = X.squeeze()

    def fit_pipeline(mode):
        # Spline features followed by OLS, trained on the straight line.
        pipe = Pipeline(
            [
                [
                    "spline",
                    SplineTransformer(
                        n_knots=4,
                        degree=degree,
                        include_bias=bias,
                        extrapolation=mode,
                    ),
                ],
                ["ols", LinearRegression(fit_intercept=intercept)],
            ]
        )
        return pipe.fit(X, y)

    # 'constant': predictions saturate at the boundary values.
    assert_allclose(fit_pipeline("constant").predict([[-10], [5]]), [-1, 1])
    # 'linear': the line continues beyond the training range.
    assert_allclose(fit_pipeline("linear").predict([[-10], [5]]), [-10, 5])
    # 'error': transforming out-of-range samples must raise.
    splt = SplineTransformer(
        n_knots=4, degree=degree, include_bias=bias, extrapolation="error"
    )
    splt.fit(X)
    for out_of_range in ([[-10]], [[5]]):
        with pytest.raises(ValueError):
            splt.transform(out_of_range)
def test_spline_transformer_kbindiscretizer():
    """Degree-0 splines with quantile knots match KBinsDiscretizer one-hot."""
    rng = np.random.RandomState(97531)
    X = rng.randn(200).reshape(200, 1)
    n_bins = 5
    spline_out = SplineTransformer(
        n_knots=n_bins + 1, degree=0, knots="quantile", include_bias=True
    ).fit_transform(X)
    binned_out = KBinsDiscretizer(
        n_bins=n_bins, encode="onehot-dense", strategy="quantile"
    ).fit_transform(X)
    # Mathematically identical; compare with a very tight relative tolerance.
    assert_allclose(spline_out, binned_out, rtol=1e-13)
@pytest.mark.parametrize("n_knots", [5, 10])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("degree", [3, 5])
def test_spline_transformer_n_features_out(n_knots, include_bias, degree):
    """``transform`` must produce exactly ``n_features_out_`` columns."""
    X = np.linspace(0, 1, 10)[:, None]
    transformer = SplineTransformer(
        n_knots=n_knots, degree=degree, include_bias=include_bias
    ).fit(X)
    assert transformer.transform(X).shape[1] == transformer.n_features_out_
@pytest.mark.parametrize(
    "params, err_msg",
    # Tuple-valued degrees encode a (min_degree, max_degree) range.
    [
        ({"degree": -1}, "degree must be a non-negative integer"),
        ({"degree": 2.5}, "degree must be a non-negative int or tuple"),
        ({"degree": "12"}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": "string"}, "degree must be a non-negative int or tuple"),
        ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"),
    ],
)
def test_polynomial_features_input_validation(params, err_msg):
    """Test that we raise errors for invalid input in PolynomialFeatures."""
    X = [[1], [2]]
    with pytest.raises(ValueError, match=err_msg):
        PolynomialFeatures(**params).fit(X)
@pytest.fixture()
def single_feature_degree3():
    """One feature (0..5) and its dense power expansion [1, x, x^2, x^3]."""
    X = np.arange(6)[:, np.newaxis]
    # x**0 equals ones_like(X) for an integer column, so this is the same P.
    P = np.hstack([X ** d for d in range(4)])
    return X, P
@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    # `indices` selects the expected columns out of P = [1, x, x^2, x^3].
    [
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1]),
        (3, False, True, [1]),
        ((2, 3), True, False, [0, 2, 3]),
        ((2, 3), False, False, [2, 3]),
        ((2, 3), True, True, [0]),
        ((2, 3), False, True, []),
    ],
)
@pytest.mark.parametrize(
    "sparse_X",
    [False, sparse.csr_matrix, sparse.csc_matrix],
)
def test_polynomial_features_one_feature(
    single_feature_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    sparse_X,
):
    """Test PolynomialFeatures on single feature up to degree 3."""
    X, P = single_feature_degree3
    if sparse_X:
        X = sparse_X(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if sparse_X:
        # Compare in dense space; values must match exactly either way.
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    if tf.n_output_features_ > 0:
        # One row of exponents per output feature.
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
@pytest.fixture()
def two_features_degree3():
    """Two features and their full polynomial expansion up to degree 3.

    Columns are ordered by total degree, and within each degree by
    decreasing power of the first feature (x1^d, x1^(d-1)*x2, ..., x2^d).
    """
    X = np.arange(6).reshape((3, 2))
    x1, x2 = X[:, :1], X[:, 1:]
    P = np.hstack(
        [x1 ** i * x2 ** (d - i) for d in range(4) for i in range(d, -1, -1)]
    )
    return X, P
@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    # `indices` refers to the numbered columns (0-9) of the P fixture,
    # ordered by total degree from 1 up to x^3-terms.
    [
        (2, True, False, slice(0, 6)),
        (2, False, False, slice(1, 6)),
        (2, True, True, [0, 1, 2, 4]),
        (2, False, True, [1, 2, 4]),
        ((2, 2), True, False, [0, 3, 4, 5]),
        ((2, 2), False, False, [3, 4, 5]),
        ((2, 2), True, True, [0, 4]),
        ((2, 2), False, True, [4]),
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1, 2, 4]),
        (3, False, True, [1, 2, 4]),
        ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]),
        ((2, 3), False, False, slice(3, None)),
        ((2, 3), True, True, [0, 4]),
        ((2, 3), False, True, [4]),
        ((3, 3), True, False, [0, 6, 7, 8, 9]),
        ((3, 3), False, False, [6, 7, 8, 9]),
        ((3, 3), True, True, [0]),
        ((3, 3), False, True, []),  # would need 3 input features
    ],
)
@pytest.mark.parametrize(
    "sparse_X",
    [False, sparse.csr_matrix, sparse.csc_matrix],
)
def test_polynomial_features_two_features(
    two_features_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    sparse_X,
):
    """Test PolynomialFeatures on 2 features up to degree 3."""
    X, P = two_features_degree3
    if sparse_X:
        X = sparse_X(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if sparse_X:
        # Compare in dense space; values must match exactly either way.
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    if tf.n_output_features_ > 0:
        # One row of exponents per output feature.
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
def test_polynomial_feature_names():
    """Feature names must mirror the generated monomials for every degree spec."""
    X = np.arange(30).reshape(10, 3)
    poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
    feature_names = poly.get_feature_names()
    assert_array_equal(
        ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # Custom input names, no bias, up to degree 3.
    poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(
        [
            "a",
            "b",
            "c",
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # (min_degree, max_degree) range: degree-0 and degree-1 terms are dropped.
    poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(
        [
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # interaction_only at exactly degree 3 leaves only the bias and a*b*c.
    poly = PolynomialFeatures(
        degree=(3, 3), include_bias=True, interaction_only=True
    ).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(["1", "a b c"], feature_names)
    assert len(feature_names) == poly.transform(X).shape[1]
    # test some unicode
    # NOTE(review): "\u0001F40D" is U+0001 followed by the literal text "F40D";
    # the snake emoji was probably intended via "\U0001F40D" — confirm before
    # changing (the assertion uses the same literal, so the test passes anyway).
    poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
    feature_names = poly.get_feature_names(["\u0001F40D", "\u262E", "\u05D0"])
    assert_array_equal(["1", "\u0001F40D", "\u262E", "\u05D0"], feature_names)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
        (4, False, False, np.float64),
        (4, False, True, np.float64),
    ],
)
def test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):
    """CSC sparse input must give exactly the same expansion as dense input."""
    rng = np.random.RandomState(0)
    X_dense = rng.randint(0, 2, (100, 2))
    X_csc = sparse.csc_matrix(X_dense)
    poly = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    out_csc = poly.fit_transform(X_csc.astype(dtype))
    out_dense = poly.fit_transform(X_dense.astype(dtype))
    # Sparse input yields sparse output with matching dtype and values.
    assert isinstance(out_csc, sparse.csc_matrix)
    assert out_csc.dtype == out_dense.dtype
    assert_array_almost_equal(out_csc.A, out_dense)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
def test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):
    """CSR sparse input must yield the same expansion as dense input."""
    rng = np.random.RandomState(0)
    X = rng.randint(0, 2, (100, 2))
    X_csr = sparse.csr_matrix(X)
    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr.astype(dtype))
    # copy=False skips the copy when X already has the target dtype;
    # X is not used again afterwards, so in-place conversion is safe.
    Xt_dense = est.fit_transform(X.astype(dtype, copy=False))
    # Sparse input yields sparse output with matching dtype and values.
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize("n_features", [1, 4, 5])
@pytest.mark.parametrize(
    "min_degree, max_degree", [(0, 1), (0, 2), (1, 3), (0, 4), (3, 4)]
)
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
def test_num_combinations(
    n_features,
    min_degree,
    max_degree,
    interaction_only,
    include_bias,
):
    """
    Test that n_output_features_ is calculated correctly.

    NOTE(review): `min_degree` is parametrized but never used below — both the
    estimator (plain int degree) and `_combinations` (min_degree=0) use an
    effective minimum degree of 0. Confirm whether degree=(min_degree,
    max_degree) was intended here.
    """
    # A sparse one-hot row whose last column fixes n_features_in_ == n_features.
    x = sparse.csr_matrix(([1], ([0], [n_features - 1])))
    est = PolynomialFeatures(
        degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    est.fit(x)
    num_combos = est.n_output_features_
    combos = PolynomialFeatures._combinations(
        n_features=n_features,
        min_degree=0,
        max_degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    # Generator expression: counting does not need to materialize a list.
    assert num_combos == sum(1 for _ in combos)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
def test_polynomial_features_csr_X_floats(deg, include_bias, interaction_only, dtype):
    """Float-valued CSR input must match the dense expansion."""
    X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
    X_dense = X_csr.toarray()
    poly = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    out_csr = poly.fit_transform(X_csr.astype(dtype))
    out_dense = poly.fit_transform(X_dense.astype(dtype))
    assert isinstance(out_csr, sparse.csr_matrix)
    assert out_csr.dtype == out_dense.dtype
    assert_array_almost_equal(out_csr.A, out_dense)
@pytest.mark.parametrize(
    ["zero_row_index", "deg", "interaction_only"],
    [
        (0, 2, True),
        (1, 2, True),
        (2, 2, True),
        (0, 3, True),
        (1, 3, True),
        (2, 3, True),
        (0, 2, False),
        (1, 2, False),
        (2, 2, False),
        (0, 3, False),
        (1, 3, False),
        (2, 3, False),
    ],
)
def test_polynomial_features_csr_X_zero_row(zero_row_index, deg, interaction_only):
    """A zeroed-out row in CSR input must not change the result versus dense."""
    X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
    X_csr[zero_row_index, :] = 0.0
    X_dense = X_csr.toarray()
    poly = PolynomialFeatures(
        deg, include_bias=False, interaction_only=interaction_only
    )
    out_csr = poly.fit_transform(X_csr)
    out_dense = poly.fit_transform(X_dense)
    assert isinstance(out_csr, sparse.csr_matrix)
    assert out_csr.dtype == out_dense.dtype
    assert_array_almost_equal(out_csr.A, out_dense)
# Degree 4 should always be one more than the highest degree supported by
# _csr_expansion.
@pytest.mark.parametrize(
    ["include_bias", "interaction_only"],
    [(True, True), (True, False), (False, True), (False, False)],
)
def test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):
    """Degree-4 expansion of CSR input must match the dense expansion."""
    X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
    X_dense = X_csr.toarray()
    poly = PolynomialFeatures(
        4, include_bias=include_bias, interaction_only=interaction_only
    )
    out_csr = poly.fit_transform(X_csr)
    out_dense = poly.fit_transform(X_dense)
    assert isinstance(out_csr, sparse.csr_matrix)
    assert out_csr.dtype == out_dense.dtype
    assert_array_almost_equal(out_csr.A, out_dense)
@pytest.mark.parametrize(
    ["deg", "dim", "interaction_only"],
    # Edge combinations where the feature count `dim` is <= the degree.
    [
        (2, 1, True),
        (2, 2, True),
        (3, 1, True),
        (3, 2, True),
        (3, 3, True),
        (2, 1, False),
        (2, 2, False),
        (3, 1, False),
        (3, 2, False),
        (3, 3, False),
    ],
)
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):
    """CSR and dense expansions must agree for edge (dim, degree) combinations."""
    X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()
    X = X_csr.toarray()
    est = PolynomialFeatures(deg, interaction_only=interaction_only)
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)
    # Sparse input yields sparse output with matching dtype and values.
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
def test_polynomial_features_deprecated_n_input_features():
    """Accessing `n_input_features_` must warn about its removal in 1.2."""
    # FIXME: remove this test in 1.2 together with the attribute.
    expected_msg = (
        "The attribute `n_input_features_` was deprecated in version "
        "1.0 and will be removed in 1.2."
    )
    X = np.arange(10).reshape(5, 2)
    fitted = PolynomialFeatures().fit(X)
    with pytest.warns(FutureWarning, match=expected_msg):
        fitted.n_input_features_
| 31.948718 | 88 | 0.583212 | import numpy as np
import pytest
from scipy import sparse
from scipy.sparse import random as sparse_random
from sklearn.utils._testing import assert_array_almost_equal
from numpy.testing import assert_allclose, assert_array_equal
from scipy.interpolate import BSpline
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
KBinsDiscretizer,
PolynomialFeatures,
SplineTransformer,
)
from sklearn.utils.fixes import linspace, sp_version, parse_version
@pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer))
def test_polynomial_and_spline_array_order(est):
X = np.arange(10).reshape(5, 2)
def is_c_contiguous(a):
return np.isfortran(a.T)
assert is_c_contiguous(est().fit_transform(X))
assert is_c_contiguous(est(order="C").fit_transform(X))
assert np.isfortran(est(order="F").fit_transform(X))
@pytest.mark.parametrize(
"params, err_msg",
[
({"degree": -1}, "degree must be a non-negative integer"),
({"degree": 2.5}, "degree must be a non-negative integer"),
({"degree": "string"}, "degree must be a non-negative integer"),
({"n_knots": 1}, "n_knots must be a positive integer >= 2."),
({"n_knots": 1}, "n_knots must be a positive integer >= 2."),
({"n_knots": 2.5}, "n_knots must be a positive integer >= 2."),
({"n_knots": "string"}, "n_knots must be a positive integer >= 2."),
({"knots": 1}, "Expected 2D array, got scalar array instead:"),
({"knots": [1, 2]}, "Expected 2D array, got 1D array instead:"),
(
{"knots": [[1]]},
r"Number of knots, knots.shape\[0\], must be >= 2.",
),
(
{"knots": [[1, 5], [2, 6]]},
r"knots.shape\[1\] == n_features is violated.",
),
(
{"knots": [[1], [1], [2]]},
"knots must be sorted without duplicates.",
),
({"knots": [[2], [1]]}, "knots must be sorted without duplicates."),
(
{"extrapolation": None},
"extrapolation must be one of 'error', 'constant', 'linear', "
"'continue' or 'periodic'.",
),
(
{"extrapolation": 1},
"extrapolation must be one of 'error', 'constant', 'linear', "
"'continue' or 'periodic'.",
),
(
{"extrapolation": "string"},
"extrapolation must be one of 'error', 'constant', 'linear', "
"'continue' or 'periodic'.",
),
({"include_bias": None}, "include_bias must be bool."),
({"include_bias": 1}, "include_bias must be bool."),
({"include_bias": "string"}, "include_bias must be bool."),
(
{"extrapolation": "periodic", "n_knots": 3, "degree": 3},
"Periodic splines require degree < n_knots. Got n_knots=3 and degree=3.",
),
(
{"extrapolation": "periodic", "knots": [[0], [1]], "degree": 2},
"Periodic splines require degree < n_knots. Got n_knots=2 and degree=2.",
),
],
)
def test_spline_transformer_input_validation(params, err_msg):
X = [[1], [2]]
with pytest.raises(ValueError, match=err_msg):
SplineTransformer(**params).fit(X)
def test_spline_transformer_manual_knot_input():
    """Array-like and ndarray knot positions must yield identical splines."""
    X = np.arange(20).reshape(10, 2)
    knots = [[0.5, 1], [1.5, 2], [5, 10]]
    st1 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)
    knots = np.asarray(knots)
    st2 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)
    # Same fitted knot vector per feature regardless of the input container.
    for i in range(X.shape[1]):
        assert_allclose(st1.bsplines_[i].t, st2.bsplines_[i].t)
@pytest.mark.parametrize("extrapolation", ["continue", "periodic"])
def test_spline_transformer_integer_knots(extrapolation):
    """Integer-valued knot positions must be accepted without error."""
    X = np.arange(20).reshape(10, 2)
    knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]
    # Only checks that fitting and transforming do not raise.
    _ = SplineTransformer(
        degree=3, knots=knots, extrapolation=extrapolation
    ).fit_transform(X)
def test_spline_transformer_feature_names():
X = np.arange(20).reshape(10, 2)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)
feature_names = splt.get_feature_names()
assert_array_equal(
feature_names,
[
"x0_sp_0",
"x0_sp_1",
"x0_sp_2",
"x0_sp_3",
"x0_sp_4",
"x1_sp_0",
"x1_sp_1",
"x1_sp_2",
"x1_sp_3",
"x1_sp_4",
],
)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)
feature_names = splt.get_feature_names(["a", "b"])
assert_array_equal(
feature_names,
[
"a_sp_0",
"a_sp_1",
"a_sp_2",
"a_sp_3",
"b_sp_0",
"b_sp_1",
"b_sp_2",
"b_sp_3",
],
)
@pytest.mark.parametrize("degree", range(1, 5))
@pytest.mark.parametrize("n_knots", range(3, 5))
@pytest.mark.parametrize("knots", ["uniform", "quantile"])
@pytest.mark.parametrize("extrapolation", ["constant", "periodic"])
def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):
X = np.linspace(0, 1, 100)[:, None]
X_train = np.r_[[[0]], X[::2, :], [[1]]]
X_test = X[1::2, :]
if extrapolation == "periodic":
n_knots = n_knots + degree
splt = SplineTransformer(
n_knots=n_knots,
degree=degree,
knots=knots,
include_bias=True,
extrapolation=extrapolation,
)
splt.fit(X_train)
for X in [X_train, X_test]:
assert_allclose(np.sum(splt.transform(X), axis=1), 1)
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
def test_spline_transformer_linear_regression(bias, intercept):
X = np.linspace(0, 10, 100)[:, None]
y = np.sin(X[:, 0]) + 2
pipe = Pipeline(
steps=[
(
"spline",
SplineTransformer(
n_knots=15,
degree=3,
include_bias=bias,
extrapolation="constant",
),
),
("ols", LinearRegression(fit_intercept=intercept)),
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict(X), y, rtol=1e-3)
@pytest.mark.parametrize(
"knots, n_knots, degree",
[
("uniform", 5, 3),
("uniform", 12, 8),
(
[[-1.0, 0.0], [0, 1.0], [0.1, 2.0], [0.2, 3.0], [0.3, 4.0], [1, 5.0]],
None,
3,
),
],
)
def test_spline_transformer_periodicity_of_extrapolation(knots, n_knots, degree):
X_1 = linspace((-1, 0), (1, 5), 10)
X_2 = linspace((1, 5), (3, 10), 10)
splt = SplineTransformer(
knots=knots, n_knots=n_knots, degree=degree, extrapolation="periodic"
)
splt.fit(X_1)
assert_allclose(splt.transform(X_1), splt.transform(X_2))
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
def test_spline_transformer_periodic_linear_regression(bias, intercept):
def f(x):
return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3
X = np.linspace(0, 1, 101)[:, None]
pipe = Pipeline(
steps=[
(
"spline",
SplineTransformer(
n_knots=20,
degree=3,
include_bias=bias,
extrapolation="periodic",
),
),
("ols", LinearRegression(fit_intercept=intercept)),
]
)
pipe.fit(X, f(X[:, 0]))
X_ = np.linspace(-1, 2, 301)[:, None]
predictions = pipe.predict(X_)
assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)
assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)
@pytest.mark.skipif(
sp_version < parse_version("1.0.0"),
reason="Periodic extrapolation not yet implemented for BSpline.",
)
def test_spline_transformer_periodic_spline_backport():
X = np.linspace(-2, 3.5, 10)[:, None]
degree = 2
transformer = SplineTransformer(
degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]]
)
Xt = transformer.fit_transform(X)
coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
spl = BSpline(np.arange(-3, 4), coef, degree, "periodic")
Xspl = spl(X[:, 0])
assert_allclose(Xt, Xspl)
def test_spline_transformer_periodic_splines_periodicity():
X = np.linspace(0, 10, 101)[:, None]
transformer_1 = SplineTransformer(
degree=3,
extrapolation="periodic",
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
)
transformer_2 = SplineTransformer(
degree=3,
extrapolation="periodic",
knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]],
)
Xt_1 = transformer_1.fit_transform(X)
Xt_2 = transformer_2.fit_transform(X)
assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])
@pytest.mark.parametrize("degree", [3, 5])
def test_spline_transformer_periodic_splines_smoothness(degree):
X = np.linspace(-2, 10, 10_000)[:, None]
transformer = SplineTransformer(
degree=degree,
extrapolation="periodic",
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
)
Xt = transformer.fit_transform(X)
delta = (X.max() - X.min()) / len(X)
tol = 10 * delta
dXt = Xt
for d in range(1, degree + 1):
diff = np.diff(dXt, axis=0)
assert np.abs(diff).max() < tol
dXt = diff / delta
diff = np.diff(dXt, axis=0)
assert np.abs(diff).max() > 1
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5])
def test_spline_transformer_extrapolation(bias, intercept, degree):
    """Check the 'constant', 'linear' and 'error' extrapolation modes on y = x."""
    X = np.linspace(-1, 1, 100)[:, None]
    y = X.squeeze()
    # "constant" extrapolation: predictions saturate at the boundary values.
    pipe = Pipeline(
        [
            [
                "spline",
                SplineTransformer(
                    n_knots=4,
                    degree=degree,
                    include_bias=bias,
                    extrapolation="constant",
                ),
            ],
            ["ols", LinearRegression(fit_intercept=intercept)],
        ]
    )
    pipe.fit(X, y)
    assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])
    # "linear" extrapolation: predictions continue the linear trend outside [-1, 1].
    pipe = Pipeline(
        [
            [
                "spline",
                SplineTransformer(
                    n_knots=4,
                    degree=degree,
                    include_bias=bias,
                    extrapolation="linear",
                ),
            ],
            ["ols", LinearRegression(fit_intercept=intercept)],
        ]
    )
    pipe.fit(X, y)
    assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])
    # "error" extrapolation: transforming out-of-range values must raise.
    splt = SplineTransformer(
        n_knots=4, degree=degree, include_bias=bias, extrapolation="error"
    )
    splt.fit(X)
    with pytest.raises(ValueError):
        splt.transform([[-10]])
    with pytest.raises(ValueError):
        splt.transform([[5]])
def test_spline_transformer_kbindiscretizer():
    """Degree-0 splines on quantile knots equal KBinsDiscretizer's one-hot bins."""
    rng = np.random.RandomState(97531)
    X = rng.randn(200).reshape(200, 1)
    n_bins = 5
    # Degree-0 splines are bin indicators; n_bins bins need n_bins + 1 knots.
    n_knots = n_bins + 1
    splt = SplineTransformer(
        n_knots=n_knots, degree=0, knots="quantile", include_bias=True
    )
    splines = splt.fit_transform(X)
    kbd = KBinsDiscretizer(n_bins=n_bins, encode="onehot-dense", strategy="quantile")
    kbins = kbd.fit_transform(X)
    # Both should produce identical indicator matrices.
    assert_allclose(splines, kbins, rtol=1e-13)
@pytest.mark.parametrize("n_knots", [5, 10])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("degree", [3, 5])
def test_spline_transformer_n_features_out(n_knots, include_bias, degree):
    """The transform output width must match the fitted ``n_features_out_``."""
    splt = SplineTransformer(n_knots=n_knots, degree=degree, include_bias=include_bias)
    X = np.linspace(0, 1, 10)[:, None]
    splt.fit(X)
    assert splt.transform(X).shape[1] == splt.n_features_out_
@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"degree": -1}, "degree must be a non-negative integer"),
        ({"degree": 2.5}, "degree must be a non-negative int or tuple"),
        ({"degree": "12"}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": "string"}, "degree must be a non-negative int or tuple"),
        ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"),
    ],
)
def test_polynomial_features_input_validation(params, err_msg):
    """Invalid ``degree`` values must raise ValueError with a matching message."""
    X = [[1], [2]]
    with pytest.raises(ValueError, match=err_msg):
        PolynomialFeatures(**params).fit(X)
@pytest.fixture()
def single_feature_degree3():
    """Single-column input X and its full degree-3 expansion [1, x, x^2, x^3]."""
    X = np.arange(6)[:, np.newaxis]
    P = np.hstack([np.ones_like(X), X, X ** 2, X ** 3])
    return X, P
@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    [
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1]),
        (3, False, True, [1]),
        ((2, 3), True, False, [0, 2, 3]),
        ((2, 3), False, False, [2, 3]),
        ((2, 3), True, True, [0]),
        ((2, 3), False, True, []),
    ],
)
@pytest.mark.parametrize(
    "sparse_X",
    [False, sparse.csr_matrix, sparse.csc_matrix],
)
def test_polynomial_features_one_feature(
    single_feature_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    sparse_X,
):
    """PolynomialFeatures on one feature: output equals the expected columns of
    the full degree-3 expansion, for dense, CSR and CSC input."""
    X, P = single_feature_degree3
    if sparse_X:
        X = sparse_X(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if sparse_X:
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    # powers_ is only defined when at least one output feature is produced.
    if tf.n_output_features_ > 0:
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
@pytest.fixture()
def two_features_degree3():
    """Two-column input X and its full degree-3 expansion, ordered by total
    degree and then by feature index."""
    X = np.arange(6).reshape((3, 2))
    x1 = X[:, :1]
    x2 = X[:, 1:]
    # Columns: 1, x1, x2, x1^2, x1*x2, x2^2, x1^3, x1^2*x2, x1*x2^2, x2^3.
    P = np.hstack(
        [
            x1 ** 0 * x2 ** 0,
            x1 ** 1 * x2 ** 0,
            x1 ** 0 * x2 ** 1,
            x1 ** 2 * x2 ** 0,
            x1 ** 1 * x2 ** 1,
            x1 ** 0 * x2 ** 2,
            x1 ** 3 * x2 ** 0,
            x1 ** 2 * x2 ** 1,
            x1 ** 1 * x2 ** 2,
            x1 ** 0 * x2 ** 3,
        ]
    )
    return X, P
@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    [
        (2, True, False, slice(0, 6)),
        (2, False, False, slice(1, 6)),
        (2, True, True, [0, 1, 2, 4]),
        (2, False, True, [1, 2, 4]),
        ((2, 2), True, False, [0, 3, 4, 5]),
        ((2, 2), False, False, [3, 4, 5]),
        ((2, 2), True, True, [0, 4]),
        ((2, 2), False, True, [4]),
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1, 2, 4]),
        (3, False, True, [1, 2, 4]),
        ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]),
        ((2, 3), False, False, slice(3, None)),
        ((2, 3), True, True, [0, 4]),
        ((2, 3), False, True, [4]),
        ((3, 3), True, False, [0, 6, 7, 8, 9]),
        ((3, 3), False, False, [6, 7, 8, 9]),
        ((3, 3), True, True, [0]),
        ((3, 3), False, True, []),
    ],
)
@pytest.mark.parametrize(
    "sparse_X",
    [False, sparse.csr_matrix, sparse.csc_matrix],
)
def test_polynomial_features_two_features(
    two_features_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    sparse_X,
):
    """PolynomialFeatures on two features: output equals the expected columns of
    the full degree-3 expansion, for dense, CSR and CSC input."""
    X, P = two_features_degree3
    if sparse_X:
        X = sparse_X(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if sparse_X:
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    # powers_ is only defined when at least one output feature is produced.
    if tf.n_output_features_ > 0:
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
def test_polynomial_feature_names():
    """Check get_feature_names output for default names, custom names, a
    (min_degree, max_degree) tuple, interaction_only, and unicode names."""
    X = np.arange(30).reshape(10, 3)
    # Default input feature names: x0, x1, x2.
    poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
    feature_names = poly.get_feature_names()
    assert_array_equal(
        ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # Custom input feature names, degree 3, no bias column.
    poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(
        [
            "a",
            "b",
            "c",
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # (min_degree, max_degree) tuple drops the degree-1 terms.
    poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(
        [
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]
    # interaction_only with degree (3, 3) keeps only the pure interaction term.
    poly = PolynomialFeatures(
        degree=(3, 3), include_bias=True, interaction_only=True
    ).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(["1", "a b c"], feature_names)
    assert len(feature_names) == poly.transform(X).shape[1]
    # Non-ASCII (unicode) feature names must pass through unchanged.
    poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
    feature_names = poly.get_feature_names(["\u0001F40D", "\u262E", "\u05D0"])
    assert_array_equal(["1", "\u0001F40D", "\u262E", "\u05D0"], feature_names)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
        (4, False, False, np.float64),
        (4, False, True, np.float64),
    ],
)
def test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):
    """CSC input must give the same output (format, dtype, values) as dense input."""
    rng = np.random.RandomState(0)
    X = rng.randint(0, 2, (100, 2))
    X_csc = sparse.csc_matrix(X)
    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csc = est.fit_transform(X_csc.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype))
    # Sparse format is preserved, dtype matches, and the values agree.
    assert isinstance(Xt_csc, sparse.csc_matrix)
    assert Xt_csc.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csc.A, Xt_dense)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
def test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):
    """CSR input must give the same output (format, dtype, values) as dense input."""
    rng = np.random.RandomState(0)
    X = rng.randint(0, 2, (100, 2))
    X_csr = sparse.csr_matrix(X)
    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype, copy=False))
    # Sparse format is preserved, dtype matches, and the values agree.
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize("n_features", [1, 4, 5])
@pytest.mark.parametrize(
    "min_degree, max_degree", [(0, 1), (0, 2), (1, 3), (0, 4), (3, 4)]
)
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
def test_num_combinations(
    n_features,
    min_degree,
    max_degree,
    interaction_only,
    include_bias,
):
    """Check that ``n_output_features_`` equals the number of term combinations
    produced by the private ``PolynomialFeatures._combinations`` helper.

    A sparse matrix with a single nonzero entry in the last column is used so
    that the estimator sees exactly ``n_features`` input features without
    materializing a dense array.
    """
    x = sparse.csr_matrix(([1], ([0], [n_features - 1])))
    est = PolynomialFeatures(
        degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    est.fit(x)
    num_combos = est.n_output_features_
    # The estimator is fitted with an int degree (= max_degree), so its output
    # counts terms starting from degree 0 -- hence min_degree=0 here.
    combos = PolynomialFeatures._combinations(
        n_features=n_features,
        min_degree=0,
        max_degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    # Count lazily with a generator instead of building a throwaway list.
    assert num_combos == sum(1 for _ in combos)
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
def test_polynomial_features_csr_X_floats(deg, include_bias, interaction_only, dtype):
    """CSR input with random floats must match the dense-input result."""
    X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
    X = X_csr.toarray()
    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype))
    # Sparse format is preserved, dtype matches, and the values agree.
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(
    ["zero_row_index", "deg", "interaction_only"],
    [
        (0, 2, True),
        (1, 2, True),
        (2, 2, True),
        (0, 3, True),
        (1, 3, True),
        (2, 3, True),
        (0, 2, False),
        (1, 2, False),
        (2, 2, False),
        (0, 3, False),
        (1, 3, False),
        (2, 3, False),
    ],
)
def test_polynomial_features_csr_X_zero_row(zero_row_index, deg, interaction_only):
    """An all-zero CSR row (at any position) must be handled like the dense case."""
    X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
    # Zero out one full row to exercise the empty-row code path.
    X_csr[zero_row_index, :] = 0.0
    X = X_csr.toarray()
    est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only)
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(
    ["include_bias", "interaction_only"],
    [(True, True), (True, False), (False, True), (False, False)],
)
def test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):
    """Degree 4 on CSR input must match the dense result for all flag combos."""
    X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
    X = X_csr.toarray()
    est = PolynomialFeatures(
        4, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(
    ["deg", "dim", "interaction_only"],
    [
        (2, 1, True),
        (2, 2, True),
        (3, 1, True),
        (3, 2, True),
        (3, 3, True),
        (2, 1, False),
        (2, 2, False),
        (3, 1, False),
        (3, 2, False),
        (3, 3, False),
    ],
)
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):
    """Edge cases where the feature dimension is at most the degree must match
    the dense result for CSR input."""
    X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()
    X = X_csr.toarray()
    est = PolynomialFeatures(deg, interaction_only=interaction_only)
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)
    assert isinstance(Xt_csr, sparse.csr_matrix)
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.A, Xt_dense)
def test_polynomial_features_deprecated_n_input_features():
    """Accessing ``n_input_features_`` must emit the documented FutureWarning."""
    depr_msg = (
        "The attribute `n_input_features_` was deprecated in version "
        "1.0 and will be removed in 1.2."
    )
    X = np.arange(10).reshape(5, 2)
    with pytest.warns(FutureWarning, match=depr_msg):
        PolynomialFeatures().fit(X).n_input_features_
| true | true |
f71bce762094aac77e775115a4361cc778bec8f7 | 22,413 | py | Python | tests/test_iso_parsing.py | mtauban/OWSLib | 0b64e7a8f7eb9e1fca369716f9803821066bf0f3 | [
"BSD-3-Clause"
] | null | null | null | tests/test_iso_parsing.py | mtauban/OWSLib | 0b64e7a8f7eb9e1fca369716f9803821066bf0f3 | [
"BSD-3-Clause"
] | null | null | null | tests/test_iso_parsing.py | mtauban/OWSLib | 0b64e7a8f7eb9e1fca369716f9803821066bf0f3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import io
from owslib import util
from owslib.etree import etree
from owslib.iso import (
MD_Metadata,
)
from owslib.namespaces import Namespaces
def get_md_resource(file_path):
    """Read the file and parse into an XML tree.

    Looks for a ``gmd:MD_Metadata`` element first, then a ``gmi:MI_Metadata``
    element; falls back to the document root itself when the root is one of
    those two elements.

    Parameters
    ----------
    file_path : str
        Path of the file to read.

    Returns
    -------
    etree.ElementTree
        XML tree of the resource on disk, or None if no metadata element
        is found.
    """
    namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi'))
    with io.open(file_path, mode='r', encoding='utf-8') as f:
        data = f.read().encode('utf-8')
    data = etree.fromstring(data)
    mdelem = data.find('.//' + util.nspath_eval('gmd:MD_Metadata', namespaces))
    if mdelem is None:
        # Fall back to the ISO 19115-2 root element.
        # NOTE: the previous `find(...) or find(...)` relied on Element
        # truthiness, which is False for an element without children and
        # would wrongly discard a childless gmd:MD_Metadata match.
        mdelem = data.find(
            './/' + util.nspath_eval('gmi:MI_Metadata', namespaces))
    if mdelem is None and data.tag in [
            '{http://www.isotc211.org/2005/gmd}MD_Metadata',
            '{http://www.isotc211.org/2005/gmi}MI_Metadata']:
        mdelem = data
    return mdelem
def assert_list(var, length):
    """Assert a given variable is a plain list with the given size.

    Parameters
    ----------
    var : variable
        Variable to test (i.e. should be a list).
    length : int
        The length/size of the list.
    """
    # Deliberately check the exact type (not isinstance) so list
    # subclasses do not pass.
    actual_type = type(var)
    assert actual_type is list
    size = len(var)
    assert size == length
def test_md_parsing_dov():
    """Test the parsing of a metadatarecord from DOV

    GetRecordById response available in
    tests/resources/csw_dov_getrecordbyid.xml
    """
    md_resource = get_md_resource('tests/resources/csw_dov_getrecordbyid.xml')
    md = MD_Metadata(md_resource)
    # Record-level metadata fields.
    assert type(md) is MD_Metadata
    assert md.identifier == '6c39d716-aecc-4fbc-bac8-4f05a49a78d5'
    assert md.dataseturi is None
    assert md.parentidentifier is None
    assert md.language is None
    assert md.languagecode == 'dut'
    assert md.charset == 'utf8'
    assert md.datestamp == '2018-02-21T16:14:24'
    assert md.hierarchy == 'dataset'
    # Metadata point of contact.
    assert_list(md.contact, 1)
    contact = md.contact[0]
    assert contact.organization == 'Vlaamse overheid - Vlaamse ' \
                                   'MilieuMaatschappij - Afdeling ' \
                                   'Operationeel Waterbeheer'
    assert contact.address == 'Koning Albert II-laan 20 bus 16'
    assert contact.city == 'Brussel'
    assert contact.postcode == '1000'
    assert contact.country == u'België'
    assert contact.email == 'info@vmm.be'
    assert contact.onlineresource.url == 'https://www.vmm.be'
    assert contact.role == 'pointOfContact'
    # Metadata standard and reference system.
    assert md.stdname == 'ISO 19115/2003/Cor.1:2006'
    assert md.stdver == 'GDI-Vlaanderen Best Practices - versie 1.0'
    assert md.referencesystem.code == '31370'
    assert md.referencesystem.codeSpace == 'EPSG'
    # Data identification section.
    assert_list(md.identificationinfo, 1)
    iden = md.identificationinfo[0]
    assert iden.title == 'Grondwatermeetnetten'
    assert iden.alternatetitle == 'Grondwatermeetnetten beschikbaar op DOV'
    assert_list(iden.date, 2)
    assert iden.date[0].date == '2002-05-22'
    assert iden.date[0].type == 'creation'
    assert iden.date[1].date == '2002-05-22'
    assert iden.date[1].type == 'publication'
    assert_list(iden.uricode, 1)
    assert iden.uricode[0] == 'A64F073B-9FBE-91DD-36FDE7462BBAFA61'
    assert_list(iden.uricodespace, 1)
    assert iden.uricodespace[0] == 'DOV-be'
    # Use limitations and constraints.
    assert_list(iden.uselimitation, 3)
    assert "Zie 'Overige beperkingen'" in iden.uselimitation
    assert "Bij het gebruik van de informatie die DOV aanbiedt, dient steeds " \
           "volgende standaardreferentie gebruikt te worden: Databank " \
           "Ondergrond Vlaanderen - (vermelding van de beheerder en de " \
           "specifieke geraadpleegde gegevens) - Geraadpleegd op dd/mm/jjjj, " \
           "op https://www.dov.vlaanderen.be" in iden.uselimitation
    assert "Volgende aansprakelijkheidsbepalingen gelden: " \
           "https://www.dov.vlaanderen.be/page/disclaimer" in iden.uselimitation
    assert_list(iden.uselimitation_url, 0)
    assert_list(iden.accessconstraints, 1)
    assert iden.accessconstraints[0] == 'otherRestrictions'
    assert_list(iden.classification, 0)
    assert_list(iden.otherconstraints, 1)
    assert iden.otherconstraints[
        0] == "Data beschikbaar voor hergebruik volgens de " \
              "Modellicentie Gratis Hergebruik. Toelichting " \
              "beschikbaar op " \
              "https://www.dov.vlaanderen.be/page/gebruiksvoorwaarden-dov-services"
    assert_list(iden.securityconstraints, 1)
    assert iden.securityconstraints[0] == 'unclassified'
    assert_list(iden.useconstraints, 0)
    # Spatial resolution and language.
    assert_list(iden.denominators, 1)
    assert iden.denominators[0] == '10000'
    assert_list(iden.distance, 0)
    assert_list(iden.uom, 0)
    assert_list(iden.resourcelanguage, 0)
    assert_list(iden.resourcelanguagecode, 1)
    assert iden.resourcelanguagecode[0] == 'dut'
    assert_list(iden.creator, 0)
    assert_list(iden.publisher, 0)
    assert_list(iden.contributor, 0)
    assert iden.edition is None
    assert iden.abstract.startswith("In de Databank Ondergrond Vlaanderen "
                                    "zijn verschillende grondwatermeetnetten "
                                    "opgenomen.")
    assert iden.purpose.startswith(
        "Het doel van de meetnetten is inzicht krijgen in de kwaliteit en "
        "kwantiteit van de watervoerende lagen in de ondergrond van "
        "Vlaanderen. Algemeen kan gesteld worden dat de grondwatermeetnetten "
        "een belangrijk beleidsinstrument vormen")
    assert iden.status == 'onGoing'
    # Identification-level contacts: point of contact and distributor.
    assert_list(iden.contact, 2)
    assert iden.contact[0].organization == 'Vlaamse overheid - Vlaamse MilieuMaatschappij - Afdeling Operationeel Waterbeheer'
    assert iden.contact[0].address == 'Koning Albert II-laan 20 bus 16'
    assert iden.contact[0].city == 'Brussel'
    assert iden.contact[0].postcode == '1000'
    assert iden.contact[0].country == u'België'
    assert iden.contact[0].email == 'info@vmm.be'
    assert iden.contact[0].onlineresource.url == 'https://www.vmm.be'
    assert iden.contact[0].role == 'pointOfContact'
    assert iden.contact[1].organization == 'Databank Ondergrond Vlaanderen (' \
                                           'DOV)'
    assert iden.contact[1].address == 'Technologiepark Gebouw 905'
    assert iden.contact[1].city == 'Zwijnaarde'
    assert iden.contact[1].postcode == '9052'
    assert iden.contact[1].country == u'België'
    assert iden.contact[1].email == 'dov@vlaanderen.be'
    assert iden.contact[1].onlineresource.url == \
        'https://www.dov.vlaanderen.be'
    assert iden.contact[1].role == 'distributor'
    assert_list(iden.spatialrepresentationtype, 1)
    assert iden.spatialrepresentationtype[0] == 'vector'
    # Keywords: legacy dict-based API (iden.keywords).
    assert_list(iden.keywords, 5)
    assert type(iden.keywords[0]) is dict
    assert iden.keywords[0]['type'] == ''
    assert iden.keywords[0]['thesaurus']['title'] == "GEMET - INSPIRE thema's, versie 1.0"
    assert iden.keywords[0]['thesaurus']['date'] == '2008-06-01'
    assert iden.keywords[0]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[0]['keywords'], 1)
    assert iden.keywords[0]['keywords'] == ['Geologie']
    assert type(iden.keywords[1]) is dict
    assert iden.keywords[1]['type'] == ''
    assert iden.keywords[1]['thesaurus'][
        'title'] == "GEMET - Concepten, versie 2.4"
    assert iden.keywords[1]['thesaurus']['date'] == '2010-01-13'
    assert iden.keywords[1]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[1]['keywords'], 2)
    assert iden.keywords[1]['keywords'] == ['grondwater', 'meetnet(werk)']
    assert type(iden.keywords[2]) is dict
    assert iden.keywords[2]['type'] == ''
    assert iden.keywords[2]['thesaurus'][
        'title'] == "Vlaamse regio's"
    assert iden.keywords[2]['thesaurus']['date'] == '2013-09-25'
    assert iden.keywords[2]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[2]['keywords'], 1)
    assert iden.keywords[2]['keywords'] == ['Vlaams Gewest']
    assert type(iden.keywords[3]) is dict
    assert iden.keywords[3]['type'] is None
    assert iden.keywords[3]['thesaurus'][
        'title'] == "GDI-Vlaanderen Trefwoorden"
    assert iden.keywords[3]['thesaurus']['date'] == '2014-02-26'
    assert iden.keywords[3]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[3]['keywords'], 7)
    assert iden.keywords[3]['keywords'] == [
        'Toegevoegd GDI-Vl', 'Herbruikbaar', 'Vlaamse Open data',
        'Kosteloos', 'Lijst M&R INSPIRE', 'Metadata INSPIRE-conform',
        'Metadata GDI-Vl-conform']
    assert type(iden.keywords[4]) is dict
    assert iden.keywords[4]['type'] is None
    assert iden.keywords[4]['thesaurus']['title'] == "DOV"
    assert iden.keywords[4]['thesaurus']['date'] == '2010-12-01'
    assert iden.keywords[4]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[4]['keywords'], 7)
    assert iden.keywords[4]['keywords'] == [
        'Ondergrond', 'DOV', 'Vlaanderen', 'monitoring', 'meetnetten',
        'Kaderrichtlijn Water', 'Decreet Integraal waterbeleid']
    # Keywords: object-based API (iden.keywords2) must expose the same data.
    assert_list(iden.keywords2, 5)
    assert iden.keywords2[0].type == ''
    assert iden.keywords2[0].thesaurus[
        'title'] == "GEMET - INSPIRE thema's, versie 1.0"
    assert iden.keywords2[0].thesaurus['date'] == '2008-06-01'
    assert iden.keywords2[0].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[0].keywords, 1)
    assert iden.keywords2[0].keywords == ['Geologie']
    assert iden.keywords2[1].type == ''
    assert iden.keywords2[1].thesaurus[
        'title'] == "GEMET - Concepten, versie 2.4"
    assert iden.keywords2[1].thesaurus['date'] == '2010-01-13'
    assert iden.keywords2[1].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[1].keywords, 2)
    assert iden.keywords2[1].keywords == ['grondwater', 'meetnet(werk)']
    assert iden.keywords2[2].type == ''
    assert iden.keywords2[2].thesaurus[
        'title'] == "Vlaamse regio's"
    assert iden.keywords2[2].thesaurus['date'] == '2013-09-25'
    assert iden.keywords2[2].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[2].keywords, 1)
    assert iden.keywords2[2].keywords == ['Vlaams Gewest']
    assert iden.keywords2[3].type is None
    assert iden.keywords2[3].thesaurus[
        'title'] == "GDI-Vlaanderen Trefwoorden"
    assert iden.keywords2[3].thesaurus['date'] == '2014-02-26'
    assert iden.keywords2[3].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[3].keywords, 7)
    assert iden.keywords2[3].keywords == [
        'Toegevoegd GDI-Vl', 'Herbruikbaar', 'Vlaamse Open data',
        'Kosteloos', 'Lijst M&R INSPIRE', 'Metadata INSPIRE-conform',
        'Metadata GDI-Vl-conform']
    assert iden.keywords2[4].type is None
    assert iden.keywords2[4].thesaurus['title'] == "DOV"
    assert iden.keywords2[4].thesaurus['date'] == '2010-12-01'
    assert iden.keywords2[4].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[4].keywords, 7)
    assert iden.keywords2[4].keywords == [
        'Ondergrond', 'DOV', 'Vlaanderen', 'monitoring', 'meetnetten',
        'Kaderrichtlijn Water', 'Decreet Integraal waterbeleid']
    assert_list(iden.topiccategory, 1)
    assert iden.topiccategory[0] == 'geoscientificInformation'
    assert iden.supplementalinformation == \
        "https://www.dov.vlaanderen.be/page/grondwatermeetnet"
    # Content info: feature catalogue description.
    assert_list(md.contentinfo, 1)
    ci = md.contentinfo[0]
    assert ci.compliancecode is None
    assert_list(ci.language, 0)
    assert ci.includedwithdataset == True
    assert_list(ci.featuretypenames, 0)
    assert_list(ci.featurecatalogues, 1)
    assert ci.featurecatalogues[0] == 'b142965f-b2aa-429e-86ff-a7cb0e065d48'
def test_md_parsing_geobretagne():
    """Test the parsing of a metadatarecord from GéoBretagne

    MD_Metadata record available in
    tests/resources/csw_geobretagne_mdmetadata.xml
    """
    md_resource = get_md_resource(
        'tests/resources/csw_geobretagne_mdmetadata.xml')
    md = MD_Metadata(md_resource)
    # Record-level metadata fields.
    assert type(md) is MD_Metadata
    assert md.identifier == '955c3e47-411e-4969-b61b-3556d1b9f879'
    assert md.dataseturi is None
    assert md.parentidentifier is None
    assert md.language == 'fre'
    assert md.languagecode is None
    assert md.charset == 'utf8'
    assert md.datestamp == '2018-07-30T14:19:40'
    assert md.hierarchy == 'dataset'
    # Metadata point of contact.
    assert_list(md.contact, 1)
    contact = md.contact[0]
    assert contact.organization == 'DIRECTION GENERALE DES FINANCES ' \
                                   'PUBLIQUES BUREAU GF-3A'
    assert contact.address is None
    assert contact.city is None
    assert contact.postcode is None
    assert contact.country is None
    assert contact.email == 'bureau.gf3a@dgfip.finances.gouv.fr'
    assert contact.onlineresource is None
    assert contact.role == 'pointOfContact'
    # Metadata standard and reference system.
    assert md.stdname == 'ISO 19115'
    assert md.stdver == '1.0'
    assert md.referencesystem.code == 'RGF93 / CC48 (EPSG:3948)'
    assert md.referencesystem.codeSpace == 'EPSG'
    # Data identification section.
    assert_list(md.identificationinfo, 1)
    iden = md.identificationinfo[0]
    assert iden.title == 'Cadastre 2018 en Bretagne'
    assert iden.alternatetitle is None
    assert_list(iden.date, 1)
    assert iden.date[0].date == '2018-09-01'
    assert iden.date[0].type == 'revision'
    assert_list(iden.uricode, 1)
    assert iden.uricode[0] == 'https://geobretagne.fr/geonetwork/apps/georchestra/?uuid=363e3a8e-d0ce-497d-87a9-2a2d58d82772'
    assert_list(iden.uricodespace, 0)
    # Use limitations and constraints.
    assert_list(iden.uselimitation, 2)
    assert u"le plan cadastral décrit les limites apparentes de la " \
           u"propriété." in iden.uselimitation
    assert_list(iden.uselimitation_url, 0)
    assert_list(iden.accessconstraints, 1)
    assert iden.accessconstraints[0] == 'otherRestrictions'
    assert_list(iden.classification, 0)
    assert_list(iden.otherconstraints, 1)
    assert iden.otherconstraints[
        0] == u'Usage libre sous réserve des mentions obligatoires ' \
              u'sur tout document de diffusion : "Source : DGFIP"'
    assert_list(iden.securityconstraints, 0)
    assert_list(iden.useconstraints, 1)
    assert iden.useconstraints[0] == 'copyright'
    # Spatial resolution and language.
    assert_list(iden.denominators, 1)
    assert iden.denominators[0] == '500'
    assert_list(iden.distance, 0)
    assert_list(iden.uom, 0)
    assert_list(iden.resourcelanguage, 1)
    assert iden.resourcelanguage[0] == 'fre'
    assert_list(iden.resourcelanguagecode, 0)
    assert_list(iden.creator, 0)
    assert_list(iden.publisher, 0)
    assert_list(iden.contributor, 0)
    assert iden.edition is None
    assert iden.abstract.startswith(
        u"Le plan du cadastre est un document administratif qui propose "
        u"l’unique plan parcellaire à grande échelle couvrant le territoire "
        u"national.")
    assert iden.purpose.startswith(
        u"Le but premier du plan cadastral est d'identifier, de localiser et "
        u"représenter la propriété foncière, ainsi que de servir à l'assise "
        u"de la fiscalité locale des propriétés non bâties.")
    assert iden.status == 'completed'
    # Identification-level contact.
    assert_list(iden.contact, 1)
    assert iden.contact[0].organization == 'DGFIP Bretagne'
    assert iden.contact[0].name == 'DIRECTION GENERALE DES FINANCES PUBLIQUES'
    assert iden.contact[0].address is None
    assert iden.contact[0].city is None
    assert iden.contact[0].postcode is None
    assert iden.contact[0].country is None
    assert iden.contact[0].email == 'bureau.gf3a@dgfip.finances.gouv.fr'
    assert iden.contact[0].onlineresource is None
    assert iden.contact[0].role == 'pointOfContact'
    assert_list(iden.spatialrepresentationtype, 1)
    assert iden.spatialrepresentationtype[0] == 'vector'
    # Keywords: legacy dict-based API (iden.keywords); note the empty
    # keyword set at index 1.
    assert_list(iden.keywords, 7)
    assert type(iden.keywords[0]) is dict
    assert iden.keywords[0]['type'] == 'place'
    assert iden.keywords[0]['thesaurus']['title'] is None
    assert iden.keywords[0]['thesaurus']['date'] is None
    assert iden.keywords[0]['thesaurus']['datetype'] is None
    assert_list(iden.keywords[0]['keywords'], 1)
    assert iden.keywords[0]['keywords'] == ['France']
    assert type(iden.keywords[1]) is dict
    assert iden.keywords[1]['type'] is None
    assert iden.keywords[1]['thesaurus']['title'] is None
    assert iden.keywords[1]['thesaurus']['date'] is None
    assert iden.keywords[1]['thesaurus']['datetype'] is None
    assert_list(iden.keywords[1]['keywords'], 0)
    assert type(iden.keywords[2]) is dict
    assert iden.keywords[2]['type'] == 'theme'
    assert iden.keywords[2]['thesaurus']['title'] is None
    assert iden.keywords[2]['thesaurus']['date'] is None
    assert iden.keywords[2]['thesaurus']['datetype'] is None
    assert_list(iden.keywords[2]['keywords'], 7)
    assert iden.keywords[2]['keywords'] == [
        u'bâtiments', 'adresses', 'parcelles cadastrales', 'hydrographie',
        u'réseaux de transport', u'unités administratives',
        u'référentiels de coordonnées']
    assert type(iden.keywords[3]) is dict
    assert iden.keywords[3]['type'] == 'theme'
    assert iden.keywords[3]['thesaurus']['title'] is None
    assert iden.keywords[3]['thesaurus']['date'] is None
    assert iden.keywords[3]['thesaurus']['datetype'] is None
    assert_list(iden.keywords[3]['keywords'], 5)
    assert iden.keywords[3]['keywords'] == [
        u'bâtis', 'sections', 'parcelles', 'cadastre', 'cadastrale']
    assert type(iden.keywords[4]) is dict
    assert iden.keywords[4]['type'] == 'theme'
    assert iden.keywords[4]['thesaurus']['title'] == u"GéoBretagne v 2.0"
    assert iden.keywords[4]['thesaurus']['date'] == '2014-01-13'
    assert iden.keywords[4]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[4]['keywords'], 1)
    assert iden.keywords[4]['keywords'] == [u'référentiels : cadastre']
    assert type(iden.keywords[5]) is dict
    assert iden.keywords[5]['type'] == 'theme'
    assert iden.keywords[5]['thesaurus']['title'] == "INSPIRE themes"
    assert iden.keywords[5]['thesaurus']['date'] == '2008-06-01'
    assert iden.keywords[5]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[5]['keywords'], 1)
    assert iden.keywords[5]['keywords'] == ['Parcelles cadastrales']
    assert type(iden.keywords[6]) is dict
    assert iden.keywords[6]['type'] == 'theme'
    assert iden.keywords[6]['thesaurus']['title'] == "GEMET"
    assert iden.keywords[6]['thesaurus']['date'] == '2012-07-20'
    assert iden.keywords[6]['thesaurus']['datetype'] == 'publication'
    assert_list(iden.keywords[6]['keywords'], 2)
    assert iden.keywords[6]['keywords'] == ['cadastre', u'bâtiment']
    # Keywords: object-based API (iden.keywords2) drops the empty keyword
    # set, hence 6 entries instead of 7.
    assert_list(iden.keywords2, 6)
    assert iden.keywords2[0].type == 'place'
    assert iden.keywords2[0].thesaurus is None
    assert_list(iden.keywords2[0].keywords, 1)
    assert iden.keywords2[0].keywords == ['France']
    assert iden.keywords2[1].type == 'theme'
    assert iden.keywords2[1].thesaurus is None
    assert_list(iden.keywords2[1].keywords, 7)
    assert iden.keywords2[1].keywords == [
        u'bâtiments', 'adresses', 'parcelles cadastrales', 'hydrographie',
        u'réseaux de transport', u'unités administratives',
        u'référentiels de coordonnées']
    assert iden.keywords2[2].type == 'theme'
    assert iden.keywords2[2].thesaurus is None
    assert_list(iden.keywords2[2].keywords, 5)
    assert iden.keywords2[2].keywords == [
        u'bâtis', 'sections', 'parcelles', 'cadastre', 'cadastrale']
    assert iden.keywords2[3].type == 'theme'
    assert iden.keywords2[3].thesaurus['title'] == u"GéoBretagne v 2.0"
    assert iden.keywords2[3].thesaurus['date'] == '2014-01-13'
    assert iden.keywords2[3].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[3].keywords, 1)
    assert iden.keywords2[3].keywords == [u'référentiels : cadastre']
    assert iden.keywords2[4].type == 'theme'
    assert iden.keywords2[4].thesaurus['title'] == "INSPIRE themes"
    assert iden.keywords2[4].thesaurus['date'] == '2008-06-01'
    assert iden.keywords2[4].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[4].keywords, 1)
    assert iden.keywords2[4].keywords == ['Parcelles cadastrales']
    assert iden.keywords2[5].type == 'theme'
    assert iden.keywords2[5].thesaurus['title'] == "GEMET"
    assert iden.keywords2[5].thesaurus['date'] == '2012-07-20'
    assert iden.keywords2[5].thesaurus['datetype'] == 'publication'
    assert_list(iden.keywords2[5].keywords, 2)
    assert iden.keywords2[5].keywords == ['cadastre', u'bâtiment']
    assert_list(iden.topiccategory, 1)
    assert iden.topiccategory[0] == 'planningCadastre'
    assert iden.supplementalinformation == \
        u"La légende du plan cadastral est consultable sur: " \
        "http://www.cadastre.gouv.fr/scpc/pdf/legendes/FR_fr/Legende%20du" \
        "%20plan%20sur%20internet.pdf"
    # Content info: feature catalogue description (empty here).
    assert_list(md.contentinfo, 1)
    ci = md.contentinfo[0]
    assert ci.compliancecode is None
    assert_list(ci.language, 0)
    assert ci.includedwithdataset == False
    assert_list(ci.featuretypenames, 0)
    assert_list(ci.featurecatalogues, 0)
def test_md_parsing_19115_2():
    """Test the parsing of a 19115-2 document

    MD_Metadata record available in
    tests/resources/iso_mi.xml
    """
    md_resource = get_md_resource(
        'tests/resources/iso_mi.xml')
    md = MD_Metadata(md_resource)
    assert type(md) is MD_Metadata
    assert md.identifier == '3f342f64-9348-11df-ba6a-0014c2c00eab'
    # 19115-2 content info: image description with per-band details.
    ci = md.contentinfo[0]
    assert ci.type == 'image'
    assert ci.cloud_cover == '72'
    assert ci.processing_level == '1.0'
    band = ci.bands[0]
    assert band.id == 'B1'
    assert band.units == 'nm'
    assert band.min == '932'
    assert band.max == '958'
    # Acquisition information: platform and its instrument.
    plt = md.acquisition.platforms[0]
    assert plt.identifier == 'LANDSAT_8'
    assert plt.description == 'Landsat 8'
    inst = plt.instruments[0]
    assert inst.identifier == 'OLI_TIRS'
    assert inst.type == 'INS-NOBS'
| 37.923858 | 126 | 0.664659 |
import io
from owslib import util
from owslib.etree import etree
from owslib.iso import (
MD_Metadata,
)
from owslib.namespaces import Namespaces
def get_md_resource(file_path):
namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi'))
with io.open(file_path, mode='r', encoding='utf-8') as f:
data = f.read().encode('utf-8')
data = etree.fromstring(data)
mdelem = data.find('.//' + util.nspath_eval(
'gmd:MD_Metadata', namespaces)) or data.find(
'.//' + util.nspath_eval('gmi:MI_Metadata', namespaces))
if mdelem is None and data.tag in ['{http://www.isotc211.org/2005/gmd}MD_Metadata',
'{http://www.isotc211.org/2005/gmi}MI_Metadata']:
mdelem = data
return mdelem
def assert_list(var, length):
assert type(var) is list
assert len(var) == length
def test_md_parsing_dov():
md_resource = get_md_resource('tests/resources/csw_dov_getrecordbyid.xml')
md = MD_Metadata(md_resource)
assert type(md) is MD_Metadata
assert md.identifier == '6c39d716-aecc-4fbc-bac8-4f05a49a78d5'
assert md.dataseturi is None
assert md.parentidentifier is None
assert md.language is None
assert md.languagecode == 'dut'
assert md.charset == 'utf8'
assert md.datestamp == '2018-02-21T16:14:24'
assert md.hierarchy == 'dataset'
assert_list(md.contact, 1)
contact = md.contact[0]
assert contact.organization == 'Vlaamse overheid - Vlaamse ' \
'MilieuMaatschappij - Afdeling ' \
'Operationeel Waterbeheer'
assert contact.address == 'Koning Albert II-laan 20 bus 16'
assert contact.city == 'Brussel'
assert contact.postcode == '1000'
assert contact.country == u'België'
assert contact.email == 'info@vmm.be'
assert contact.onlineresource.url == 'https://www.vmm.be'
assert contact.role == 'pointOfContact'
assert md.stdname == 'ISO 19115/2003/Cor.1:2006'
assert md.stdver == 'GDI-Vlaanderen Best Practices - versie 1.0'
assert md.referencesystem.code == '31370'
assert md.referencesystem.codeSpace == 'EPSG'
assert_list(md.identificationinfo, 1)
iden = md.identificationinfo[0]
assert iden.title == 'Grondwatermeetnetten'
assert iden.alternatetitle == 'Grondwatermeetnetten beschikbaar op DOV'
assert_list(iden.date, 2)
assert iden.date[0].date == '2002-05-22'
assert iden.date[0].type == 'creation'
assert iden.date[1].date == '2002-05-22'
assert iden.date[1].type == 'publication'
assert_list(iden.uricode, 1)
assert iden.uricode[0] == 'A64F073B-9FBE-91DD-36FDE7462BBAFA61'
assert_list(iden.uricodespace, 1)
assert iden.uricodespace[0] == 'DOV-be'
assert_list(iden.uselimitation, 3)
assert "Zie 'Overige beperkingen'" in iden.uselimitation
assert "Bij het gebruik van de informatie die DOV aanbiedt, dient steeds " \
"volgende standaardreferentie gebruikt te worden: Databank " \
"Ondergrond Vlaanderen - (vermelding van de beheerder en de " \
"specifieke geraadpleegde gegevens) - Geraadpleegd op dd/mm/jjjj, " \
"op https://www.dov.vlaanderen.be" in iden.uselimitation
assert "Volgende aansprakelijkheidsbepalingen gelden: " \
"https://www.dov.vlaanderen.be/page/disclaimer" in iden.uselimitation
assert_list(iden.uselimitation_url, 0)
assert_list(iden.accessconstraints, 1)
assert iden.accessconstraints[0] == 'otherRestrictions'
assert_list(iden.classification, 0)
assert_list(iden.otherconstraints, 1)
assert iden.otherconstraints[
0] == "Data beschikbaar voor hergebruik volgens de " \
"Modellicentie Gratis Hergebruik. Toelichting " \
"beschikbaar op " \
"https://www.dov.vlaanderen.be/page/gebruiksvoorwaarden-dov-services"
assert_list(iden.securityconstraints, 1)
assert iden.securityconstraints[0] == 'unclassified'
assert_list(iden.useconstraints, 0)
assert_list(iden.denominators, 1)
assert iden.denominators[0] == '10000'
assert_list(iden.distance, 0)
assert_list(iden.uom, 0)
assert_list(iden.resourcelanguage, 0)
assert_list(iden.resourcelanguagecode, 1)
assert iden.resourcelanguagecode[0] == 'dut'
assert_list(iden.creator, 0)
assert_list(iden.publisher, 0)
assert_list(iden.contributor, 0)
assert iden.edition is None
assert iden.abstract.startswith("In de Databank Ondergrond Vlaanderen "
"zijn verschillende grondwatermeetnetten "
"opgenomen.")
assert iden.purpose.startswith(
"Het doel van de meetnetten is inzicht krijgen in de kwaliteit en "
"kwantiteit van de watervoerende lagen in de ondergrond van "
"Vlaanderen. Algemeen kan gesteld worden dat de grondwatermeetnetten "
"een belangrijk beleidsinstrument vormen")
assert iden.status == 'onGoing'
assert_list(iden.contact, 2)
assert iden.contact[0].organization == 'Vlaamse overheid - Vlaamse MilieuMaatschappij - Afdeling Operationeel Waterbeheer'
assert iden.contact[0].address == 'Koning Albert II-laan 20 bus 16'
assert iden.contact[0].city == 'Brussel'
assert iden.contact[0].postcode == '1000'
assert iden.contact[0].country == u'België'
assert iden.contact[0].email == 'info@vmm.be'
assert iden.contact[0].onlineresource.url == 'https://www.vmm.be'
assert iden.contact[0].role == 'pointOfContact'
assert iden.contact[1].organization == 'Databank Ondergrond Vlaanderen (' \
'DOV)'
assert iden.contact[1].address == 'Technologiepark Gebouw 905'
assert iden.contact[1].city == 'Zwijnaarde'
assert iden.contact[1].postcode == '9052'
assert iden.contact[1].country == u'België'
assert iden.contact[1].email == 'dov@vlaanderen.be'
assert iden.contact[1].onlineresource.url == \
'https://www.dov.vlaanderen.be'
assert iden.contact[1].role == 'distributor'
assert_list(iden.spatialrepresentationtype, 1)
assert iden.spatialrepresentationtype[0] == 'vector'
assert_list(iden.keywords, 5)
assert type(iden.keywords[0]) is dict
assert iden.keywords[0]['type'] == ''
assert iden.keywords[0]['thesaurus']['title'] == "GEMET - INSPIRE thema's, versie 1.0"
assert iden.keywords[0]['thesaurus']['date'] == '2008-06-01'
assert iden.keywords[0]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[0]['keywords'], 1)
assert iden.keywords[0]['keywords'] == ['Geologie']
assert type(iden.keywords[1]) is dict
assert iden.keywords[1]['type'] == ''
assert iden.keywords[1]['thesaurus'][
'title'] == "GEMET - Concepten, versie 2.4"
assert iden.keywords[1]['thesaurus']['date'] == '2010-01-13'
assert iden.keywords[1]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[1]['keywords'], 2)
assert iden.keywords[1]['keywords'] == ['grondwater', 'meetnet(werk)']
assert type(iden.keywords[2]) is dict
assert iden.keywords[2]['type'] == ''
assert iden.keywords[2]['thesaurus'][
'title'] == "Vlaamse regio's"
assert iden.keywords[2]['thesaurus']['date'] == '2013-09-25'
assert iden.keywords[2]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[2]['keywords'], 1)
assert iden.keywords[2]['keywords'] == ['Vlaams Gewest']
assert type(iden.keywords[3]) is dict
assert iden.keywords[3]['type'] is None
assert iden.keywords[3]['thesaurus'][
'title'] == "GDI-Vlaanderen Trefwoorden"
assert iden.keywords[3]['thesaurus']['date'] == '2014-02-26'
assert iden.keywords[3]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[3]['keywords'], 7)
assert iden.keywords[3]['keywords'] == [
'Toegevoegd GDI-Vl', 'Herbruikbaar', 'Vlaamse Open data',
'Kosteloos', 'Lijst M&R INSPIRE', 'Metadata INSPIRE-conform',
'Metadata GDI-Vl-conform']
assert type(iden.keywords[4]) is dict
assert iden.keywords[4]['type'] is None
assert iden.keywords[4]['thesaurus']['title'] == "DOV"
assert iden.keywords[4]['thesaurus']['date'] == '2010-12-01'
assert iden.keywords[4]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[4]['keywords'], 7)
assert iden.keywords[4]['keywords'] == [
'Ondergrond', 'DOV', 'Vlaanderen', 'monitoring', 'meetnetten',
'Kaderrichtlijn Water', 'Decreet Integraal waterbeleid']
assert_list(iden.keywords2, 5)
assert iden.keywords2[0].type == ''
assert iden.keywords2[0].thesaurus[
'title'] == "GEMET - INSPIRE thema's, versie 1.0"
assert iden.keywords2[0].thesaurus['date'] == '2008-06-01'
assert iden.keywords2[0].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[0].keywords, 1)
assert iden.keywords2[0].keywords == ['Geologie']
assert iden.keywords2[1].type == ''
assert iden.keywords2[1].thesaurus[
'title'] == "GEMET - Concepten, versie 2.4"
assert iden.keywords2[1].thesaurus['date'] == '2010-01-13'
assert iden.keywords2[1].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[1].keywords, 2)
assert iden.keywords2[1].keywords == ['grondwater', 'meetnet(werk)']
assert iden.keywords2[2].type == ''
assert iden.keywords2[2].thesaurus[
'title'] == "Vlaamse regio's"
assert iden.keywords2[2].thesaurus['date'] == '2013-09-25'
assert iden.keywords2[2].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[2].keywords, 1)
assert iden.keywords2[2].keywords == ['Vlaams Gewest']
assert iden.keywords2[3].type is None
assert iden.keywords2[3].thesaurus[
'title'] == "GDI-Vlaanderen Trefwoorden"
assert iden.keywords2[3].thesaurus['date'] == '2014-02-26'
assert iden.keywords2[3].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[3].keywords, 7)
assert iden.keywords2[3].keywords == [
'Toegevoegd GDI-Vl', 'Herbruikbaar', 'Vlaamse Open data',
'Kosteloos', 'Lijst M&R INSPIRE', 'Metadata INSPIRE-conform',
'Metadata GDI-Vl-conform']
assert iden.keywords2[4].type is None
assert iden.keywords2[4].thesaurus['title'] == "DOV"
assert iden.keywords2[4].thesaurus['date'] == '2010-12-01'
assert iden.keywords2[4].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[4].keywords, 7)
assert iden.keywords2[4].keywords == [
'Ondergrond', 'DOV', 'Vlaanderen', 'monitoring', 'meetnetten',
'Kaderrichtlijn Water', 'Decreet Integraal waterbeleid']
assert_list(iden.topiccategory, 1)
assert iden.topiccategory[0] == 'geoscientificInformation'
assert iden.supplementalinformation == \
"https://www.dov.vlaanderen.be/page/grondwatermeetnet"
assert_list(md.contentinfo, 1)
ci = md.contentinfo[0]
assert ci.compliancecode is None
assert_list(ci.language, 0)
assert ci.includedwithdataset == True
assert_list(ci.featuretypenames, 0)
assert_list(ci.featurecatalogues, 1)
assert ci.featurecatalogues[0] == 'b142965f-b2aa-429e-86ff-a7cb0e065d48'
def test_md_parsing_geobretagne():
md_resource = get_md_resource(
'tests/resources/csw_geobretagne_mdmetadata.xml')
md = MD_Metadata(md_resource)
assert type(md) is MD_Metadata
assert md.identifier == '955c3e47-411e-4969-b61b-3556d1b9f879'
assert md.dataseturi is None
assert md.parentidentifier is None
assert md.language == 'fre'
assert md.languagecode is None
assert md.charset == 'utf8'
assert md.datestamp == '2018-07-30T14:19:40'
assert md.hierarchy == 'dataset'
assert_list(md.contact, 1)
contact = md.contact[0]
assert contact.organization == 'DIRECTION GENERALE DES FINANCES ' \
'PUBLIQUES BUREAU GF-3A'
assert contact.address is None
assert contact.city is None
assert contact.postcode is None
assert contact.country is None
assert contact.email == 'bureau.gf3a@dgfip.finances.gouv.fr'
assert contact.onlineresource is None
assert contact.role == 'pointOfContact'
assert md.stdname == 'ISO 19115'
assert md.stdver == '1.0'
assert md.referencesystem.code == 'RGF93 / CC48 (EPSG:3948)'
assert md.referencesystem.codeSpace == 'EPSG'
assert_list(md.identificationinfo, 1)
iden = md.identificationinfo[0]
assert iden.title == 'Cadastre 2018 en Bretagne'
assert iden.alternatetitle is None
assert_list(iden.date, 1)
assert iden.date[0].date == '2018-09-01'
assert iden.date[0].type == 'revision'
assert_list(iden.uricode, 1)
assert iden.uricode[0] == 'https://geobretagne.fr/geonetwork/apps/georchestra/?uuid=363e3a8e-d0ce-497d-87a9-2a2d58d82772'
assert_list(iden.uricodespace, 0)
assert_list(iden.uselimitation, 2)
assert u"le plan cadastral décrit les limites apparentes de la " \
u"propriété." in iden.uselimitation
assert_list(iden.uselimitation_url, 0)
assert_list(iden.accessconstraints, 1)
assert iden.accessconstraints[0] == 'otherRestrictions'
assert_list(iden.classification, 0)
assert_list(iden.otherconstraints, 1)
assert iden.otherconstraints[
0] == u'Usage libre sous réserve des mentions obligatoires ' \
u'sur tout document de diffusion : "Source : DGFIP"'
assert_list(iden.securityconstraints, 0)
assert_list(iden.useconstraints, 1)
assert iden.useconstraints[0] == 'copyright'
assert_list(iden.denominators, 1)
assert iden.denominators[0] == '500'
assert_list(iden.distance, 0)
assert_list(iden.uom, 0)
assert_list(iden.resourcelanguage, 1)
assert iden.resourcelanguage[0] == 'fre'
assert_list(iden.resourcelanguagecode, 0)
assert_list(iden.creator, 0)
assert_list(iden.publisher, 0)
assert_list(iden.contributor, 0)
assert iden.edition is None
assert iden.abstract.startswith(
u"Le plan du cadastre est un document administratif qui propose "
u"l’unique plan parcellaire à grande échelle couvrant le territoire "
u"national.")
assert iden.purpose.startswith(
u"Le but premier du plan cadastral est d'identifier, de localiser et "
u"représenter la propriété foncière, ainsi que de servir à l'assise "
u"de la fiscalité locale des propriétés non bâties.")
assert iden.status == 'completed'
assert_list(iden.contact, 1)
assert iden.contact[0].organization == 'DGFIP Bretagne'
assert iden.contact[0].name == 'DIRECTION GENERALE DES FINANCES PUBLIQUES'
assert iden.contact[0].address is None
assert iden.contact[0].city is None
assert iden.contact[0].postcode is None
assert iden.contact[0].country is None
assert iden.contact[0].email == 'bureau.gf3a@dgfip.finances.gouv.fr'
assert iden.contact[0].onlineresource is None
assert iden.contact[0].role == 'pointOfContact'
assert_list(iden.spatialrepresentationtype, 1)
assert iden.spatialrepresentationtype[0] == 'vector'
assert_list(iden.keywords, 7)
assert type(iden.keywords[0]) is dict
assert iden.keywords[0]['type'] == 'place'
assert iden.keywords[0]['thesaurus']['title'] is None
assert iden.keywords[0]['thesaurus']['date'] is None
assert iden.keywords[0]['thesaurus']['datetype'] is None
assert_list(iden.keywords[0]['keywords'], 1)
assert iden.keywords[0]['keywords'] == ['France']
assert type(iden.keywords[1]) is dict
assert iden.keywords[1]['type'] is None
assert iden.keywords[1]['thesaurus']['title'] is None
assert iden.keywords[1]['thesaurus']['date'] is None
assert iden.keywords[1]['thesaurus']['datetype'] is None
assert_list(iden.keywords[1]['keywords'], 0)
assert type(iden.keywords[2]) is dict
assert iden.keywords[2]['type'] == 'theme'
assert iden.keywords[2]['thesaurus']['title'] is None
assert iden.keywords[2]['thesaurus']['date'] is None
assert iden.keywords[2]['thesaurus']['datetype'] is None
assert_list(iden.keywords[2]['keywords'], 7)
assert iden.keywords[2]['keywords'] == [
u'bâtiments', 'adresses', 'parcelles cadastrales', 'hydrographie',
u'réseaux de transport', u'unités administratives',
u'référentiels de coordonnées']
assert type(iden.keywords[3]) is dict
assert iden.keywords[3]['type'] == 'theme'
assert iden.keywords[3]['thesaurus']['title'] is None
assert iden.keywords[3]['thesaurus']['date'] is None
assert iden.keywords[3]['thesaurus']['datetype'] is None
assert_list(iden.keywords[3]['keywords'], 5)
assert iden.keywords[3]['keywords'] == [
u'bâtis', 'sections', 'parcelles', 'cadastre', 'cadastrale']
assert type(iden.keywords[4]) is dict
assert iden.keywords[4]['type'] == 'theme'
assert iden.keywords[4]['thesaurus']['title'] == u"GéoBretagne v 2.0"
assert iden.keywords[4]['thesaurus']['date'] == '2014-01-13'
assert iden.keywords[4]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[4]['keywords'], 1)
assert iden.keywords[4]['keywords'] == [u'référentiels : cadastre']
assert type(iden.keywords[5]) is dict
assert iden.keywords[5]['type'] == 'theme'
assert iden.keywords[5]['thesaurus']['title'] == "INSPIRE themes"
assert iden.keywords[5]['thesaurus']['date'] == '2008-06-01'
assert iden.keywords[5]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[5]['keywords'], 1)
assert iden.keywords[5]['keywords'] == ['Parcelles cadastrales']
assert type(iden.keywords[6]) is dict
assert iden.keywords[6]['type'] == 'theme'
assert iden.keywords[6]['thesaurus']['title'] == "GEMET"
assert iden.keywords[6]['thesaurus']['date'] == '2012-07-20'
assert iden.keywords[6]['thesaurus']['datetype'] == 'publication'
assert_list(iden.keywords[6]['keywords'], 2)
assert iden.keywords[6]['keywords'] == ['cadastre', u'bâtiment']
assert_list(iden.keywords2, 6)
assert iden.keywords2[0].type == 'place'
assert iden.keywords2[0].thesaurus is None
assert_list(iden.keywords2[0].keywords, 1)
assert iden.keywords2[0].keywords == ['France']
assert iden.keywords2[1].type == 'theme'
assert iden.keywords2[1].thesaurus is None
assert_list(iden.keywords2[1].keywords, 7)
assert iden.keywords2[1].keywords == [
u'bâtiments', 'adresses', 'parcelles cadastrales', 'hydrographie',
u'réseaux de transport', u'unités administratives',
u'référentiels de coordonnées']
assert iden.keywords2[2].type == 'theme'
assert iden.keywords2[2].thesaurus is None
assert_list(iden.keywords2[2].keywords, 5)
assert iden.keywords2[2].keywords == [
u'bâtis', 'sections', 'parcelles', 'cadastre', 'cadastrale']
assert iden.keywords2[3].type == 'theme'
assert iden.keywords2[3].thesaurus['title'] == u"GéoBretagne v 2.0"
assert iden.keywords2[3].thesaurus['date'] == '2014-01-13'
assert iden.keywords2[3].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[3].keywords, 1)
assert iden.keywords2[3].keywords == [u'référentiels : cadastre']
assert iden.keywords2[4].type == 'theme'
assert iden.keywords2[4].thesaurus['title'] == "INSPIRE themes"
assert iden.keywords2[4].thesaurus['date'] == '2008-06-01'
assert iden.keywords2[4].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[4].keywords, 1)
assert iden.keywords2[4].keywords == ['Parcelles cadastrales']
assert iden.keywords2[5].type == 'theme'
assert iden.keywords2[5].thesaurus['title'] == "GEMET"
assert iden.keywords2[5].thesaurus['date'] == '2012-07-20'
assert iden.keywords2[5].thesaurus['datetype'] == 'publication'
assert_list(iden.keywords2[5].keywords, 2)
assert iden.keywords2[5].keywords == ['cadastre', u'bâtiment']
assert_list(iden.topiccategory, 1)
assert iden.topiccategory[0] == 'planningCadastre'
assert iden.supplementalinformation == \
u"La légende du plan cadastral est consultable sur: " \
"http://www.cadastre.gouv.fr/scpc/pdf/legendes/FR_fr/Legende%20du" \
"%20plan%20sur%20internet.pdf"
assert_list(md.contentinfo, 1)
ci = md.contentinfo[0]
assert ci.compliancecode is None
assert_list(ci.language, 0)
assert ci.includedwithdataset == False
assert_list(ci.featuretypenames, 0)
assert_list(ci.featurecatalogues, 0)
def test_md_parsing_19115_2():
md_resource = get_md_resource(
'tests/resources/iso_mi.xml')
md = MD_Metadata(md_resource)
assert type(md) is MD_Metadata
assert md.identifier == '3f342f64-9348-11df-ba6a-0014c2c00eab'
ci = md.contentinfo[0]
assert ci.type == 'image'
assert ci.cloud_cover == '72'
assert ci.processing_level == '1.0'
band = ci.bands[0]
assert band.id == 'B1'
assert band.units == 'nm'
assert band.min == '932'
assert band.max == '958'
plt = md.acquisition.platforms[0]
assert plt.identifier == 'LANDSAT_8'
assert plt.description == 'Landsat 8'
inst = plt.instruments[0]
assert inst.identifier == 'OLI_TIRS'
assert inst.type == 'INS-NOBS'
| true | true |
f71bcee66968f36d4da1975fc465b3d5f8cf8ea5 | 1,789 | py | Python | tegaki/core.py | RShirohara/handwriting_detection | f24aba8ac695fef064d090db78229ab482f342cd | [
"MIT"
] | null | null | null | tegaki/core.py | RShirohara/handwriting_detection | f24aba8ac695fef064d090db78229ab482f342cd | [
"MIT"
] | 1 | 2021-02-24T00:31:12.000Z | 2021-02-26T00:11:24.000Z | tegaki/core.py | RShirohara/handwriting_detection | f24aba8ac695fef064d090db78229ab482f342cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# author: @RShirohara
# TODO: #8
from time import sleep
from .detect import DetectArea
from .googleapis import DetectText, GetTTS
from .send import PlayMP3
from .util import VideoStream, QueueConnector
class Tegaki:
"""HandWriting Detection core class.
Attributes:
capture (VideoStream): Video stream from source device or file.
cap_params (CapParams): infomation of source device or file.
"""
def __init__(self, model_dir, src=0, width=None, height=None, maxsize=0):
"""Initilize core class.
Args:
model_dir (str): Path to protocol buffer (.pb) file.
src (str, int): Path to capture device or file.
width (int): Width of the frames in stream.
height (int): Height of the frames in stream.
maxsize (int): Upperbound limit on the item in the queue.
"""
self.capture = VideoStream(
src=src, width=width, height=height
).start()
self.cap_params = self.capture.info()
self.th_play = PlayMP3(daemon=True, maxsize=maxsize)
self.th_tts = GetTTS(
QueueConnector([self.th_play]),
daemon=True,
maxsize=maxsize
)
self.th_ocr = DetectText(
self.th_tts,
daemon=True,
maxsize=maxsize
)
self.th_det = DetectArea(
self.th_ocr,
model_dir,
self.cap_params,
daemon=True,
maxsize=maxsize
)
def run(self):
"""Exec."""
self.th_play.start()
self.th_tts.start()
self.th_ocr.start()
self.th_det.start()
while True:
self.th_det.put(self.capture.read())
sleep(5)
| 26.308824 | 77 | 0.57071 |
from time import sleep
from .detect import DetectArea
from .googleapis import DetectText, GetTTS
from .send import PlayMP3
from .util import VideoStream, QueueConnector
class Tegaki:
def __init__(self, model_dir, src=0, width=None, height=None, maxsize=0):
self.capture = VideoStream(
src=src, width=width, height=height
).start()
self.cap_params = self.capture.info()
self.th_play = PlayMP3(daemon=True, maxsize=maxsize)
self.th_tts = GetTTS(
QueueConnector([self.th_play]),
daemon=True,
maxsize=maxsize
)
self.th_ocr = DetectText(
self.th_tts,
daemon=True,
maxsize=maxsize
)
self.th_det = DetectArea(
self.th_ocr,
model_dir,
self.cap_params,
daemon=True,
maxsize=maxsize
)
def run(self):
self.th_play.start()
self.th_tts.start()
self.th_ocr.start()
self.th_det.start()
while True:
self.th_det.put(self.capture.read())
sleep(5)
| true | true |
f71bceeb4b6085939b05dc07c82d4eea47511809 | 865 | py | Python | OpenPGPAbs/gpgBackends/__init__.py | KOLANICH/OpenPGPAbs | 1052422a74c3970990491972f81be8eb142d2dd7 | [
"Unlicense"
] | null | null | null | OpenPGPAbs/gpgBackends/__init__.py | KOLANICH/OpenPGPAbs | 1052422a74c3970990491972f81be8eb142d2dd7 | [
"Unlicense"
] | null | null | null | OpenPGPAbs/gpgBackends/__init__.py | KOLANICH/OpenPGPAbs | 1052422a74c3970990491972f81be8eb142d2dd7 | [
"Unlicense"
] | null | null | null | _backendsNames = ("bouncyCastle", "pgpy")
from pathlib import Path
from os.path import expanduser
from enum import IntFlag
from abc import ABC, abstractmethod
keyringPath = Path(expanduser("~/.gnupg/pubring.kbx"))
class SecurityIssues(IntFlag):
OK = 0
wrongSig = (1 << 0)
expired = (1 << 1)
disabled = (1 << 2)
revoked = (1 << 3)
invalid = (1 << 4)
brokenAssymetricFunc = (1 << 5)
hashFunctionNotCollisionResistant = (1 << 6)
hashFunctionNotSecondPreimageResistant = (1 << 7)
assymetricKeyLengthIsTooShort = (1 << 8)
insecureCurve = (1 << 9)
noSelfSignature = (1 << 10)
class Backend(ABC):
__slots__ = ()
@abstractmethod
def verifyBlob(signedData: bytes, signature: bytes, *, keyFingerprint: str = None, keyFile: Path = None, subkeyFingerprint: str = None):
raise NotImplementedError
def isConsideredInsecure(k):
raise NotImplementedError
| 26.212121 | 137 | 0.713295 | _backendsNames = ("bouncyCastle", "pgpy")
from pathlib import Path
from os.path import expanduser
from enum import IntFlag
from abc import ABC, abstractmethod
keyringPath = Path(expanduser("~/.gnupg/pubring.kbx"))
class SecurityIssues(IntFlag):
OK = 0
wrongSig = (1 << 0)
expired = (1 << 1)
disabled = (1 << 2)
revoked = (1 << 3)
invalid = (1 << 4)
brokenAssymetricFunc = (1 << 5)
hashFunctionNotCollisionResistant = (1 << 6)
hashFunctionNotSecondPreimageResistant = (1 << 7)
assymetricKeyLengthIsTooShort = (1 << 8)
insecureCurve = (1 << 9)
noSelfSignature = (1 << 10)
class Backend(ABC):
__slots__ = ()
@abstractmethod
def verifyBlob(signedData: bytes, signature: bytes, *, keyFingerprint: str = None, keyFile: Path = None, subkeyFingerprint: str = None):
raise NotImplementedError
def isConsideredInsecure(k):
raise NotImplementedError
| true | true |
f71bcf92d85264acc7dde6e6008f78b323781cda | 417 | py | Python | pinaxcon/hooks.py | n6151h/pyconau2017 | 092de5fd60d2b0dd207242cf2585e16ec6843392 | [
"MIT"
] | 7 | 2015-12-15T22:54:42.000Z | 2018-12-29T03:31:51.000Z | pinaxcon/hooks.py | n6151h/pyconau2017 | 092de5fd60d2b0dd207242cf2585e16ec6843392 | [
"MIT"
] | 59 | 2017-08-09T02:19:42.000Z | 2021-11-30T03:16:58.000Z | config/hooks.py | pyung/pycon-ng | fc7d1709e5da6f3013886d7a3099bd3d617b0df1 | [
"MIT"
] | 11 | 2016-01-03T18:04:58.000Z | 2021-09-19T06:01:25.000Z | import markdown
import pinax.boxes.hooks
import pinax.pages.hooks
def markup_renderer(content):
return markdown.markdown(content)
class PinaxBoxesHookSet(pinax.boxes.hooks.DefaultHookSet):
def parse_content(self, content):
return markup_renderer(content)
class PinaxPagesHookSet(pinax.pages.hooks.DefaultHookSet):
def parse_content(self, content):
return markup_renderer(content)
| 19.857143 | 58 | 0.772182 | import markdown
import pinax.boxes.hooks
import pinax.pages.hooks
def markup_renderer(content):
return markdown.markdown(content)
class PinaxBoxesHookSet(pinax.boxes.hooks.DefaultHookSet):
def parse_content(self, content):
return markup_renderer(content)
class PinaxPagesHookSet(pinax.pages.hooks.DefaultHookSet):
def parse_content(self, content):
return markup_renderer(content)
| true | true |
f71bcffe0e1b5a79ea7623d2e1090e3853c423db | 1,399 | py | Python | vixautotimer/src/WebChilds/UploadResource.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | vixautotimer/src/WebChilds/UploadResource.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | vixautotimer/src/WebChilds/UploadResource.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | from os import write as os_write, close as os_close, O_WRONLY as os_O_WRONLY, O_CREAT as os_O_CREAT, open as os_open, remove as os_remove
from twisted.web import resource, http
class UploadResource(resource.Resource):
FILENAME = "/tmp/autotimer_backup.tar"
def __init__(self, session):
self.session = session
resource.Resource.__init__(self)
def render_POST(self, req):
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml;')
req.setHeader('charset', 'UTF-8')
data = req.args['file'][0]
if not data:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Filesize was 0, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
return result
fd = os_open(self.FILENAME, os_O_WRONLY | os_O_CREAT)
if fd:
cnt = os_write(fd, data)
os_close(fd)
if cnt <= 0:
try:
os_remove(FILENAME)
except OSError, oe:
pass
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Error writing to disk, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
else:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>True</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult>\n""" % self.FILENAME
return result
| 31.088889 | 137 | 0.676197 | from os import write as os_write, close as os_close, O_WRONLY as os_O_WRONLY, O_CREAT as os_O_CREAT, open as os_open, remove as os_remove
from twisted.web import resource, http
class UploadResource(resource.Resource):
FILENAME = "/tmp/autotimer_backup.tar"
def __init__(self, session):
self.session = session
resource.Resource.__init__(self)
def render_POST(self, req):
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml;')
req.setHeader('charset', 'UTF-8')
data = req.args['file'][0]
if not data:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Filesize was 0, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
return result
fd = os_open(self.FILENAME, os_O_WRONLY | os_O_CREAT)
if fd:
cnt = os_write(fd, data)
os_close(fd)
if cnt <= 0:
try:
os_remove(FILENAME)
except OSError, oe:
pass
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Error writing to disk, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
else:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>True</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult>\n""" % self.FILENAME
return result
| false | true |
f71bd131825e1190622a69fa2bed7b9d67ded123 | 7,429 | py | Python | selam/prepdata.py | tsoonjin/selam | fbbb355490271bf09056e05b23245be1b75ae24d | [
"MIT"
] | 3 | 2020-10-14T06:05:26.000Z | 2021-07-21T15:43:54.000Z | selam/prepdata.py | tsoonjin/selam | fbbb355490271bf09056e05b23245be1b75ae24d | [
"MIT"
] | null | null | null | selam/prepdata.py | tsoonjin/selam | fbbb355490271bf09056e05b23245be1b75ae24d | [
"MIT"
] | 1 | 2020-05-08T12:59:35.000Z | 2020-05-08T12:59:35.000Z | #!/bin/bash
import os
import sys
import random
import cv2
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn.decomposition import PCA, NMF
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from keras.preprocessing.image import ImageDataGenerator
from selam.utils import img
def sample_negative(img, rect, n=1, size=(100, 100)):
""" Sample n negative samples randomly
@param rect: [x1, y1, x2, y2]
@param n: number of negative samples
@param size: size of negative window
"""
samples = []
maxHeight, maxWidth = img.shape[:-1]
width = abs(rect[0] - rect[2])
height = abs(rect[1] - rect[3])
while len(samples) != n:
tmpX = int(random.random() * (maxWidth - width))
tmpY = int(random.random() * (maxHeight - height))
isNotOverlapX = tmpX + width < rect[0] or tmpX > rect[2]
isNotOverlapY = tmpY + height < rect[1] or tmpY > rect[3]
# Only accepts sample that does not overlap with ground truth
if isNotOverlapX and isNotOverlapY:
samples.append(cv2.resize(
img[tmpY: tmpY + height, tmpX: tmpX + width], size))
return samples
def get_roi(img, rect, size=(100, 100)):
""" Return extracted bounding box given 4 corners of a rectangle
size: size of training image
@return roi, [x1, y1, x2, y2]
"""
xpos = rect[0::2]
ypos = rect[1::2]
y = [int(min(ypos)), int(max(ypos))]
x = [int(min(xpos)), int(max(xpos))]
roi = img[y[0]:y[1], x[0]:x[1]]
return cv2.resize(roi, size), [x[0], y[0], x[1], y[1]]
def get_jpgs(dirpath, skip=0, resize=None):
""" Returns all images located in given dirpath
skip : number of frames skip to reduce computation time
resize: scale factor for resize
"""
filenames = os.listdir(dirpath)
# Only attempt to parse and sort files that end with .jpg
filenames = [filename for filename in filenames
if filename.endswith(".jpg") or filename.endswith(".png")]
filenames.sort(key=lambda x: int(x.split('.', 1)[0]))
frames = [cv2.imread('{}/{}'.format(dirpath, filename))
for filename in filenames]
out = frames[0::skip] if skip > 0 else frames
print('Read {} images from {}'.format(len(out), dirpath))
if resize:
new_size = (out[0].shape[1] / resize, out[0].shape[0] / resize)
return map(lambda x: cv2.resize(x, new_size), out)
return out
def extract_training(dataset_path, annotation):
""" Returns a list of labelled images as positive training data
Uses default size of 100 x 100 as training patch
@return positive samples, negative samples
"""
positives = []
negatives = []
imgs = get_jpgs(dataset_path)
with open(annotation) as ann:
for i, label in zip(imgs, ann):
rect = map(float, label.rstrip().split(','))
if rect[0] > 0:
roi, coord = get_roi(i, rect)
negatives.extend(sample_negative(i, coord))
positives.append(roi)
print("{} positive samples".format(len(positives)))
print("{} negative samples".format(len(negatives)))
return positives, negatives
def augment_data(imgs, augment_dir, prefix, n=20):
    """Augment ``imgs`` with random transformations and save them to disk.

    @param augment_dir: directory to save augmented images
    @param prefix: filename prefix for the saved images
    @param n: number of transformations saved per input image
    """
    datagen = ImageDataGenerator(
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    for image in imgs:
        # Keras expects RGB ordering and a leading batch dimension.
        selected = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        selected = selected.reshape((1, ) + selected.shape)
        # datagen.flow is an endless generator that saves one file per
        # iteration; stop after exactly n files (the previous `if x > n`
        # check produced n + 2 files per input image).
        for count, _ in enumerate(datagen.flow(selected, batch_size=1,
                                               save_to_dir=augment_dir,
                                               save_prefix=prefix,
                                               save_format='jpeg'), start=1):
            if count >= n:
                break
def kfold(x, y, eval_size=0.10):
    """Split the dataset into a training set and a validation set.

    @param eval_size: fraction of the data held out for evaluation
    @return X_train, X_valid, Y_train, Y_valid
    """
    # random_state pinned so the split is reproducible across runs.
    split = train_test_split(x, y, test_size=eval_size, random_state=0)
    return split
def std_zscore(X):
    """Z-score standardize X: subtract the per-feature mean and divide by
    the per-feature standard deviation of the dataset."""
    return preprocessing.StandardScaler().fit(X).transform(X)
def std_minmax(X):
    """Rescale every feature of X into the [0, 1] range (min-max scaling)."""
    return preprocessing.MinMaxScaler().fit(X).transform(X)
def reduce_pca(X, h, w, n=15, display=True):
    """Project X onto its first ``n`` principal components.

    @param h, w: spatial size used to visualize each component as an image
    @param display: when True, show every (whitened) component via cv2
    @return the PCA-transformed data
    """
    pca = PCA(n_components=n, svd_solver='randomized',
              whiten=True).fit(X)
    eigenfaces = pca.components_.reshape((n, h, w, -1))
    if display:
        for face in eigenfaces:
            # Collapse channels, normalize to [0, 1], then scale to uint8.
            gray = img.normUnity(np.mean(face, axis=2))
            cv2.imshow('PC', np.uint8(gray * 255))
            cv2.waitKey(0)
    return pca.transform(X)
def reduce_nmf(X, h, w, n=15, display=False):
    """Factorize X with non-negative matrix factorization (``n`` components).

    @param h, w: spatial size used to visualize each component as an image
    @param display: when True, show every component via cv2
    @return the NMF-transformed data
    """
    model = NMF(n_components=n, init='random', random_state=0).fit(X)
    components = model.components_.reshape((n, h, w, -1))
    if display:
        for comp in components:
            gray = img.normUnity(np.mean(comp, axis=2))
            cv2.imshow('PC', np.uint8(gray * 255))
            cv2.waitKey(0)
    return model.transform(X)
def classify_svm(X_train, Y_train):
    """Grid-search an RBF-kernel SVM over C and gamma and fit it."""
    search_space = {
        'C': [1e3, 5e3, 1e4, 5e4, 1e5],
        'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
    }
    svm = SVC(kernel='rbf', class_weight='balanced')
    clf = GridSearchCV(svm, search_space)
    clf.fit(X_train, Y_train)
    return clf
def classify_rf(X_train, Y_train):
    """Grid-search a random forest over tree count and feature sampling."""
    search_space = {
        'n_estimators': [50, 200, 700],
        'max_features': ['auto', 'sqrt', 'log2'],
    }
    forest = RandomForestClassifier(n_estimators=500, oob_score=True)
    clf = GridSearchCV(forest, search_space)
    clf.fit(X_train, Y_train)
    return clf
def classify_gp(X, Y):
    """Fit a Gaussian-process classifier with an RBF kernel.

    A single shared lengthscale is used for all features.
    """
    rbf_kernel = 1.0 * RBF([1.0])
    return GaussianProcessClassifier(kernel=rbf_kernel).fit(X, Y)
def classify_xgb(X, Y):
    """Fit an XGBoost classifier via a single-point GridSearchCV.

    Every hyper-parameter has exactly one candidate value, so the grid
    search only provides cross-validated fitting, not an actual search.
    """
    booster = xgb.XGBClassifier()
    search_space = {
        'nthread': [4],  # with hyper-threading, more threads may be slower
        'objective': ['binary:logistic'],
        'learning_rate': [0.05],  # the so-called `eta` shrinkage value
        'max_depth': [6],
        'min_child_weight': [11],
        'silent': [1],
        'subsample': [0.8],
        'colsample_bytree': [0.7],
        'n_estimators': [5],  # raise towards 1000 for better results
        'missing': [-999],
        'seed': [1337],
    }
    clf = GridSearchCV(booster, search_space)
    clf.fit(X, Y)
    return clf
if __name__ == '__main__':
    # Expected CLI: <dataset directory> <annotation file> <prefix> (argv[1..3]).
    if len(sys.argv) < 4:
        print("Usage: python extract_region.py <dataset directory> <annotation file> <prefix> \n")
        # sys.exit(1): report misuse with a nonzero status; the bare exit()
        # helper is a site-module convenience and exited with status 0.
        sys.exit(1)
    positives, negatives = extract_training(sys.argv[1], sys.argv[2])
| 34.714953 | 98 | 0.629964 |
import os
import sys
import random
import cv2
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn.decomposition import PCA, NMF
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from keras.preprocessing.image import ImageDataGenerator
from selam.utils import img
def sample_negative(img, rect, n=1, size=(100, 100)):
    """Draw ``n`` random crops, each the size of ``rect``, avoiding ``rect``.

    @param rect: ground-truth box [x1, y1, x2, y2]
    @param size: output size each accepted crop is resized to
    @return list of ``n`` resized negative patches
    """
    maxHeight, maxWidth = img.shape[:-1]
    width = abs(rect[0] - rect[2])
    height = abs(rect[1] - rect[3])
    picked = []
    # NOTE(review): rejection sampling -- this can spin for a long time if
    # the ground-truth box covers most of the frame.
    while len(picked) != n:
        x0 = int(random.random() * (maxWidth - width))
        y0 = int(random.random() * (maxHeight - height))
        clearOfX = x0 + width < rect[0] or x0 > rect[2]
        clearOfY = y0 + height < rect[1] or y0 > rect[3]
        # Accept only candidates separated from the box on both axes.
        if clearOfX and clearOfY:
            crop = img[y0: y0 + height, x0: x0 + width]
            picked.append(cv2.resize(crop, size))
    return picked
def get_roi(img, rect, size=(100, 100)):
    """Crop the box spanned by the corner coordinates in ``rect``.

    @param rect: flat [x, y, x, y, ...] corner sequence
    @return (crop resized to ``size``, [x_min, y_min, x_max, y_max])
    """
    x_coords = rect[0::2]
    y_coords = rect[1::2]
    left, right = int(min(x_coords)), int(max(x_coords))
    top, bottom = int(min(y_coords)), int(max(y_coords))
    cropped = img[top:bottom, left:right]
    return cv2.resize(cropped, size), [left, top, right, bottom]
def get_jpgs(dirpath, skip=0, resize=None):
    """Return all numbered .jpg/.png frames in ``dirpath``, sorted by number.

    @param skip: keep every ``skip``-th frame when > 0
    @param resize: integer downscale factor applied to each frame when truthy
    @return list of BGR frames (always a list, in both branches)
    """
    filenames = os.listdir(dirpath)
    # Filenames are assumed to be "<number>.<ext>" so they sort numerically.
    filenames = [filename for filename in filenames
                 if filename.endswith(".jpg") or filename.endswith(".png")]
    filenames.sort(key=lambda x: int(x.split('.', 1)[0]))
    frames = [cv2.imread(os.path.join(dirpath, filename))
              for filename in filenames]
    out = frames[0::skip] if skip > 0 else frames
    print('Read {} images from {}'.format(len(out), dirpath))
    if resize:
        # cv2.resize needs integer pixel sizes; true division ('/') yields
        # floats under Python 3 and crashes inside OpenCV.
        new_size = (out[0].shape[1] // resize, out[0].shape[0] // resize)
        # Materialize so both branches return a list (not a lazy `map`).
        return [cv2.resize(frame, new_size) for frame in out]
    return out
def extract_training(dataset_path, annotation):
    """Return labelled image patches usable as training data.

    @param dataset_path: directory holding numbered .jpg/.png frames
    @param annotation: text file, one comma-separated rect per frame
    @return positive samples, negative samples
    """
    positives = []
    negatives = []
    imgs = get_jpgs(dataset_path)
    with open(annotation) as ann:
        for i, label in zip(imgs, ann):
            # A list is required here: under Python 3 `map` returns a lazy,
            # non-subscriptable iterator, so rect[0] below would raise.
            rect = [float(v) for v in label.rstrip().split(',')]
            # rect[0] <= 0 marks frames without a ground-truth annotation
            if rect[0] > 0:
                roi, coord = get_roi(i, rect)
                negatives.extend(sample_negative(i, coord))
                positives.append(roi)
    print("{} positive samples".format(len(positives)))
    print("{} negative samples".format(len(negatives)))
    return positives, negatives
def augment_data(imgs, augment_dir, prefix, n=20):
    """Augment ``imgs`` with random transformations and save them to disk.

    @param augment_dir: directory to save augmented images
    @param prefix: filename prefix for the saved images
    @param n: number of transformations saved per input image
    """
    datagen = ImageDataGenerator(
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    for image in imgs:
        # Keras expects RGB ordering and a leading batch dimension.
        selected = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        selected = selected.reshape((1, ) + selected.shape)
        # datagen.flow is an endless generator that saves one file per
        # iteration; stop after exactly n files (the previous `if x > n`
        # check produced n + 2 files per input image).
        for count, _ in enumerate(datagen.flow(selected, batch_size=1,
                                               save_to_dir=augment_dir,
                                               save_prefix=prefix,
                                               save_format='jpeg'), start=1):
            if count >= n:
                break
def kfold(x, y, eval_size=0.10):
    """Split (x, y) into training and validation folds.

    @param eval_size: fraction of samples held out for evaluation
    @return X_train, X_valid, Y_train, Y_valid
    """
    # random_state pinned so the split is reproducible across runs.
    split = train_test_split(x, y, test_size=eval_size, random_state=0)
    return split
def std_zscore(X):
    """Z-score standardize X (zero mean, unit variance per feature)."""
    return preprocessing.StandardScaler().fit(X).transform(X)
def std_minmax(X):
    """Rescale every feature of X into the [0, 1] range (min-max scaling)."""
    return preprocessing.MinMaxScaler().fit(X).transform(X)
def reduce_pca(X, h, w, n=15, display=True):
    """Project X onto its first ``n`` principal components.

    @param h, w: spatial size used to visualize each component as an image
    @param display: when True, show every (whitened) component via cv2
    """
    pca = PCA(n_components=n, svd_solver='randomized',
              whiten=True).fit(X)
    eigenfaces = pca.components_.reshape((n, h, w, -1))
    if display:
        for face in eigenfaces:
            gray = img.normUnity(np.mean(face, axis=2))
            cv2.imshow('PC', np.uint8(gray * 255))
            cv2.waitKey(0)
    return pca.transform(X)
def reduce_nmf(X, h, w, n=15, display=False):
    """Factorize X with non-negative matrix factorization (``n`` components).

    @param display: when True, show every component as an image via cv2
    """
    model = NMF(n_components=n, init='random', random_state=0).fit(X)
    components = model.components_.reshape((n, h, w, -1))
    if display:
        for comp in components:
            gray = img.normUnity(np.mean(comp, axis=2))
            cv2.imshow('PC', np.uint8(gray * 255))
            cv2.waitKey(0)
    return model.transform(X)
def classify_svm(X_train, Y_train):
    """Grid-search an RBF-kernel SVM over C and gamma and fit it."""
    search_space = {
        'C': [1e3, 5e3, 1e4, 5e4, 1e5],
        'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
    }
    svm = SVC(kernel='rbf', class_weight='balanced')
    clf = GridSearchCV(svm, search_space)
    clf.fit(X_train, Y_train)
    return clf
def classify_rf(X_train, Y_train):
    """Grid-search a random forest over tree count and feature sampling."""
    search_space = {
        'n_estimators': [50, 200, 700],
        'max_features': ['auto', 'sqrt', 'log2'],
    }
    forest = RandomForestClassifier(n_estimators=500, oob_score=True)
    clf = GridSearchCV(forest, search_space)
    clf.fit(X_train, Y_train)
    return clf
def classify_gp(X, Y):
    """Fit a Gaussian-process classifier with an RBF kernel.

    A single shared lengthscale is used for all features.
    """
    rbf_kernel = 1.0 * RBF([1.0])
    return GaussianProcessClassifier(kernel=rbf_kernel).fit(X, Y)
def classify_xgb(X, Y):
    """Fit an XGBoost classifier via a single-point GridSearchCV.

    Every hyper-parameter has exactly one candidate value, so the grid
    search only provides cross-validated fitting, not an actual search.
    """
    booster = xgb.XGBClassifier()
    search_space = {
        'nthread': [4],  # with hyper-threading, more threads may be slower
        'objective': ['binary:logistic'],
        'learning_rate': [0.05],  # the so-called `eta` shrinkage value
        'max_depth': [6],
        'min_child_weight': [11],
        'silent': [1],
        'subsample': [0.8],
        'colsample_bytree': [0.7],
        'n_estimators': [5],  # raise towards 1000 for better results
        'missing': [-999],
        'seed': [1337],
    }
    clf = GridSearchCV(booster, search_space)
    clf.fit(X, Y)
    return clf
if __name__ == '__main__':
    # Expected CLI: <dataset directory> <annotation file> <prefix> (argv[1..3]).
    if len(sys.argv) < 4:
        print("Usage: python extract_region.py <dataset directory> <annotation file> <prefix> \n")
        # sys.exit(1): report misuse with a nonzero status; the bare exit()
        # helper is a site-module convenience and exited with status 0.
        sys.exit(1)
    positives, negatives = extract_training(sys.argv[1], sys.argv[2])
| true | true |
f71bd2ea8e759bf953972aa21026569e55e95cd0 | 3,360 | py | Python | RLAgents/lib_common/WrapperSuperMario.py | michalnand/reinforcement_learning_agents | 45f02c23b1135c87311dce5a52f6e643e4313fc3 | [
"MIT"
] | 2 | 2021-08-05T20:50:41.000Z | 2021-12-25T11:00:38.000Z | RLAgents/lib_common/WrapperSuperMario.py | michalnand/reinforcement_learning_agents | 45f02c23b1135c87311dce5a52f6e643e4313fc3 | [
"MIT"
] | null | null | null | RLAgents/lib_common/WrapperSuperMario.py | michalnand/reinforcement_learning_agents | 45f02c23b1135c87311dce5a52f6e643e4313fc3 | [
"MIT"
] | null | null | null | import gym
import numpy
from PIL import Image
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import COMPLEX_MOVEMENT
class NopOpsEnv(gym.Wrapper):
    """Start every episode with a random number of no-op steps (action 0),
    so training does not always begin from the same deterministic state."""

    def __init__(self, env=None, max_count=30):
        super(NopOpsEnv, self).__init__(env)
        self.max_count = max_count

    def reset(self):
        self.env.reset()
        # Between 1 and max_count no-ops, inclusive.
        for _ in range(numpy.random.randint(1, self.max_count + 1)):
            obs, _, _, _ = self.env.step(0)
        return obs
class SkipEnv(gym.Wrapper):
    """Repeat each chosen action ``skip`` times and sum the rewards,
    stopping early if the episode terminates mid-repeat."""

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        self._skip = skip

    def step(self, action):
        accumulated = 0.0
        done = None
        for _ in range(self._skip):
            state, reward, done, info = self.env.step(action)
            accumulated += reward
            if done:
                break
        # Return the last observed state with the summed reward.
        return state, accumulated, done, info
class ResizeEnv(gym.ObservationWrapper):
    """Convert observations to grayscale, resize them, and stack the most
    recent ``frame_stacking`` frames into a (stack, height, width) tensor
    of float32 values in [0, 1] (newest frame at index 0)."""

    def __init__(self, env, height=96, width=96, frame_stacking=4):
        super(ResizeEnv, self).__init__(env)
        self.height = height
        self.width = width
        self.frame_stacking = frame_stacking
        state_shape = (self.frame_stacking, self.height, self.width)
        self.dtype = numpy.float32
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=state_shape, dtype=self.dtype)
        self.state = numpy.zeros(state_shape, dtype=self.dtype)

    def observation(self, state):
        img = Image.fromarray(state)
        img = img.convert('L')
        # PIL's Image.resize takes (width, height); the previous
        # (self.height, self.width) order produced a transposed array and
        # broke any non-square height/width configuration.
        img = img.resize((self.width, self.height))
        # Shift history: frame i moves to slot i + 1, newest lands in slot 0.
        for i in reversed(range(self.frame_stacking - 1)):
            self.state[i + 1] = self.state[i].copy()
        self.state[0] = numpy.array(img).astype(self.dtype) / 255.0
        return self.state
class ClipRewardEnv(gym.Wrapper):
    """Scale rewards by 1/15 (or zero them out entirely) while tracking
    raw-score statistics for logging."""

    def __init__(self, env, no_rewards=False):
        gym.Wrapper.__init__(self, env)
        self.raw_episodes = 0
        self.raw_score = 0.0
        self.raw_score_per_episode = 0.0
        self.raw_score_total = 0.0
        self.no_rewards = no_rewards

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Accumulate the unscaled score for monitoring.
        self.raw_score += reward
        self.raw_score_total += reward
        if done:
            self.raw_episodes += 1
            # Exponential moving average of the per-episode raw score.
            k = 0.1
            self.raw_score_per_episode = (1.0 - k) * self.raw_score_per_episode + k * self.raw_score
            self.raw_score = 0.0
        scaled = 0.0 if self.no_rewards else reward / 15.0
        return obs, scaled, done, info
def WrapperSuperMario(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4, no_rewards=False):
    """Apply the standard Super Mario preprocessing pipeline.

    Wraps with: complex joypad actions, random no-op starts, frame skipping,
    grayscale resize + frame stacking, and reward scaling.
    @param no_rewards: when True all emitted rewards are zeroed (new optional
        parameter; the default preserves the previous behavior)
    """
    env = JoypadSpace(env, COMPLEX_MOVEMENT)
    env = NopOpsEnv(env)
    env = SkipEnv(env, frame_skipping)
    env = ResizeEnv(env, height, width, frame_stacking)
    env = ClipRewardEnv(env, no_rewards)
    env.reset()
    return env
def WrapperSuperMarioNoRewards(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):
env = JoypadSpace(env, COMPLEX_MOVEMENT)
env = NopOpsEnv(env)
env = SkipEnv(env, frame_skipping)
env = ResizeEnv(env, height, width, frame_stacking)
env = ClipRewardEnv(env, True)
env.reset()
return env | 27.540984 | 103 | 0.612798 | import gym
import numpy
from PIL import Image
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import COMPLEX_MOVEMENT
class NopOpsEnv(gym.Wrapper):
    """Start every episode with a random number of no-op steps (action 0),
    so training does not always begin from the same deterministic state."""

    def __init__(self, env=None, max_count=30):
        super(NopOpsEnv, self).__init__(env)
        self.max_count = max_count

    def reset(self):
        self.env.reset()
        # Between 1 and max_count no-ops, inclusive.
        for _ in range(numpy.random.randint(1, self.max_count + 1)):
            obs, _, _, _ = self.env.step(0)
        return obs
class SkipEnv(gym.Wrapper):
    """Repeat each chosen action ``skip`` times and sum the rewards,
    stopping early if the episode terminates mid-repeat."""

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        self._skip = skip

    def step(self, action):
        accumulated = 0.0
        done = None
        for _ in range(self._skip):
            state, reward, done, info = self.env.step(action)
            accumulated += reward
            if done:
                break
        # Return the last observed state with the summed reward.
        return state, accumulated, done, info
class ResizeEnv(gym.ObservationWrapper):
    """Convert observations to grayscale, resize them, and stack the most
    recent ``frame_stacking`` frames into a (stack, height, width) tensor
    of float32 values in [0, 1] (newest frame at index 0)."""

    def __init__(self, env, height=96, width=96, frame_stacking=4):
        super(ResizeEnv, self).__init__(env)
        self.height = height
        self.width = width
        self.frame_stacking = frame_stacking
        state_shape = (self.frame_stacking, self.height, self.width)
        self.dtype = numpy.float32
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=state_shape, dtype=self.dtype)
        self.state = numpy.zeros(state_shape, dtype=self.dtype)

    def observation(self, state):
        img = Image.fromarray(state)
        img = img.convert('L')
        # PIL's Image.resize takes (width, height); the previous
        # (self.height, self.width) order produced a transposed array and
        # broke any non-square height/width configuration.
        img = img.resize((self.width, self.height))
        # Shift history: frame i moves to slot i + 1, newest lands in slot 0.
        for i in reversed(range(self.frame_stacking - 1)):
            self.state[i + 1] = self.state[i].copy()
        self.state[0] = numpy.array(img).astype(self.dtype) / 255.0
        return self.state
class ClipRewardEnv(gym.Wrapper):
    """Scale rewards by 1/15 (or zero them out entirely) while tracking
    raw-score statistics for logging."""

    def __init__(self, env, no_rewards=False):
        gym.Wrapper.__init__(self, env)
        self.raw_episodes = 0
        self.raw_score = 0.0
        self.raw_score_per_episode = 0.0
        self.raw_score_total = 0.0
        self.no_rewards = no_rewards

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Accumulate the unscaled score for monitoring.
        self.raw_score += reward
        self.raw_score_total += reward
        if done:
            self.raw_episodes += 1
            # Exponential moving average of the per-episode raw score.
            k = 0.1
            self.raw_score_per_episode = (1.0 - k) * self.raw_score_per_episode + k * self.raw_score
            self.raw_score = 0.0
        scaled = 0.0 if self.no_rewards else reward / 15.0
        return obs, scaled, done, info
def WrapperSuperMario(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4, no_rewards=False):
    """Apply the standard Super Mario preprocessing pipeline.

    Wraps with: complex joypad actions, random no-op starts, frame skipping,
    grayscale resize + frame stacking, and reward scaling.
    @param no_rewards: when True all emitted rewards are zeroed (new optional
        parameter; the default preserves the previous behavior)
    """
    env = JoypadSpace(env, COMPLEX_MOVEMENT)
    env = NopOpsEnv(env)
    env = SkipEnv(env, frame_skipping)
    env = ResizeEnv(env, height, width, frame_stacking)
    env = ClipRewardEnv(env, no_rewards)
    env.reset()
    return env
def WrapperSuperMarioNoRewards(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):
env = JoypadSpace(env, COMPLEX_MOVEMENT)
env = NopOpsEnv(env)
env = SkipEnv(env, frame_skipping)
env = ResizeEnv(env, height, width, frame_stacking)
env = ClipRewardEnv(env, True)
env.reset()
return env | true | true |
f71bd2ef2fd1cafd83ccb890af0c057046ca9fa8 | 46,217 | py | Python | facebook_business/adobjects/advideo.py | alternativshik/facebook-python-business-sdk | 83be60d162ae34ffca186104597fdbb7d1fb7cf2 | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/advideo.py | alternativshik/facebook-python-business-sdk | 83be60d162ae34ffca186104597fdbb7d1fb7cf2 | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/advideo.py | alternativshik/facebook-python-business-sdk | 83be60d162ae34ffca186104597fdbb7d1fb7cf2 | [
"CNRI-Python"
] | 1 | 2021-04-09T22:44:52.000Z | 2021-04-09T22:44:52.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdVideo(
AbstractCrudObject,
):
    def __init__(self, fbid=None, parent_id=None, api=None):
        # Marker flag; presumably checked elsewhere in the SDK to identify
        # AdVideo instances -- it is not read anywhere in this file.
        self._isAdVideo = True
        super(AdVideo, self).__init__(fbid, parent_id, api)
    class Field(AbstractObject.Field):
        """String constants naming AdVideo's Graph API fields and upload
        parameters (used as request field/param keys by the SDK)."""
        ad_breaks = 'ad_breaks'
        backdated_time = 'backdated_time'
        backdated_time_granularity = 'backdated_time_granularity'
        content_category = 'content_category'
        content_tags = 'content_tags'
        copyright = 'copyright'
        copyright_monitoring_status = 'copyright_monitoring_status'
        created_time = 'created_time'
        custom_labels = 'custom_labels'
        description = 'description'
        embed_html = 'embed_html'
        embeddable = 'embeddable'
        event = 'event'
        expiration = 'expiration'
        format = 'format'
        # 'from' is a Python keyword, hence the field_from attribute name.
        field_from = 'from'
        icon = 'icon'
        id = 'id'
        is_crosspost_video = 'is_crosspost_video'
        is_crossposting_eligible = 'is_crossposting_eligible'
        is_episode = 'is_episode'
        is_instagram_eligible = 'is_instagram_eligible'
        is_reference_only = 'is_reference_only'
        length = 'length'
        live_audience_count = 'live_audience_count'
        live_status = 'live_status'
        music_video_copyright = 'music_video_copyright'
        permalink_url = 'permalink_url'
        picture = 'picture'
        place = 'place'
        premiere_living_room_status = 'premiere_living_room_status'
        privacy = 'privacy'
        published = 'published'
        scheduled_publish_time = 'scheduled_publish_time'
        source = 'source'
        spherical = 'spherical'
        status = 'status'
        title = 'title'
        universal_video_id = 'universal_video_id'
        updated_time = 'updated_time'
        adaptive_type = 'adaptive_type'
        animated_effect_id = 'animated_effect_id'
        application_id = 'application_id'
        asked_fun_fact_prompt_id = 'asked_fun_fact_prompt_id'
        audio_story_wave_animation_handle = 'audio_story_wave_animation_handle'
        chunk_session_id = 'chunk_session_id'
        composer_entry_picker = 'composer_entry_picker'
        composer_entry_point = 'composer_entry_point'
        composer_entry_time = 'composer_entry_time'
        composer_session_events_log = 'composer_session_events_log'
        composer_session_id = 'composer_session_id'
        composer_source_surface = 'composer_source_surface'
        composer_type = 'composer_type'
        container_type = 'container_type'
        creative_tools = 'creative_tools'
        end_offset = 'end_offset'
        fbuploader_video_file_chunk = 'fbuploader_video_file_chunk'
        file_size = 'file_size'
        file_url = 'file_url'
        fisheye_video_cropped = 'fisheye_video_cropped'
        formatting = 'formatting'
        fov = 'fov'
        front_z_rotation = 'front_z_rotation'
        fun_fact_prompt_id = 'fun_fact_prompt_id'
        fun_fact_toastee_id = 'fun_fact_toastee_id'
        guide = 'guide'
        guide_enabled = 'guide_enabled'
        has_nickname = 'has_nickname'
        holiday_card = 'holiday_card'
        initial_heading = 'initial_heading'
        initial_pitch = 'initial_pitch'
        instant_game_entry_point_data = 'instant_game_entry_point_data'
        is_boost_intended = 'is_boost_intended'
        is_group_linking_post = 'is_group_linking_post'
        is_voice_clip = 'is_voice_clip'
        location_source_id = 'location_source_id'
        name = 'name'
        offer_like_post_id = 'offer_like_post_id'
        og_action_type_id = 'og_action_type_id'
        og_icon_id = 'og_icon_id'
        og_object_id = 'og_object_id'
        og_phrase = 'og_phrase'
        og_suggestion_mechanism = 'og_suggestion_mechanism'
        original_fov = 'original_fov'
        original_projection_type = 'original_projection_type'
        publish_event_id = 'publish_event_id'
        react_mode_metadata = 'react_mode_metadata'
        referenced_sticker_id = 'referenced_sticker_id'
        replace_video_id = 'replace_video_id'
        sales_promo_id = 'sales_promo_id'
        slideshow_spec = 'slideshow_spec'
        source_instagram_media_id = 'source_instagram_media_id'
        start_offset = 'start_offset'
        swap_mode = 'swap_mode'
        text_format_metadata = 'text_format_metadata'
        throwback_camera_roll_media = 'throwback_camera_roll_media'
        thumb = 'thumb'
        time_since_original_post = 'time_since_original_post'
        transcode_setting_properties = 'transcode_setting_properties'
        unpublished_content_type = 'unpublished_content_type'
        upload_phase = 'upload_phase'
        upload_session_id = 'upload_session_id'
        upload_setting_properties = 'upload_setting_properties'
        video_file_chunk = 'video_file_chunk'
        video_id_original = 'video_id_original'
        video_start_time_ms = 'video_start_time_ms'
        waterfall_id = 'waterfall_id'
        filename = 'filename'
        filepath = 'filepath'
    class ContainerType:
        """Allowed string values for the `container_type` upload parameter."""
        aco_autoextracted_video = 'ACO_AUTOEXTRACTED_VIDEO'
        aco_video_variation = 'ACO_VIDEO_VARIATION'
        ad_break_preview = 'AD_BREAK_PREVIEW'
        ad_derivative = 'AD_DERIVATIVE'
        age_up = 'AGE_UP'
        album_multimedia_post = 'ALBUM_MULTIMEDIA_POST'
        aloha_call_video = 'ALOHA_CALL_VIDEO'
        aloha_superframe = 'ALOHA_SUPERFRAME'
        app_review_screencast = 'APP_REVIEW_SCREENCAST'
        atlas_video = 'ATLAS_VIDEO'
        audio_broadcast = 'AUDIO_BROADCAST'
        bell_poll = 'BELL_POLL'
        brand_equity_poll_video = 'BRAND_EQUITY_POLL_VIDEO'
        broadcast = 'BROADCAST'
        candidate_videos = 'CANDIDATE_VIDEOS'
        canvas = 'CANVAS'
        cfc_video = 'CFC_VIDEO'
        cms_media_manager = 'CMS_MEDIA_MANAGER'
        contained_post_attachment = 'CONTAINED_POST_ATTACHMENT'
        contained_post_audio_broadcast = 'CONTAINED_POST_AUDIO_BROADCAST'
        contained_post_broadcast = 'CONTAINED_POST_BROADCAST'
        copyright_reference_broadcast = 'COPYRIGHT_REFERENCE_BROADCAST'
        copyright_reference_video = 'COPYRIGHT_REFERENCE_VIDEO'
        cultural_moment_deprecated = 'CULTURAL_MOMENT_DEPRECATED'
        dco_ad_asset_feed = 'DCO_AD_ASSET_FEED'
        dco_autogen_video = 'DCO_AUTOGEN_VIDEO'
        dco_trimmed_video = 'DCO_TRIMMED_VIDEO'
        dim_sum = 'DIM_SUM'
        directed_post_attachment = 'DIRECTED_POST_ATTACHMENT'
        direct_inbox = 'DIRECT_INBOX'
        direct_inbox_reaction = 'DIRECT_INBOX_REACTION'
        dynamic_item_display_bundle = 'DYNAMIC_ITEM_DISPLAY_BUNDLE'
        dynamic_item_video = 'DYNAMIC_ITEM_VIDEO'
        dynamic_template_video = 'DYNAMIC_TEMPLATE_VIDEO'
        event_cover_video = 'EVENT_COVER_VIDEO'
        event_tour = 'EVENT_TOUR'
        facecast_dvr = 'FACECAST_DVR'
        fb_shorts = 'FB_SHORTS'
        fb_shorts_group_post = 'FB_SHORTS_GROUP_POST'
        fb_shorts_post = 'FB_SHORTS_POST'
        fundraiser_cover_video = 'FUNDRAISER_COVER_VIDEO'
        game_clip = 'GAME_CLIP'
        gemstone = 'GEMSTONE'
        goodwill_anniversary_deprecated = 'GOODWILL_ANNIVERSARY_DEPRECATED'
        goodwill_anniversary_promotion_deprecated = 'GOODWILL_ANNIVERSARY_PROMOTION_DEPRECATED'
        goodwill_video_contained_share = 'GOODWILL_VIDEO_CONTAINED_SHARE'
        goodwill_video_promotion = 'GOODWILL_VIDEO_PROMOTION'
        goodwill_video_share = 'GOODWILL_VIDEO_SHARE'
        goodwill_video_token_required = 'GOODWILL_VIDEO_TOKEN_REQUIRED'
        group_post = 'GROUP_POST'
        heuristic_cluster_video = 'HEURISTIC_CLUSTER_VIDEO'
        heuristic_preview = 'HEURISTIC_PREVIEW'
        highlight_clip_video = 'HIGHLIGHT_CLIP_VIDEO'
        ig_reels_xpv = 'IG_REELS_XPV'
        ig_stories_reader = 'IG_STORIES_READER'
        inspiration_video = 'INSPIRATION_VIDEO'
        instagram_video_copy = 'INSTAGRAM_VIDEO_COPY'
        instant_application_preview = 'INSTANT_APPLICATION_PREVIEW'
        instant_article = 'INSTANT_ARTICLE'
        instant_game_clip = 'INSTANT_GAME_CLIP'
        issue_module = 'ISSUE_MODULE'
        job_application_video = 'JOB_APPLICATION_VIDEO'
        job_opening_video = 'JOB_OPENING_VIDEO'
        kototoro = 'KOTOTORO'
        learn = 'LEARN'
        legacy = 'LEGACY'
        live_creative_kit_video = 'LIVE_CREATIVE_KIT_VIDEO'
        live_linear_video_channel_internal_broadcast = 'LIVE_LINEAR_VIDEO_CHANNEL_INTERNAL_BROADCAST'
        live_photo = 'LIVE_PHOTO'
        look_now_deprecated = 'LOOK_NOW_DEPRECATED'
        marketplace_listing_video = 'MARKETPLACE_LISTING_VIDEO'
        marketplace_pre_recorded_video = 'MARKETPLACE_PRE_RECORDED_VIDEO'
        moments_video = 'MOMENTS_VIDEO'
        neo_async_game_video = 'NEO_ASYNC_GAME_VIDEO'
        no_story = 'NO_STORY'
        no_story_with_entpost = 'NO_STORY_WITH_ENTPOST'
        oculus_creator_portal = 'OCULUS_CREATOR_PORTAL'
        oculus_venues_broadcast = 'OCULUS_VENUES_BROADCAST'
        offers_video = 'OFFERS_VIDEO'
        pages_cover_video = 'PAGES_COVER_VIDEO'
        page_review_screencast = 'PAGE_REVIEW_SCREENCAST'
        page_slideshow_video = 'PAGE_SLIDESHOW_VIDEO'
        pixelcloud = 'PIXELCLOUD'
        premiere_source = 'PREMIERE_SOURCE'
        private_gallery_video = 'PRIVATE_GALLERY_VIDEO'
        product_video = 'PRODUCT_VIDEO'
        profile_cover_video = 'PROFILE_COVER_VIDEO'
        profile_intro_card = 'PROFILE_INTRO_CARD'
        profile_to_page_uploaded_video = 'PROFILE_TO_PAGE_UPLOADED_VIDEO'
        profile_video = 'PROFILE_VIDEO'
        proton = 'PROTON'
        quick_promotion = 'QUICK_PROMOTION'
        replace_video = 'REPLACE_VIDEO'
        sales_client_interaction = 'SALES_CLIENT_INTERACTION'
        say_thanks_deprecated = 'SAY_THANKS_DEPRECATED'
        showreel_native_dummy_video = 'SHOWREEL_NATIVE_DUMMY_VIDEO'
        slideshow_animoto = 'SLIDESHOW_ANIMOTO'
        slideshow_shakr = 'SLIDESHOW_SHAKR'
        slideshow_variation_video = 'SLIDESHOW_VARIATION_VIDEO'
        sotto_content = 'SOTTO_CONTENT'
        sound_platform_stream = 'SOUND_PLATFORM_STREAM'
        stories_video = 'STORIES_VIDEO'
        storyline = 'STORYLINE'
        storyline_with_external_music = 'STORYLINE_WITH_EXTERNAL_MUSIC'
        story_archive_video = 'STORY_ARCHIVE_VIDEO'
        story_card_template = 'STORY_CARD_TEMPLATE'
        stream_highlights_video = 'STREAM_HIGHLIGHTS_VIDEO'
        tarot_digest = 'TAROT_DIGEST'
        temp_multimedia_post = 'TEMP_MULTIMEDIA_POST'
        unlisted = 'UNLISTED'
        video_comment = 'VIDEO_COMMENT'
        video_creative_editor_autogen_ad_video = 'VIDEO_CREATIVE_EDITOR_AUTOGEN_AD_VIDEO'
        video_superres = 'VIDEO_SUPERRES'
        vu_generated_video = 'VU_GENERATED_VIDEO'
        woodhenge = 'WOODHENGE'
        work_knowledge_video = 'WORK_KNOWLEDGE_VIDEO'
        your_day = 'YOUR_DAY'
    class ContentCategory:
        """Allowed string values for the `content_category` field
        (validated by api_update's enums mapping)."""
        beauty_fashion = 'BEAUTY_FASHION'
        business = 'BUSINESS'
        cars_trucks = 'CARS_TRUCKS'
        comedy = 'COMEDY'
        cute_animals = 'CUTE_ANIMALS'
        entertainment = 'ENTERTAINMENT'
        family = 'FAMILY'
        food_health = 'FOOD_HEALTH'
        home = 'HOME'
        lifestyle = 'LIFESTYLE'
        music = 'MUSIC'
        news = 'NEWS'
        other = 'OTHER'
        politics = 'POLITICS'
        science = 'SCIENCE'
        sports = 'SPORTS'
        technology = 'TECHNOLOGY'
        video_gaming = 'VIDEO_GAMING'
    class Formatting:
        """Allowed string values for the `formatting` upload parameter."""
        markdown = 'MARKDOWN'
        plaintext = 'PLAINTEXT'
    class OriginalProjectionType:
        """Allowed string values for the `original_projection_type`
        (360/spherical video) upload parameter."""
        cubemap = 'cubemap'
        equirectangular = 'equirectangular'
        half_equirectangular = 'half_equirectangular'
    class SwapMode:
        """Allowed string values for the `swap_mode` upload parameter."""
        replace = 'replace'
    class UnpublishedContentType:
        """Allowed string values for the `unpublished_content_type`
        upload parameter."""
        ads_post = 'ADS_POST'
        draft = 'DRAFT'
        inline_created = 'INLINE_CREATED'
        published = 'PUBLISHED'
        reviewable_branded_content = 'REVIEWABLE_BRANDED_CONTENT'
        scheduled = 'SCHEDULED'
        scheduled_recurring = 'SCHEDULED_RECURRING'
    class UploadPhase:
        """Allowed string values for the `upload_phase` parameter of the
        chunked video-upload flow."""
        cancel = 'cancel'
        finish = 'finish'
        start = 'start'
        transfer = 'transfer'
    class Type:
        """Enum values (tagged/uploaded); presumably the `type` filter used
        on video edges -- not referenced elsewhere in this chunk."""
        tagged = 'tagged'
        uploaded = 'uploaded'
    class BackdatedTimeGranularity:
        """Allowed string values for `backdated_time_granularity`
        (validated by api_update's enums mapping)."""
        day = 'day'
        hour = 'hour'
        min = 'min'
        month = 'month'
        none = 'none'
        year = 'year'
    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        """Deprecated: Graph API edge name for videos under a parent node."""
        return 'advideos'
    # @deprecated api_create is being deprecated
    def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Deprecated: delegates to AdAccount(parent_id).create_ad_video."""
        from facebook_business.adobjects.adaccount import AdAccount
        return AdAccount(api=self._api, fbid=parent_id).create_ad_video(fields, params, batch, success, failure, pending)
    def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Issue a Graph API DELETE on this video node (DELETE /<id>).

        Returns the executed result, or the FacebookRequest itself when the
        call is batched or `pending` is True. `success`/`failure` callbacks
        only apply to batch calls.
        """
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        # This endpoint accepts no typed parameters.
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='DELETE',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Read this video node from the Graph API (GET /<id>).

        The response is parsed back into this AdVideo instance
        (reuse_object=self). Returns the executed result, or the
        FacebookRequest itself when batched or `pending` is True.
        """
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        # This endpoint accepts no typed parameters.
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Update this video node via the Graph API (POST /<id>).

        `params` are validated against `param_types`; the two enum-typed
        parameters are checked against the nested constant classes below.
        Returns the executed result, or the FacebookRequest itself when
        batched or `pending` is True.
        """
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'ad_breaks': 'list',
            'allow_bm_crossposting': 'bool',
            'allow_crossposting_for_pages': 'list<Object>',
            'backdated_time': 'datetime',
            'backdated_time_granularity': 'backdated_time_granularity_enum',
            'call_to_action': 'Object',
            'content_category': 'content_category_enum',
            'content_tags': 'list<string>',
            'custom_labels': 'list<string>',
            'description': 'string',
            'direct_share_status': 'unsigned int',
            'embeddable': 'bool',
            'expiration': 'Object',
            'expire_now': 'bool',
            'increment_play_count': 'bool',
            'name': 'string',
            'preferred_thumbnail_id': 'string',
            'privacy': 'string',
            'publish_to_news_feed': 'bool',
            'publish_to_videos_tab': 'bool',
            'published': 'bool',
            'scheduled_publish_time': 'unsigned int',
            'social_actions': 'bool',
            'sponsor_id': 'string',
            'sponsor_relationship': 'unsigned int',
            'tags': 'list<string>',
            'target': 'string',
            'universal_video_id': 'string',
        }
        enums = {
            'backdated_time_granularity_enum': AdVideo.BackdatedTimeGranularity.__dict__.values(),
            'content_category_enum': AdVideo.ContentCategory.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def get_captions(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Read this video's captions edge (GET /<id>/captions).

        Returns the executed result, or the FacebookRequest itself when
        batched or `pending` is True.
        """
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        # This edge accepts no typed parameters.
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/captions',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def create_caption(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Upload/replace captions on this video (POST /<id>/captions).

        Accepts a captions file, a default locale, and a list of locales to
        delete. Returns the executed result, or the FacebookRequest itself
        when batched or `pending` is True.
        """
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'captions_file': 'file',
            'default_locale': 'string',
            'locales_to_delete': 'list<string>',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/captions',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdVideo, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
def get_comments(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.comment import Comment
param_types = {
'filter': 'filter_enum',
'live_filter': 'live_filter_enum',
'order': 'order_enum',
'since': 'datetime',
}
enums = {
'filter_enum': Comment.Filter.__dict__.values(),
'live_filter_enum': Comment.LiveFilter.__dict__.values(),
'order_enum': Comment.Order.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/comments',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Comment,
api_type='EDGE',
response_parser=ObjectParser(target_class=Comment, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_comment(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.comment import Comment
param_types = {
'attachment_id': 'string',
'attachment_share_url': 'string',
'attachment_url': 'string',
'comment_privacy_value': 'comment_privacy_value_enum',
'facepile_mentioned_ids': 'list<string>',
'feedback_source': 'string',
'is_offline': 'bool',
'message': 'string',
'nectar_module': 'string',
'object_id': 'string',
'parent_comment_id': 'Object',
'text': 'string',
'tracking': 'string',
}
enums = {
'comment_privacy_value_enum': Comment.CommentPrivacyValue.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/comments',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Comment,
api_type='EDGE',
response_parser=ObjectParser(target_class=Comment, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_crosspost_shared_pages(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.page import Page
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/crosspost_shared_pages',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Page,
api_type='EDGE',
response_parser=ObjectParser(target_class=Page, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_likes(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.profile import Profile
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/likes',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Profile,
api_type='EDGE',
response_parser=ObjectParser(target_class=Profile, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_like(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'feedback_source': 'string',
'nectar_module': 'string',
'notify': 'bool',
'tracking': 'string',
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/likes',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdVideo,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdVideo, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_poll_settings(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/poll_settings',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_polls(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.videopoll import VideoPoll
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/polls',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=VideoPoll,
api_type='EDGE',
response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_poll(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.videopoll import VideoPoll
param_types = {
'close_after_voting': 'bool',
'correct_option': 'unsigned int',
'default_open': 'bool',
'options': 'list<string>',
'question': 'string',
'show_gradient': 'bool',
'show_results': 'bool',
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/polls',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=VideoPoll,
api_type='EDGE',
response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_sponsor_tags(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.page import Page
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/sponsor_tags',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Page,
api_type='EDGE',
response_parser=ObjectParser(target_class=Page, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_tags(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/tags',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_tag(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'tag_uid': 'int',
'uid': 'int',
'vid': 'string',
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/tags',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdVideo,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdVideo, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_thumbnails(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.videothumbnail import VideoThumbnail
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/thumbnails',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=VideoThumbnail,
api_type='EDGE',
response_parser=ObjectParser(target_class=VideoThumbnail, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_thumbnail(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'is_preferred': 'bool',
'source': 'file',
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/thumbnails',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdVideo,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdVideo, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_video_insights(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.insightsresult import InsightsResult
param_types = {
'metric': 'list<Object>',
'period': 'period_enum',
'since': 'datetime',
'until': 'datetime',
}
enums = {
'period_enum': InsightsResult.Period.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/video_insights',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=InsightsResult,
api_type='EDGE',
response_parser=ObjectParser(target_class=InsightsResult, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'ad_breaks': 'list<int>',
'backdated_time': 'datetime',
'backdated_time_granularity': 'string',
'content_category': 'string',
'content_tags': 'list<string>',
'copyright': 'VideoCopyright',
'copyright_monitoring_status': 'string',
'created_time': 'datetime',
'custom_labels': 'list<string>',
'description': 'string',
'embed_html': 'string',
'embeddable': 'bool',
'event': 'Event',
'expiration': 'Object',
'format': 'list<Object>',
'from': 'Object',
'icon': 'string',
'id': 'string',
'is_crosspost_video': 'bool',
'is_crossposting_eligible': 'bool',
'is_episode': 'bool',
'is_instagram_eligible': 'bool',
'is_reference_only': 'bool',
'length': 'float',
'live_audience_count': 'unsigned int',
'live_status': 'string',
'music_video_copyright': 'MusicVideoCopyright',
'permalink_url': 'string',
'picture': 'string',
'place': 'Place',
'premiere_living_room_status': 'string',
'privacy': 'Privacy',
'published': 'bool',
'scheduled_publish_time': 'datetime',
'source': 'string',
'spherical': 'bool',
'status': 'Object',
'title': 'string',
'universal_video_id': 'string',
'updated_time': 'datetime',
'adaptive_type': 'string',
'animated_effect_id': 'unsigned int',
'application_id': 'string',
'asked_fun_fact_prompt_id': 'unsigned int',
'audio_story_wave_animation_handle': 'string',
'chunk_session_id': 'string',
'composer_entry_picker': 'string',
'composer_entry_point': 'string',
'composer_entry_time': 'unsigned int',
'composer_session_events_log': 'string',
'composer_session_id': 'string',
'composer_source_surface': 'string',
'composer_type': 'string',
'container_type': 'ContainerType',
'creative_tools': 'string',
'end_offset': 'unsigned int',
'fbuploader_video_file_chunk': 'string',
'file_size': 'unsigned int',
'file_url': 'string',
'fisheye_video_cropped': 'bool',
'formatting': 'Formatting',
'fov': 'unsigned int',
'front_z_rotation': 'float',
'fun_fact_prompt_id': 'unsigned int',
'fun_fact_toastee_id': 'unsigned int',
'guide': 'list<list<unsigned int>>',
'guide_enabled': 'bool',
'has_nickname': 'bool',
'holiday_card': 'string',
'initial_heading': 'unsigned int',
'initial_pitch': 'unsigned int',
'instant_game_entry_point_data': 'string',
'is_boost_intended': 'bool',
'is_group_linking_post': 'bool',
'is_voice_clip': 'bool',
'location_source_id': 'string',
'name': 'string',
'offer_like_post_id': 'unsigned int',
'og_action_type_id': 'string',
'og_icon_id': 'string',
'og_object_id': 'string',
'og_phrase': 'string',
'og_suggestion_mechanism': 'string',
'original_fov': 'unsigned int',
'original_projection_type': 'OriginalProjectionType',
'publish_event_id': 'unsigned int',
'react_mode_metadata': 'string',
'referenced_sticker_id': 'string',
'replace_video_id': 'string',
'sales_promo_id': 'unsigned int',
'slideshow_spec': 'map',
'source_instagram_media_id': 'string',
'start_offset': 'unsigned int',
'swap_mode': 'SwapMode',
'text_format_metadata': 'string',
'throwback_camera_roll_media': 'string',
'thumb': 'file',
'time_since_original_post': 'unsigned int',
'transcode_setting_properties': 'string',
'unpublished_content_type': 'UnpublishedContentType',
'upload_phase': 'UploadPhase',
'upload_session_id': 'string',
'upload_setting_properties': 'string',
'video_file_chunk': 'string',
'video_id_original': 'string',
'video_start_time_ms': 'unsigned int',
'waterfall_id': 'string',
'filename': 'file'
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['ContainerType'] = AdVideo.ContainerType.__dict__.values()
field_enum_info['ContentCategory'] = AdVideo.ContentCategory.__dict__.values()
field_enum_info['Formatting'] = AdVideo.Formatting.__dict__.values()
field_enum_info['OriginalProjectionType'] = AdVideo.OriginalProjectionType.__dict__.values()
field_enum_info['SwapMode'] = AdVideo.SwapMode.__dict__.values()
field_enum_info['UnpublishedContentType'] = AdVideo.UnpublishedContentType.__dict__.values()
field_enum_info['UploadPhase'] = AdVideo.UploadPhase.__dict__.values()
field_enum_info['Type'] = AdVideo.Type.__dict__.values()
field_enum_info['BackdatedTimeGranularity'] = AdVideo.BackdatedTimeGranularity.__dict__.values()
return field_enum_info
    def remote_create(
        self,
        batch=None,
        failure=None,
        params=None,
        success=None,
    ):
        """
        Uploads filepath and creates the AdVideo object from it.
        It has same arguments as AbstractCrudObject.remote_create except it
        does not have the files argument but requires the 'filepath' property
        to be defined.
        """
        # NOTE(review): batch/failure/params/success are accepted for
        # signature compatibility but are not referenced in this body.
        from facebook_business.exceptions import FacebookBadObjectError
        from facebook_business.video_uploader import (
            VideoUploader,
            VideoUploadRequest,
        )
        # Slideshow videos are created with a single POST carrying the spec;
        # no file chunks are uploaded in this branch.
        if (self.Field.slideshow_spec in self and
            self[self.Field.slideshow_spec] is not None):
            request = VideoUploadRequest(self.get_api_assured())
            request.setParams(params={'slideshow_spec': {
                'images_urls': self[self.Field.slideshow_spec]['images_urls'],
                'duration_ms': self[self.Field.slideshow_spec]['duration_ms'],
                'transition_ms': self[self.Field.slideshow_spec]['transition_ms'],
            }})
            response = request.send((self.get_parent_id_assured(), 'advideos')).json()
        # Without a slideshow spec, a local file path is mandatory.
        elif not (self.Field.filepath in self):
            raise FacebookBadObjectError(
                "AdVideo requires a filepath or slideshow_spec to be defined.",
            )
        else:
            # Regular path: delegate the (possibly chunked) upload of the
            # file at self['filepath'] to VideoUploader.
            video_uploader = VideoUploader()
            response = video_uploader.upload(self)
        # Populate this object's fields from the API response and return it.
        self._set_data(response)
        return response
def waitUntilEncodingReady(self, interval=30, timeout=600):
from facebook_business.video_uploader import VideoEncodingStatusChecker
from facebook_business.exceptions import FacebookError
if 'id' not in self:
raise FacebookError(
'Invalid Video ID',
)
VideoEncodingStatusChecker.waitUntilReady(
self.get_api_assured(),
self['id'],
interval,
timeout,
)
| 39.400682 | 122 | 0.624857 |
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
class AdVideo(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdVideo = True
super(AdVideo, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
ad_breaks = 'ad_breaks'
backdated_time = 'backdated_time'
backdated_time_granularity = 'backdated_time_granularity'
content_category = 'content_category'
content_tags = 'content_tags'
copyright = 'copyright'
copyright_monitoring_status = 'copyright_monitoring_status'
created_time = 'created_time'
custom_labels = 'custom_labels'
description = 'description'
embed_html = 'embed_html'
embeddable = 'embeddable'
event = 'event'
expiration = 'expiration'
format = 'format'
field_from = 'from'
icon = 'icon'
id = 'id'
is_crosspost_video = 'is_crosspost_video'
is_crossposting_eligible = 'is_crossposting_eligible'
is_episode = 'is_episode'
is_instagram_eligible = 'is_instagram_eligible'
is_reference_only = 'is_reference_only'
length = 'length'
live_audience_count = 'live_audience_count'
live_status = 'live_status'
music_video_copyright = 'music_video_copyright'
permalink_url = 'permalink_url'
picture = 'picture'
place = 'place'
premiere_living_room_status = 'premiere_living_room_status'
privacy = 'privacy'
published = 'published'
scheduled_publish_time = 'scheduled_publish_time'
source = 'source'
spherical = 'spherical'
status = 'status'
title = 'title'
universal_video_id = 'universal_video_id'
updated_time = 'updated_time'
adaptive_type = 'adaptive_type'
animated_effect_id = 'animated_effect_id'
application_id = 'application_id'
asked_fun_fact_prompt_id = 'asked_fun_fact_prompt_id'
audio_story_wave_animation_handle = 'audio_story_wave_animation_handle'
chunk_session_id = 'chunk_session_id'
composer_entry_picker = 'composer_entry_picker'
composer_entry_point = 'composer_entry_point'
composer_entry_time = 'composer_entry_time'
composer_session_events_log = 'composer_session_events_log'
composer_session_id = 'composer_session_id'
composer_source_surface = 'composer_source_surface'
composer_type = 'composer_type'
container_type = 'container_type'
creative_tools = 'creative_tools'
end_offset = 'end_offset'
fbuploader_video_file_chunk = 'fbuploader_video_file_chunk'
file_size = 'file_size'
file_url = 'file_url'
fisheye_video_cropped = 'fisheye_video_cropped'
formatting = 'formatting'
fov = 'fov'
front_z_rotation = 'front_z_rotation'
fun_fact_prompt_id = 'fun_fact_prompt_id'
fun_fact_toastee_id = 'fun_fact_toastee_id'
guide = 'guide'
guide_enabled = 'guide_enabled'
has_nickname = 'has_nickname'
holiday_card = 'holiday_card'
initial_heading = 'initial_heading'
initial_pitch = 'initial_pitch'
instant_game_entry_point_data = 'instant_game_entry_point_data'
is_boost_intended = 'is_boost_intended'
is_group_linking_post = 'is_group_linking_post'
is_voice_clip = 'is_voice_clip'
location_source_id = 'location_source_id'
name = 'name'
offer_like_post_id = 'offer_like_post_id'
og_action_type_id = 'og_action_type_id'
og_icon_id = 'og_icon_id'
og_object_id = 'og_object_id'
og_phrase = 'og_phrase'
og_suggestion_mechanism = 'og_suggestion_mechanism'
original_fov = 'original_fov'
original_projection_type = 'original_projection_type'
publish_event_id = 'publish_event_id'
react_mode_metadata = 'react_mode_metadata'
referenced_sticker_id = 'referenced_sticker_id'
replace_video_id = 'replace_video_id'
sales_promo_id = 'sales_promo_id'
slideshow_spec = 'slideshow_spec'
source_instagram_media_id = 'source_instagram_media_id'
start_offset = 'start_offset'
swap_mode = 'swap_mode'
text_format_metadata = 'text_format_metadata'
throwback_camera_roll_media = 'throwback_camera_roll_media'
thumb = 'thumb'
time_since_original_post = 'time_since_original_post'
transcode_setting_properties = 'transcode_setting_properties'
unpublished_content_type = 'unpublished_content_type'
upload_phase = 'upload_phase'
upload_session_id = 'upload_session_id'
upload_setting_properties = 'upload_setting_properties'
video_file_chunk = 'video_file_chunk'
video_id_original = 'video_id_original'
video_start_time_ms = 'video_start_time_ms'
waterfall_id = 'waterfall_id'
filename = 'filename'
filepath = 'filepath'
class ContainerType:
aco_autoextracted_video = 'ACO_AUTOEXTRACTED_VIDEO'
aco_video_variation = 'ACO_VIDEO_VARIATION'
ad_break_preview = 'AD_BREAK_PREVIEW'
ad_derivative = 'AD_DERIVATIVE'
age_up = 'AGE_UP'
album_multimedia_post = 'ALBUM_MULTIMEDIA_POST'
aloha_call_video = 'ALOHA_CALL_VIDEO'
aloha_superframe = 'ALOHA_SUPERFRAME'
app_review_screencast = 'APP_REVIEW_SCREENCAST'
atlas_video = 'ATLAS_VIDEO'
audio_broadcast = 'AUDIO_BROADCAST'
bell_poll = 'BELL_POLL'
brand_equity_poll_video = 'BRAND_EQUITY_POLL_VIDEO'
broadcast = 'BROADCAST'
candidate_videos = 'CANDIDATE_VIDEOS'
canvas = 'CANVAS'
cfc_video = 'CFC_VIDEO'
cms_media_manager = 'CMS_MEDIA_MANAGER'
contained_post_attachment = 'CONTAINED_POST_ATTACHMENT'
contained_post_audio_broadcast = 'CONTAINED_POST_AUDIO_BROADCAST'
contained_post_broadcast = 'CONTAINED_POST_BROADCAST'
copyright_reference_broadcast = 'COPYRIGHT_REFERENCE_BROADCAST'
copyright_reference_video = 'COPYRIGHT_REFERENCE_VIDEO'
cultural_moment_deprecated = 'CULTURAL_MOMENT_DEPRECATED'
dco_ad_asset_feed = 'DCO_AD_ASSET_FEED'
dco_autogen_video = 'DCO_AUTOGEN_VIDEO'
dco_trimmed_video = 'DCO_TRIMMED_VIDEO'
dim_sum = 'DIM_SUM'
directed_post_attachment = 'DIRECTED_POST_ATTACHMENT'
direct_inbox = 'DIRECT_INBOX'
direct_inbox_reaction = 'DIRECT_INBOX_REACTION'
dynamic_item_display_bundle = 'DYNAMIC_ITEM_DISPLAY_BUNDLE'
dynamic_item_video = 'DYNAMIC_ITEM_VIDEO'
dynamic_template_video = 'DYNAMIC_TEMPLATE_VIDEO'
event_cover_video = 'EVENT_COVER_VIDEO'
event_tour = 'EVENT_TOUR'
facecast_dvr = 'FACECAST_DVR'
fb_shorts = 'FB_SHORTS'
fb_shorts_group_post = 'FB_SHORTS_GROUP_POST'
fb_shorts_post = 'FB_SHORTS_POST'
fundraiser_cover_video = 'FUNDRAISER_COVER_VIDEO'
game_clip = 'GAME_CLIP'
gemstone = 'GEMSTONE'
goodwill_anniversary_deprecated = 'GOODWILL_ANNIVERSARY_DEPRECATED'
goodwill_anniversary_promotion_deprecated = 'GOODWILL_ANNIVERSARY_PROMOTION_DEPRECATED'
goodwill_video_contained_share = 'GOODWILL_VIDEO_CONTAINED_SHARE'
goodwill_video_promotion = 'GOODWILL_VIDEO_PROMOTION'
goodwill_video_share = 'GOODWILL_VIDEO_SHARE'
goodwill_video_token_required = 'GOODWILL_VIDEO_TOKEN_REQUIRED'
group_post = 'GROUP_POST'
heuristic_cluster_video = 'HEURISTIC_CLUSTER_VIDEO'
heuristic_preview = 'HEURISTIC_PREVIEW'
highlight_clip_video = 'HIGHLIGHT_CLIP_VIDEO'
ig_reels_xpv = 'IG_REELS_XPV'
ig_stories_reader = 'IG_STORIES_READER'
inspiration_video = 'INSPIRATION_VIDEO'
instagram_video_copy = 'INSTAGRAM_VIDEO_COPY'
instant_application_preview = 'INSTANT_APPLICATION_PREVIEW'
instant_article = 'INSTANT_ARTICLE'
instant_game_clip = 'INSTANT_GAME_CLIP'
issue_module = 'ISSUE_MODULE'
job_application_video = 'JOB_APPLICATION_VIDEO'
job_opening_video = 'JOB_OPENING_VIDEO'
kototoro = 'KOTOTORO'
learn = 'LEARN'
legacy = 'LEGACY'
live_creative_kit_video = 'LIVE_CREATIVE_KIT_VIDEO'
live_linear_video_channel_internal_broadcast = 'LIVE_LINEAR_VIDEO_CHANNEL_INTERNAL_BROADCAST'
live_photo = 'LIVE_PHOTO'
look_now_deprecated = 'LOOK_NOW_DEPRECATED'
marketplace_listing_video = 'MARKETPLACE_LISTING_VIDEO'
marketplace_pre_recorded_video = 'MARKETPLACE_PRE_RECORDED_VIDEO'
moments_video = 'MOMENTS_VIDEO'
neo_async_game_video = 'NEO_ASYNC_GAME_VIDEO'
no_story = 'NO_STORY'
no_story_with_entpost = 'NO_STORY_WITH_ENTPOST'
oculus_creator_portal = 'OCULUS_CREATOR_PORTAL'
oculus_venues_broadcast = 'OCULUS_VENUES_BROADCAST'
offers_video = 'OFFERS_VIDEO'
pages_cover_video = 'PAGES_COVER_VIDEO'
page_review_screencast = 'PAGE_REVIEW_SCREENCAST'
page_slideshow_video = 'PAGE_SLIDESHOW_VIDEO'
pixelcloud = 'PIXELCLOUD'
premiere_source = 'PREMIERE_SOURCE'
private_gallery_video = 'PRIVATE_GALLERY_VIDEO'
product_video = 'PRODUCT_VIDEO'
profile_cover_video = 'PROFILE_COVER_VIDEO'
profile_intro_card = 'PROFILE_INTRO_CARD'
profile_to_page_uploaded_video = 'PROFILE_TO_PAGE_UPLOADED_VIDEO'
profile_video = 'PROFILE_VIDEO'
proton = 'PROTON'
quick_promotion = 'QUICK_PROMOTION'
replace_video = 'REPLACE_VIDEO'
sales_client_interaction = 'SALES_CLIENT_INTERACTION'
say_thanks_deprecated = 'SAY_THANKS_DEPRECATED'
showreel_native_dummy_video = 'SHOWREEL_NATIVE_DUMMY_VIDEO'
slideshow_animoto = 'SLIDESHOW_ANIMOTO'
slideshow_shakr = 'SLIDESHOW_SHAKR'
slideshow_variation_video = 'SLIDESHOW_VARIATION_VIDEO'
sotto_content = 'SOTTO_CONTENT'
sound_platform_stream = 'SOUND_PLATFORM_STREAM'
stories_video = 'STORIES_VIDEO'
storyline = 'STORYLINE'
storyline_with_external_music = 'STORYLINE_WITH_EXTERNAL_MUSIC'
story_archive_video = 'STORY_ARCHIVE_VIDEO'
story_card_template = 'STORY_CARD_TEMPLATE'
stream_highlights_video = 'STREAM_HIGHLIGHTS_VIDEO'
tarot_digest = 'TAROT_DIGEST'
temp_multimedia_post = 'TEMP_MULTIMEDIA_POST'
unlisted = 'UNLISTED'
video_comment = 'VIDEO_COMMENT'
video_creative_editor_autogen_ad_video = 'VIDEO_CREATIVE_EDITOR_AUTOGEN_AD_VIDEO'
video_superres = 'VIDEO_SUPERRES'
vu_generated_video = 'VU_GENERATED_VIDEO'
woodhenge = 'WOODHENGE'
work_knowledge_video = 'WORK_KNOWLEDGE_VIDEO'
your_day = 'YOUR_DAY'
class ContentCategory:
beauty_fashion = 'BEAUTY_FASHION'
business = 'BUSINESS'
cars_trucks = 'CARS_TRUCKS'
comedy = 'COMEDY'
cute_animals = 'CUTE_ANIMALS'
entertainment = 'ENTERTAINMENT'
family = 'FAMILY'
food_health = 'FOOD_HEALTH'
home = 'HOME'
lifestyle = 'LIFESTYLE'
music = 'MUSIC'
news = 'NEWS'
other = 'OTHER'
politics = 'POLITICS'
science = 'SCIENCE'
sports = 'SPORTS'
technology = 'TECHNOLOGY'
video_gaming = 'VIDEO_GAMING'
class Formatting:
markdown = 'MARKDOWN'
plaintext = 'PLAINTEXT'
class OriginalProjectionType:
cubemap = 'cubemap'
equirectangular = 'equirectangular'
half_equirectangular = 'half_equirectangular'
class SwapMode:
replace = 'replace'
class UnpublishedContentType:
ads_post = 'ADS_POST'
draft = 'DRAFT'
inline_created = 'INLINE_CREATED'
published = 'PUBLISHED'
reviewable_branded_content = 'REVIEWABLE_BRANDED_CONTENT'
scheduled = 'SCHEDULED'
scheduled_recurring = 'SCHEDULED_RECURRING'
class UploadPhase:
cancel = 'cancel'
finish = 'finish'
start = 'start'
transfer = 'transfer'
class Type:
tagged = 'tagged'
uploaded = 'uploaded'
class BackdatedTimeGranularity:
day = 'day'
hour = 'hour'
min = 'min'
month = 'month'
none = 'none'
year = 'year'
@classmethod
def get_endpoint(cls):
return 'advideos'
def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.adobjects.adaccount import AdAccount
return AdAccount(api=self._api, fbid=parent_id).create_ad_video(fields, params, batch, success, failure, pending)
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdVideo,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
    # POST /<video-id> -- update mutable fields of this video. `param_types`
    # lists the accepted request parameters and their Graph API type strings;
    # enum-typed parameters are validated against the value sets in `enums`.
    def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'ad_breaks': 'list',
            'allow_bm_crossposting': 'bool',
            'allow_crossposting_for_pages': 'list<Object>',
            'backdated_time': 'datetime',
            'backdated_time_granularity': 'backdated_time_granularity_enum',
            'call_to_action': 'Object',
            'content_category': 'content_category_enum',
            'content_tags': 'list<string>',
            'custom_labels': 'list<string>',
            'description': 'string',
            'direct_share_status': 'unsigned int',
            'embeddable': 'bool',
            'expiration': 'Object',
            'expire_now': 'bool',
            'increment_play_count': 'bool',
            'name': 'string',
            'preferred_thumbnail_id': 'string',
            'privacy': 'string',
            'publish_to_news_feed': 'bool',
            'publish_to_videos_tab': 'bool',
            'published': 'bool',
            'scheduled_publish_time': 'unsigned int',
            'social_actions': 'bool',
            'sponsor_id': 'string',
            'sponsor_relationship': 'unsigned int',
            'tags': 'list<string>',
            'target': 'string',
            'universal_video_id': 'string',
        }
        enums = {
            'backdated_time_granularity_enum': AdVideo.BackdatedTimeGranularity.__dict__.values(),
            'content_category_enum': AdVideo.ContentCategory.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/captions -- list caption tracks attached to this video.
    def get_captions(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/captions',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/captions -- upload a caption file and/or delete locales.
    def create_caption(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'captions_file': 'file',
            'default_locale': 'string',
            'locales_to_delete': 'list<string>',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/captions',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdVideo, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/comments -- list comments, optionally filtered/ordered.
    def get_comments(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.comment import Comment
        param_types = {
            'filter': 'filter_enum',
            'live_filter': 'live_filter_enum',
            'order': 'order_enum',
            'since': 'datetime',
        }
        enums = {
            'filter_enum': Comment.Filter.__dict__.values(),
            'live_filter_enum': Comment.LiveFilter.__dict__.values(),
            'order_enum': Comment.Order.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/comments',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=Comment,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=Comment, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/comments -- publish a comment on this video.
    def create_comment(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.comment import Comment
        param_types = {
            'attachment_id': 'string',
            'attachment_share_url': 'string',
            'attachment_url': 'string',
            'comment_privacy_value': 'comment_privacy_value_enum',
            'facepile_mentioned_ids': 'list<string>',
            'feedback_source': 'string',
            'is_offline': 'bool',
            'message': 'string',
            'nectar_module': 'string',
            'object_id': 'string',
            'parent_comment_id': 'Object',
            'text': 'string',
            'tracking': 'string',
        }
        enums = {
            'comment_privacy_value_enum': Comment.CommentPrivacyValue.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/comments',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=Comment,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=Comment, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/crosspost_shared_pages -- Pages this video is crossposted to.
    def get_crosspost_shared_pages(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.page import Page
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/crosspost_shared_pages',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=Page,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=Page, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/likes -- profiles that liked this video.
    def get_likes(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.profile import Profile
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/likes',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=Profile,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=Profile, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/likes -- like this video on behalf of the caller.
    def create_like(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'feedback_source': 'string',
            'nectar_module': 'string',
            'notify': 'bool',
            'tracking': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/likes',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdVideo, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/poll_settings -- poll configuration for this video.
    def get_poll_settings(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/poll_settings',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/polls -- polls attached to this video.
    def get_polls(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.videopoll import VideoPoll
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/polls',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=VideoPoll,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/polls -- create a poll (question/options) on this video.
    def create_poll(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.videopoll import VideoPoll
        param_types = {
            'close_after_voting': 'bool',
            'correct_option': 'unsigned int',
            'default_open': 'bool',
            'options': 'list<string>',
            'question': 'string',
            'show_gradient': 'bool',
            'show_results': 'bool',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/polls',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=VideoPoll,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/sponsor_tags -- Pages tagged as sponsors of this video.
    def get_sponsor_tags(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.page import Page
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/sponsor_tags',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=Page,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=Page, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/tags -- tags on this video.
    def get_tags(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/tags',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/tags -- tag a user in this video.
    def create_tag(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'tag_uid': 'int',
            'uid': 'int',
            'vid': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/tags',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdVideo, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/thumbnails -- thumbnails generated/uploaded for this video.
    def get_thumbnails(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.videothumbnail import VideoThumbnail
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/thumbnails',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=VideoThumbnail,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=VideoThumbnail, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # POST /<video-id>/thumbnails -- upload a thumbnail image file.
    def create_thumbnail(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'is_preferred': 'bool',
            'source': 'file',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/thumbnails',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdVideo,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdVideo, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # GET /<video-id>/video_insights -- metrics for this video over a period.
    def get_video_insights(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
          api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.insightsresult import InsightsResult
        param_types = {
            'metric': 'list<Object>',
            'period': 'period_enum',
            'since': 'datetime',
            'until': 'datetime',
        }
        enums = {
            'period_enum': InsightsResult.Period.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/video_insights',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=InsightsResult,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=InsightsResult, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
_field_types = {
'ad_breaks': 'list<int>',
'backdated_time': 'datetime',
'backdated_time_granularity': 'string',
'content_category': 'string',
'content_tags': 'list<string>',
'copyright': 'VideoCopyright',
'copyright_monitoring_status': 'string',
'created_time': 'datetime',
'custom_labels': 'list<string>',
'description': 'string',
'embed_html': 'string',
'embeddable': 'bool',
'event': 'Event',
'expiration': 'Object',
'format': 'list<Object>',
'from': 'Object',
'icon': 'string',
'id': 'string',
'is_crosspost_video': 'bool',
'is_crossposting_eligible': 'bool',
'is_episode': 'bool',
'is_instagram_eligible': 'bool',
'is_reference_only': 'bool',
'length': 'float',
'live_audience_count': 'unsigned int',
'live_status': 'string',
'music_video_copyright': 'MusicVideoCopyright',
'permalink_url': 'string',
'picture': 'string',
'place': 'Place',
'premiere_living_room_status': 'string',
'privacy': 'Privacy',
'published': 'bool',
'scheduled_publish_time': 'datetime',
'source': 'string',
'spherical': 'bool',
'status': 'Object',
'title': 'string',
'universal_video_id': 'string',
'updated_time': 'datetime',
'adaptive_type': 'string',
'animated_effect_id': 'unsigned int',
'application_id': 'string',
'asked_fun_fact_prompt_id': 'unsigned int',
'audio_story_wave_animation_handle': 'string',
'chunk_session_id': 'string',
'composer_entry_picker': 'string',
'composer_entry_point': 'string',
'composer_entry_time': 'unsigned int',
'composer_session_events_log': 'string',
'composer_session_id': 'string',
'composer_source_surface': 'string',
'composer_type': 'string',
'container_type': 'ContainerType',
'creative_tools': 'string',
'end_offset': 'unsigned int',
'fbuploader_video_file_chunk': 'string',
'file_size': 'unsigned int',
'file_url': 'string',
'fisheye_video_cropped': 'bool',
'formatting': 'Formatting',
'fov': 'unsigned int',
'front_z_rotation': 'float',
'fun_fact_prompt_id': 'unsigned int',
'fun_fact_toastee_id': 'unsigned int',
'guide': 'list<list<unsigned int>>',
'guide_enabled': 'bool',
'has_nickname': 'bool',
'holiday_card': 'string',
'initial_heading': 'unsigned int',
'initial_pitch': 'unsigned int',
'instant_game_entry_point_data': 'string',
'is_boost_intended': 'bool',
'is_group_linking_post': 'bool',
'is_voice_clip': 'bool',
'location_source_id': 'string',
'name': 'string',
'offer_like_post_id': 'unsigned int',
'og_action_type_id': 'string',
'og_icon_id': 'string',
'og_object_id': 'string',
'og_phrase': 'string',
'og_suggestion_mechanism': 'string',
'original_fov': 'unsigned int',
'original_projection_type': 'OriginalProjectionType',
'publish_event_id': 'unsigned int',
'react_mode_metadata': 'string',
'referenced_sticker_id': 'string',
'replace_video_id': 'string',
'sales_promo_id': 'unsigned int',
'slideshow_spec': 'map',
'source_instagram_media_id': 'string',
'start_offset': 'unsigned int',
'swap_mode': 'SwapMode',
'text_format_metadata': 'string',
'throwback_camera_roll_media': 'string',
'thumb': 'file',
'time_since_original_post': 'unsigned int',
'transcode_setting_properties': 'string',
'unpublished_content_type': 'UnpublishedContentType',
'upload_phase': 'UploadPhase',
'upload_session_id': 'string',
'upload_setting_properties': 'string',
'video_file_chunk': 'string',
'video_id_original': 'string',
'video_start_time_ms': 'unsigned int',
'waterfall_id': 'string',
'filename': 'file'
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['ContainerType'] = AdVideo.ContainerType.__dict__.values()
field_enum_info['ContentCategory'] = AdVideo.ContentCategory.__dict__.values()
field_enum_info['Formatting'] = AdVideo.Formatting.__dict__.values()
field_enum_info['OriginalProjectionType'] = AdVideo.OriginalProjectionType.__dict__.values()
field_enum_info['SwapMode'] = AdVideo.SwapMode.__dict__.values()
field_enum_info['UnpublishedContentType'] = AdVideo.UnpublishedContentType.__dict__.values()
field_enum_info['UploadPhase'] = AdVideo.UploadPhase.__dict__.values()
field_enum_info['Type'] = AdVideo.Type.__dict__.values()
field_enum_info['BackdatedTimeGranularity'] = AdVideo.BackdatedTimeGranularity.__dict__.values()
return field_enum_info
    def remote_create(
        self,
        batch=None,
        failure=None,
        params=None,
        success=None,
    ):
        """Create the video remotely from a slideshow spec or a local file.

        When `slideshow_spec` is set on this object a single creation request
        is sent to the parent's /advideos edge; otherwise the object is handed
        to VideoUploader (which requires `filepath`). The parsed response is
        stored on this object via _set_data() and returned.

        Raises FacebookBadObjectError when neither `filepath` nor
        `slideshow_spec` is present.

        NOTE(review): the batch/failure/params/success arguments are accepted
        but never used in this body -- confirm whether that is intentional.
        """
        from facebook_business.exceptions import FacebookBadObjectError
        from facebook_business.video_uploader import (
            VideoUploader,
            VideoUploadRequest,
        )
        if (self.Field.slideshow_spec in self and
           self[self.Field.slideshow_spec] is not None):
            request = VideoUploadRequest(self.get_api_assured())
            request.setParams(params={'slideshow_spec': {
                'images_urls': self[self.Field.slideshow_spec]['images_urls'],
                'duration_ms': self[self.Field.slideshow_spec]['duration_ms'],
                'transition_ms': self[self.Field.slideshow_spec]['transition_ms'],
            }})
            response = request.send((self.get_parent_id_assured(), 'advideos')).json()
        elif not (self.Field.filepath in self):
            raise FacebookBadObjectError(
                "AdVideo requires a filepath or slideshow_spec to be defined.",
            )
        else:
            video_uploader = VideoUploader()
            response = video_uploader.upload(self)
        self._set_data(response)
        return response
    def waitUntilEncodingReady(self, interval=30, timeout=600):
        """Block until this video has finished server-side encoding.

        `interval` and `timeout` are passed straight through to
        VideoEncodingStatusChecker.waitUntilReady (poll interval and overall
        deadline, presumably in seconds -- confirm in video_uploader).

        Raises FacebookError when this object has no 'id' yet (i.e. the video
        was never created/uploaded).
        """
        from facebook_business.video_uploader import VideoEncodingStatusChecker
        from facebook_business.exceptions import FacebookError
        if 'id' not in self:
            raise FacebookError(
                'Invalid Video ID',
            )
        VideoEncodingStatusChecker.waitUntilReady(
            self.get_api_assured(),
            self['id'],
            interval,
            timeout,
        )
| true | true |
f71bd3a0366aa96de99eb6814ff7287178cc8592 | 1,849 | py | Python | solver.py | gtxn/wordle-solver | 48e8317bd1930884fa65d5bd0559b0341af456b6 | [
"MIT"
] | 1 | 2022-02-25T06:11:26.000Z | 2022-02-25T06:11:26.000Z | solver.py | gtxn/wordle-solver | 48e8317bd1930884fa65d5bd0559b0341af456b6 | [
"MIT"
] | null | null | null | solver.py | gtxn/wordle-solver | 48e8317bd1930884fa65d5bd0559b0341af456b6 | [
"MIT"
] | null | null | null | from multiprocessing.sharedctypes import Value
from global_vars import *
from utils import *
def state_inp_to_str(state_inp):
    """Translate a colour string such as 'bygbb' into its digit form.

    'b' -> '0', 'y' -> '1', 'g' -> '2'.
    """
    return ''.join(str(['b', 'y', 'g'].index(ch)) for ch in state_inp)
def get_state_input():
    """Prompt until the user enters exactly five b/y/g colours.

    Returns the validated input converted to a digit string via
    state_inp_to_str.
    """
    while True:
        state_inp = input(
            "Fill in colours in the following format: b for black, y for yellow and g for green. For example, if a word had no correct letters, the input would be 'bbbbb': ").strip().lower()
        ok = True
        if len(state_inp) != 5:
            print("Please make sure exactly 5 colours were put.")
            ok = False
        for ch in state_inp:
            if ch not in 'byg':
                print("Only letters b, y, and g are allowed")
                ok = False
        print()
        if ok:
            return state_inp_to_str(state_inp)
def get_confirm(msg):
    """Ask *msg* as a yes/no question on stdin.

    Returns True for 'y' and False for 'n' (case-insensitive, whitespace
    stripped); anything else prints an error and re-prompts.
    """
    yes_no = input(
        f"{msg} (y/n): ").strip().lower()
    if yes_no == 'y':
        return True
    elif yes_no == 'n':
        return False
    print("Invalid input.")
    # Bug fix: the retry's answer was previously discarded (no `return`), so
    # one invalid input made this function return None regardless of the
    # eventual valid answer.
    return get_confirm(msg)
# --- Interactive solving session -------------------------------------------
print('='*9)
rnd = 1
did_win = False
# Start from every allowed guess. GUESS_ARR, STATE_MAP and get_guess come from
# the star imports of global_vars/utils (assumed -- confirm in those modules).
avail_words = GUESS_ARR
# NOTE(review): only 5 rounds are played although Wordle allows 6 guesses --
# confirm this is intentional.
while rnd <= 5 and not did_win:
    # `entropy` of the recommended guess is computed but never displayed.
    guess_rec, entropy = get_guess(avail_words)
    print(f"Recommended word: {guess_rec.upper()}")
    did_follow_rec = get_confirm('Did you follow the recommended word?')
    if not did_follow_rec:
        guess = input("What word did you input: ")
    else:
        guess = guess_rec
    did_win = get_confirm('Did you win?')
    if did_win:
        break
    print()
    # Read the colour feedback and keep only words consistent with it.
    state = get_state_input()
    avail_words = set(STATE_MAP[guess][state]) & set(avail_words)
    print("---")
    rnd += 1
if did_win:
    print('Congrats! You won!')
else:
    print('No... Sorry about that. Better luck next time!')
| 22.277108 | 190 | 0.60411 | from multiprocessing.sharedctypes import Value
from global_vars import *
from utils import *
def state_inp_to_str(state_inp):
    """Translate a colour string such as 'bygbb' into its digit form.

    'b' -> '0', 'y' -> '1', 'g' -> '2'.
    """
    return ''.join(str(['b', 'y', 'g'].index(ch)) for ch in state_inp)
def get_state_input():
    """Prompt until the user enters exactly five b/y/g colours.

    Returns the validated input converted to a digit string via
    state_inp_to_str.
    """
    while True:
        state_inp = input(
            "Fill in colours in the following format: b for black, y for yellow and g for green. For example, if a word had no correct letters, the input would be 'bbbbb': ").strip().lower()
        ok = True
        if len(state_inp) != 5:
            print("Please make sure exactly 5 colours were put.")
            ok = False
        for ch in state_inp:
            if ch not in 'byg':
                print("Only letters b, y, and g are allowed")
                ok = False
        print()
        if ok:
            return state_inp_to_str(state_inp)
def get_confirm(msg):
    """Ask *msg* as a yes/no question on stdin.

    Returns True for 'y' and False for 'n' (case-insensitive, whitespace
    stripped); anything else prints an error and re-prompts.
    """
    yes_no = input(
        f"{msg} (y/n): ").strip().lower()
    if yes_no == 'y':
        return True
    elif yes_no == 'n':
        return False
    print("Invalid input.")
    # Bug fix: the retry's answer was previously discarded (no `return`), so
    # one invalid input made this function return None regardless of the
    # eventual valid answer.
    return get_confirm(msg)
# --- Interactive solving session -------------------------------------------
print('='*9)
rnd = 1
did_win = False
# Start from every allowed guess. GUESS_ARR, STATE_MAP and get_guess come from
# the star imports of global_vars/utils (assumed -- confirm in those modules).
avail_words = GUESS_ARR
# NOTE(review): only 5 rounds are played although Wordle allows 6 guesses --
# confirm this is intentional.
while rnd <= 5 and not did_win:
    # `entropy` of the recommended guess is computed but never displayed.
    guess_rec, entropy = get_guess(avail_words)
    print(f"Recommended word: {guess_rec.upper()}")
    did_follow_rec = get_confirm('Did you follow the recommended word?')
    if not did_follow_rec:
        guess = input("What word did you input: ")
    else:
        guess = guess_rec
    did_win = get_confirm('Did you win?')
    if did_win:
        break
    print()
    # Read the colour feedback and keep only words consistent with it.
    state = get_state_input()
    avail_words = set(STATE_MAP[guess][state]) & set(avail_words)
    print("---")
    rnd += 1
if did_win:
    print('Congrats! You won!')
else:
    print('No... Sorry about that. Better luck next time!')
| true | true |
f71bd4c71e4fbf7580c2aedbde090cc21504f482 | 1,367 | py | Python | mysite/urls.py | vansjyo/OSVI-RemoteControl | 6d3dd6aa1cceac2254171d57b33975df08cda2a8 | [
"MIT"
] | null | null | null | mysite/urls.py | vansjyo/OSVI-RemoteControl | 6d3dd6aa1cceac2254171d57b33975df08cda2a8 | [
"MIT"
] | null | null | null | mysite/urls.py | vansjyo/OSVI-RemoteControl | 6d3dd6aa1cceac2254171d57b33975df08cda2a8 | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('runcode/', include('runcode.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from django.conf import settings
from django.views.static import serve
from django.conf.urls.static import static
# URL table: admin, account/auth flows, the static home page and the runcode app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('accounts.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('', TemplateView.as_view(template_name='home.html'), name='home'),
    path('runcode/', include('runcode.urls')),
]

# Serve user-uploaded media at /media/.
# NOTE(review): django.views.static.serve is documented as development-only;
# in production media should be served by the web server / CDN instead.
urlpatterns += [
    re_path(r'^media/(?P<path>.*)$', serve, {
        'document_root': settings.MEDIA_ROOT,
    }),
]
| 37.972222 | 77 | 0.702999 | from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from django.conf import settings
from django.views.static import serve
from django.conf.urls.static import static
# URL table: admin, account/auth flows, the static home page and the runcode app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('accounts.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('', TemplateView.as_view(template_name='home.html'), name='home'),
    path('runcode/', include('runcode.urls')),
]

# Serve user-uploaded media at /media/.
# NOTE(review): django.views.static.serve is documented as development-only;
# in production media should be served by the web server / CDN instead.
urlpatterns += [
    re_path(r'^media/(?P<path>.*)$', serve, {
        'document_root': settings.MEDIA_ROOT,
    }),
]
| true | true |
f71bd4cab6d4e1f08fe404d5664c23ec2a6f827b | 319 | py | Python | openapi2ceres/main.py | laulin/openapi2ceres | 16622d399b0baed0159f62bd44f405ab2565126c | [
"Apache-2.0"
] | null | null | null | openapi2ceres/main.py | laulin/openapi2ceres | 16622d399b0baed0159f62bd44f405ab2565126c | [
"Apache-2.0"
] | null | null | null | openapi2ceres/main.py | laulin/openapi2ceres | 16622d399b0baed0159f62bd44f405ab2565126c | [
"Apache-2.0"
] | null | null | null | from pprint import pprint
from .args import get_args
from .openapifile import OpenAPIFile
from .ceresfile import CeresFile
def main():
    """CLI entry point: load the OpenAPI file named on the command line and
    produce Ceres output in the requested directory."""
    args = get_args()
    spec = OpenAPIFile()
    spec.load(args.input)
    CeresFile(spec, args.output_dir).process()
| 22.785714 | 60 | 0.742947 | from pprint import pprint
from .args import get_args
from .openapifile import OpenAPIFile
from .ceresfile import CeresFile
def main():
    """CLI entry point: load the OpenAPI file named on the command line and
    produce Ceres output in the requested directory."""
    args = get_args()
    spec = OpenAPIFile()
    spec.load(args.input)
    CeresFile(spec, args.output_dir).process()
| true | true |
f71bd4d65ed44d77bfd69c5414311187e9b51c70 | 1,447 | py | Python | setup.py | fusion-energy/openmc_mesh_tally_to_vtk | b0a9077da79363dbf758d951ca68e5e5365d09ad | [
"MIT"
] | null | null | null | setup.py | fusion-energy/openmc_mesh_tally_to_vtk | b0a9077da79363dbf758d951ca68e5e5365d09ad | [
"MIT"
] | 3 | 2021-11-19T23:32:23.000Z | 2022-02-15T19:58:40.000Z | setup.py | fusion-energy/openmc_mesh_tally_to_vtk | b0a9077da79363dbf758d951ca68e5e5365d09ad | [
"MIT"
] | null | null | null | import setuptools
# Long description for PyPI comes straight from the README. The encoding is
# pinned so the build does not depend on the platform's default locale
# (reading UTF-8 READMEs fails on e.g. Windows cp1252 otherwise).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="openmc_mesh_tally_to_vtk",
    # NOTE(review): "develop" is not a PEP 440 version string -- confirm the
    # release tooling replaces it before publishing.
    version="develop",
    author="The Regular Mesh Plotter Development Team",
    author_email="mail@jshimwell.com",
    description="A Python package for converting OpenMC mesh tallies to VTK files and optionally converting the units",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/fusion-energy/openmc_mesh_tally_to_vtk",
    packages=setuptools.find_packages(),
    classifiers=[
        "Natural Language :: English",
        "Topic :: Scientific/Engineering",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    # Ship the readme and licence files alongside the package sources.
    package_data={
        "openmc_mesh_tally_to_vtk": [
            "README.md",
            "LICENSE",
        ]
    },
    install_requires=[
        "numpy>=1.21.1",
        "matplotlib>=3.4.2",
        "trimesh",
        "shapely",
        "scipy",
        "dagmc_geometry_slice_plotter",
        "openmc_tally_unit_converter",
        "vtk",
    ],
)
| 31.456522 | 119 | 0.612301 | import setuptools
# Long description for PyPI comes straight from the README. The encoding is
# pinned so the build does not depend on the platform's default locale
# (reading UTF-8 READMEs fails on e.g. Windows cp1252 otherwise).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="openmc_mesh_tally_to_vtk",
    # NOTE(review): "develop" is not a PEP 440 version string -- confirm the
    # release tooling replaces it before publishing.
    version="develop",
    author="The Regular Mesh Plotter Development Team",
    author_email="mail@jshimwell.com",
    description="A Python package for converting OpenMC mesh tallies to VTK files and optionally converting the units",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/fusion-energy/openmc_mesh_tally_to_vtk",
    packages=setuptools.find_packages(),
    classifiers=[
        "Natural Language :: English",
        "Topic :: Scientific/Engineering",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    # Ship the readme and licence files alongside the package sources.
    package_data={
        "openmc_mesh_tally_to_vtk": [
            "README.md",
            "LICENSE",
        ]
    },
    install_requires=[
        "numpy>=1.21.1",
        "matplotlib>=3.4.2",
        "trimesh",
        "shapely",
        "scipy",
        "dagmc_geometry_slice_plotter",
        "openmc_tally_unit_converter",
        "vtk",
    ],
)
| true | true |
f71bd54ef148217f90cdec13b843fb943c0543ba | 18,592 | py | Python | saleor/order/models.py | vantrong291/saleor | d4820d53f7f9825510c7ea0c41cacbda6b612452 | [
"CC-BY-4.0"
] | null | null | null | saleor/order/models.py | vantrong291/saleor | d4820d53f7f9825510c7ea0c41cacbda6b612452 | [
"CC-BY-4.0"
] | null | null | null | saleor/order/models.py | vantrong291/saleor | d4820d53f7f9825510c7ea0c41cacbda6b612452 | [
"CC-BY-4.0"
] | null | null | null | from decimal import Decimal
from operator import attrgetter
from uuid import uuid4
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F, Max, Sum
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField, TaxedMoneyField
from measurement.measures import Weight
from prices import Money
from ..account.models import Address
from ..core.taxes import zero_money, zero_taxed_money
from ..core.utils.json_serializer import CustomJsonEncoder
from ..core.weight import WeightUnits, zero_weight
from ..discount.models import Voucher
from ..giftcard.models import GiftCard
from ..payment import ChargeStatus, TransactionKind
from ..shipping.models import ShippingMethod
from . import FulfillmentStatus, OrderEvents, OrderStatus
class OrderQueryset(models.QuerySet):
    """Queryset with order-lifecycle filters shared across the app."""

    def confirmed(self):
        """Return non-draft orders."""
        return self.exclude(status=OrderStatus.DRAFT)

    def drafts(self):
        """Return draft orders."""
        return self.filter(status=OrderStatus.DRAFT)

    def ready_to_fulfill(self):
        """Return orders that can be fulfilled.

        Orders ready to fulfill are fully paid but unfulfilled (or partially
        fulfilled).
        """
        statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
        qs = self.filter(status__in=statuses, payments__is_active=True)
        qs = qs.annotate(amount_paid=Sum("payments__captured_amount"))
        return qs.filter(total_gross_amount__lte=F("amount_paid"))

    def ready_to_capture(self):
        """Return orders with payments to capture.

        Orders ready to capture are those which are not draft or canceled and
        have a preauthorized payment. The preauthorized payment can not
        already be partially or fully captured.
        """
        qs = self.filter(
            payments__is_active=True, payments__charge_status=ChargeStatus.NOT_CHARGED
        )
        # Bug fix: ``exclude(status={...})`` compared the CharField against a
        # *set* object; the membership lookup must use ``status__in`` (as
        # ``ready_to_fulfill`` above already does).
        qs = qs.exclude(status__in={OrderStatus.DRAFT, OrderStatus.CANCELED})
        return qs.distinct()
class Order(models.Model):
    """A customer order: addresses, monetary totals, payments and lines.

    Monetary values are stored as raw decimal ``*_amount`` columns plus a
    shared ``currency`` column; the ``MoneyField``/``TaxedMoneyField``
    descriptors combine them into price objects.
    """

    created = models.DateTimeField(default=now, editable=False)
    status = models.CharField(
        max_length=32, default=OrderStatus.UNFULFILLED, choices=OrderStatus.CHOICES
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        related_name="orders",
        on_delete=models.SET_NULL,
    )
    language_code = models.CharField(max_length=35, default=settings.LANGUAGE_CODE)
    tracking_client_id = models.CharField(max_length=36, blank=True, editable=False)
    billing_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    shipping_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    user_email = models.EmailField(blank=True, default="")
    currency = models.CharField(
        max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
        default=settings.DEFAULT_CURRENCY,
    )
    shipping_method = models.ForeignKey(
        ShippingMethod,
        blank=True,
        null=True,
        related_name="orders",
        on_delete=models.SET_NULL,
    )
    shipping_method_name = models.CharField(
        max_length=255, null=True, default=None, blank=True, editable=False
    )
    shipping_price_net_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
        editable=False,
    )
    shipping_price_net = MoneyField(
        amount_field="shipping_price_net_amount", currency_field="currency"
    )
    shipping_price_gross_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
        editable=False,
    )
    shipping_price_gross = MoneyField(
        amount_field="shipping_price_gross_amount", currency_field="currency"
    )
    shipping_price = TaxedMoneyField(
        net_amount_field="shipping_price_net_amount",
        gross_amount_field="shipping_price_gross_amount",
        currency_field="currency",
    )
    token = models.CharField(max_length=36, unique=True, blank=True)
    # Token of a checkout instance that this order was created from
    checkout_token = models.CharField(max_length=36, blank=True)
    total_net_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    total_net = MoneyField(amount_field="total_net_amount", currency_field="currency")
    total_gross_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    total_gross = MoneyField(
        amount_field="total_gross_amount", currency_field="currency"
    )
    total = TaxedMoneyField(
        net_amount_field="total_net_amount",
        gross_amount_field="total_gross_amount",
        currency_field="currency",
    )
    voucher = models.ForeignKey(
        Voucher, blank=True, null=True, related_name="+", on_delete=models.SET_NULL
    )
    gift_cards = models.ManyToManyField(GiftCard, blank=True, related_name="orders")
    discount_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    discount = MoneyField(amount_field="discount_amount", currency_field="currency")
    discount_name = models.CharField(max_length=255, default="", blank=True)
    translated_discount_name = models.CharField(max_length=255, default="", blank=True)
    display_gross_prices = models.BooleanField(default=True)
    customer_note = models.TextField(blank=True, default="")
    weight = MeasurementField(
        measurement=Weight, unit_choices=WeightUnits.CHOICES, default=zero_weight
    )
    objects = OrderQueryset.as_manager()

    class Meta:
        ordering = ("-pk",)
        permissions = (
            (
                "manage_orders",
                pgettext_lazy("Permission description", "Manage orders."),
            ),
        )

    def save(self, *args, **kwargs):
        """Assign a fresh UUID token on first save."""
        if not self.token:
            self.token = str(uuid4())
        return super().save(*args, **kwargs)

    def is_fully_paid(self):
        """Return True when captured payments cover the gross total."""
        return self._total_paid().gross >= self.total.gross

    def is_partly_paid(self):
        """Return True when any positive amount has been captured."""
        return self._total_paid().gross.amount > 0

    def get_customer_email(self):
        """Prefer the account email; fall back to the guest checkout email."""
        return self.user.email if self.user else self.user_email

    def _total_paid(self):
        # Get total paid amount from partially charged,
        # fully charged and partially refunded payments
        payments = self.payments.filter(
            charge_status__in=[
                ChargeStatus.PARTIALLY_CHARGED,
                ChargeStatus.FULLY_CHARGED,
                ChargeStatus.PARTIALLY_REFUNDED,
            ]
        )
        total_captured = [payment.get_captured_amount() for payment in payments]
        return sum(total_captured, zero_taxed_money())

    def _index_billing_phone(self):
        return self.billing_address.phone

    def _index_shipping_phone(self):
        return self.shipping_address.phone

    def __iter__(self):
        return iter(self.lines.all())

    def __repr__(self):
        return "<Order #%r>" % (self.id,)

    def __str__(self):
        return "#%d" % (self.id,)

    def get_absolute_url(self):
        return reverse("order:details", kwargs={"token": self.token})

    def get_last_payment(self):
        """Return the most recently created payment (highest pk) or None."""
        return max(self.payments.all(), default=None, key=attrgetter("pk"))

    def get_payment_status(self):
        """Return the charge status of the last payment, if any."""
        last_payment = self.get_last_payment()
        if last_payment:
            return last_payment.charge_status
        return ChargeStatus.NOT_CHARGED

    def get_payment_status_display(self):
        """Return the human-readable charge status of the last payment."""
        last_payment = self.get_last_payment()
        if last_payment:
            return last_payment.get_charge_status_display()
        return dict(ChargeStatus.CHOICES).get(ChargeStatus.NOT_CHARGED)

    def is_pre_authorized(self):
        """Return True when an active payment has a successful AUTH."""
        return (
            self.payments.filter(
                is_active=True, transactions__kind=TransactionKind.AUTH
            )
            .filter(transactions__is_success=True)
            .exists()
        )

    @property
    def quantity_fulfilled(self):
        return sum(line.quantity_fulfilled for line in self)

    def is_shipping_required(self):
        return any(line.is_shipping_required for line in self)

    def get_subtotal(self):
        """Return the taxed sum of all line totals."""
        return sum((line.get_total() for line in self), zero_taxed_money())

    def get_total_quantity(self):
        return sum(line.quantity for line in self)

    def is_draft(self):
        return self.status == OrderStatus.DRAFT

    def is_open(self):
        statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
        return self.status in statuses

    def can_cancel(self):
        return self.status not in {OrderStatus.CANCELED, OrderStatus.DRAFT}

    def _can_act_on_payments(self):
        """Shared guard: drafts and canceled orders allow no payment actions."""
        return self.status not in {OrderStatus.DRAFT, OrderStatus.CANCELED}

    def can_capture(self, payment=None):
        if not payment:
            payment = self.get_last_payment()
        if not payment:
            return False
        return payment.can_capture() and self._can_act_on_payments()

    def can_charge(self, payment=None):
        if not payment:
            payment = self.get_last_payment()
        if not payment:
            return False
        return payment.can_charge() and self._can_act_on_payments()

    def can_void(self, payment=None):
        if not payment:
            payment = self.get_last_payment()
        if not payment:
            return False
        return payment.can_void()

    def can_refund(self, payment=None):
        if not payment:
            payment = self.get_last_payment()
        if not payment:
            return False
        return payment.can_refund()

    def can_mark_as_paid(self):
        # exists() avoids fetching every payment row just to count them.
        return not self.payments.exists()

    @property
    def total_authorized(self):
        payment = self.get_last_payment()
        if payment:
            return payment.get_authorized_amount()
        return zero_money()

    @property
    def total_captured(self):
        payment = self.get_last_payment()
        if payment and payment.charge_status in (
            ChargeStatus.PARTIALLY_CHARGED,
            ChargeStatus.FULLY_CHARGED,
            ChargeStatus.PARTIALLY_REFUNDED,
        ):
            return Money(payment.captured_amount, payment.currency)
        return zero_money()

    @property
    def total_balance(self):
        """Captured minus owed; negative while the order is underpaid."""
        return self.total_captured - self.total.gross

    def get_total_weight(self):
        return self.weight

    def product_to_string(self):
        """Return all lines as one ' <product> <variant>, ' string."""
        # join() replaces the previous quadratic string concatenation loop.
        return "".join(
            f" {line.product_name} {line.variant_name}, "
            for line in self.lines.all()
        )

    def product_to_list(self):
        """Return the product name of every line."""
        return [line.product_name for line in self.lines.all()]

    def variant_to_list(self):
        """Return per-line variant display names with stock counters."""
        return [
            {
                "variant": line.product_name + " " + line.variant_name,
                "quantity": line.variant.quantity,
                "quantity_allocated": line.variant.quantity_allocated,
            }
            for line in self.lines.all()
        ]

    def product_category_to_list(self):
        """Return the category name of every line's product."""
        return [line.variant.product.category.name for line in self.lines.all()]

    def product_type_to_list(self):
        """Return the product-type name of every line's product."""
        return [line.variant.product.product_type.name for line in self.lines.all()]

    def variant_to_string(self):
        """Return all variant names as one space-separated string."""
        return "".join(f" {line.variant_name}" for line in self.lines.all())
class OrderLineQueryset(models.QuerySet):
    def digital(self):
        """Yield only the lines whose variant is a digital product."""
        yield from (line for line in self.all() if line.is_digital)

    def physical(self):
        """Yield only the lines whose variant is a physical product."""
        yield from (line for line in self.all() if not line.is_digital)
class OrderLine(models.Model):
    """One purchasable line of an order.

    Product/variant names and the SKU are denormalized onto the line so the
    order history survives catalog changes (the ``variant`` FK is nullable
    and SET_NULL on delete).
    """

    order = models.ForeignKey(
        Order, related_name="lines", editable=False, on_delete=models.CASCADE
    )
    variant = models.ForeignKey(
        "product.ProductVariant",
        related_name="order_lines",
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    # max_length is as produced by ProductVariant's display_product method
    product_name = models.CharField(max_length=386)
    variant_name = models.CharField(max_length=255, default="", blank=True)
    translated_product_name = models.CharField(max_length=386, default="", blank=True)
    translated_variant_name = models.CharField(max_length=255, default="", blank=True)
    product_sku = models.CharField(max_length=32)
    is_shipping_required = models.BooleanField()
    quantity = models.IntegerField(validators=[MinValueValidator(1)])
    quantity_fulfilled = models.IntegerField(
        validators=[MinValueValidator(0)], default=0
    )
    currency = models.CharField(
        max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
        default=settings.DEFAULT_CURRENCY,
    )
    unit_price_net_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
    )
    unit_price_net = MoneyField(
        amount_field="unit_price_net_amount", currency_field="currency"
    )
    unit_price_gross_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
    )
    unit_price_gross = MoneyField(
        amount_field="unit_price_gross_amount", currency_field="currency"
    )
    unit_price = TaxedMoneyField(
        net_amount_field="unit_price_net_amount",
        gross_amount_field="unit_price_gross_amount",
        # Consistency fix: every other TaxedMoneyField/MoneyField in this
        # module passes ``currency_field``; ``currency="currency"`` was an
        # inconsistent keyword here.
        currency_field="currency",
    )
    tax_rate = models.DecimalField(
        max_digits=5, decimal_places=2, default=Decimal("0.0")
    )

    objects = OrderLineQueryset.as_manager()

    class Meta:
        ordering = ("pk",)

    def __str__(self):
        return (
            f"{self.product_name} ({self.variant_name})"
            if self.variant_name
            else self.product_name
        )

    def get_total(self):
        """Return the taxed unit price multiplied by the ordered quantity."""
        return self.unit_price * self.quantity

    @property
    def quantity_unfulfilled(self):
        """Units of this line still awaiting fulfillment."""
        return self.quantity - self.quantity_fulfilled

    @property
    def is_digital(self) -> bool:
        """Check if a variant is digital and contains digital content."""
        is_digital = self.variant.is_digital()
        has_digital = hasattr(self.variant, "digital_content")
        return is_digital and has_digital
class Fulfillment(models.Model):
    """A shipment of one or more order lines, numbered sequentially per order."""

    fulfillment_order = models.PositiveIntegerField(editable=False)
    order = models.ForeignKey(
        Order, related_name="fulfillments", editable=False, on_delete=models.CASCADE
    )
    status = models.CharField(
        max_length=32,
        default=FulfillmentStatus.FULFILLED,
        choices=FulfillmentStatus.CHOICES,
    )
    tracking_number = models.CharField(max_length=255, default="", blank=True)
    shipping_date = models.DateTimeField(default=now, editable=False)

    def __str__(self):
        return pgettext_lazy("Fulfillment str", "Fulfillment #%s") % (self.composed_id,)

    def __iter__(self):
        return iter(self.lines.all())

    def save(self, *args, **kwargs):
        """Assign an auto incremented value as a fulfillment order.

        NOTE(review): the max+1 lookup is not atomic; two concurrent first
        saves for the same order could race and assign duplicate numbers —
        confirm whether a DB constraint or select_for_update is needed.
        """
        if not self.pk:
            groups = self.order.fulfillments.all()
            existing_max = groups.aggregate(Max("fulfillment_order"))
            existing_max = existing_max.get("fulfillment_order__max")
            self.fulfillment_order = existing_max + 1 if existing_max is not None else 1
        return super().save(*args, **kwargs)

    @property
    def composed_id(self):
        """Human-readable '<order id>-<fulfillment number>' identifier."""
        return "%s-%s" % (self.order.id, self.fulfillment_order)

    def can_edit(self):
        return self.status != FulfillmentStatus.CANCELED

    def get_total_quantity(self):
        """Total number of units across all fulfillment lines."""
        return sum(line.quantity for line in self)
class FulfillmentLine(models.Model):
    """Associates a quantity of a single order line with one fulfillment."""

    # related_name="+" disables the reverse accessor from OrderLine.
    order_line = models.ForeignKey(
        OrderLine, related_name="+", on_delete=models.CASCADE
    )
    fulfillment = models.ForeignKey(
        Fulfillment, related_name="lines", on_delete=models.CASCADE
    )
    # Number of units of ``order_line`` included in this fulfillment.
    quantity = models.PositiveIntegerField()
class OrderEvent(models.Model):
    """Model used to store events that happened during the order lifecycle.

    Args:
        parameters: Values needed to display the event on the storefront
        type: Type of an order event (derived from ``OrderEvents.CHOICES``)
    """

    # When the event occurred; defaults to creation time.
    date = models.DateTimeField(default=now, editable=False)
    # Stored value is the upper-cased event name; label is the raw name.
    type = models.CharField(
        max_length=255,
        choices=[
            (type_name.upper(), type_name) for type_name, _ in OrderEvents.CHOICES
        ],
    )
    order = models.ForeignKey(Order, related_name="events", on_delete=models.CASCADE)
    # Free-form event payload serialized with the project's JSON encoder.
    parameters = JSONField(blank=True, default=dict, encoder=CustomJsonEncoder)
    # Actor who triggered the event; nullable so events survive user deletion.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="+",
    )

    class Meta:
        # Chronological order: oldest event first.
        ordering = ("date",)

    def __repr__(self):
        return f"{self.__class__.__name__}(type={self.type!r}, user={self.user!r})"
| 34.113761 | 186 | 0.677765 | from decimal import Decimal
from operator import attrgetter
from uuid import uuid4
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F, Max, Sum
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField, TaxedMoneyField
from measurement.measures import Weight
from prices import Money
from ..account.models import Address
from ..core.taxes import zero_money, zero_taxed_money
from ..core.utils.json_serializer import CustomJsonEncoder
from ..core.weight import WeightUnits, zero_weight
from ..discount.models import Voucher
from ..giftcard.models import GiftCard
from ..payment import ChargeStatus, TransactionKind
from ..shipping.models import ShippingMethod
from . import FulfillmentStatus, OrderEvents, OrderStatus
class OrderQueryset(models.QuerySet):
def confirmed(self):
return self.exclude(status=OrderStatus.DRAFT)
def drafts(self):
return self.filter(status=OrderStatus.DRAFT)
def ready_to_fulfill(self):
statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
qs = self.filter(status__in=statuses, payments__is_active=True)
qs = qs.annotate(amount_paid=Sum("payments__captured_amount"))
return qs.filter(total_gross_amount__lte=F("amount_paid"))
def ready_to_capture(self):
qs = self.filter(
payments__is_active=True, payments__charge_status=ChargeStatus.NOT_CHARGED
)
qs = qs.exclude(status={OrderStatus.DRAFT, OrderStatus.CANCELED})
return qs.distinct()
class Order(models.Model):
created = models.DateTimeField(default=now, editable=False)
status = models.CharField(
max_length=32, default=OrderStatus.UNFULFILLED, choices=OrderStatus.CHOICES
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="orders",
on_delete=models.SET_NULL,
)
language_code = models.CharField(max_length=35, default=settings.LANGUAGE_CODE)
tracking_client_id = models.CharField(max_length=36, blank=True, editable=False)
billing_address = models.ForeignKey(
Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
)
shipping_address = models.ForeignKey(
Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
)
user_email = models.EmailField(blank=True, default="")
currency = models.CharField(
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
default=settings.DEFAULT_CURRENCY,
)
shipping_method = models.ForeignKey(
ShippingMethod,
blank=True,
null=True,
related_name="orders",
on_delete=models.SET_NULL,
)
shipping_method_name = models.CharField(
max_length=255, null=True, default=None, blank=True, editable=False
)
shipping_price_net_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0,
editable=False,
)
shipping_price_net = MoneyField(
amount_field="shipping_price_net_amount", currency_field="currency"
)
shipping_price_gross_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0,
editable=False,
)
shipping_price_gross = MoneyField(
amount_field="shipping_price_gross_amount", currency_field="currency"
)
shipping_price = TaxedMoneyField(
net_amount_field="shipping_price_net_amount",
gross_amount_field="shipping_price_gross_amount",
currency_field="currency",
)
token = models.CharField(max_length=36, unique=True, blank=True)
checkout_token = models.CharField(max_length=36, blank=True)
total_net_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0,
)
total_net = MoneyField(amount_field="total_net_amount", currency_field="currency")
total_gross_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0,
)
total_gross = MoneyField(
amount_field="total_gross_amount", currency_field="currency"
)
total = TaxedMoneyField(
net_amount_field="total_net_amount",
gross_amount_field="total_gross_amount",
currency_field="currency",
)
voucher = models.ForeignKey(
Voucher, blank=True, null=True, related_name="+", on_delete=models.SET_NULL
)
gift_cards = models.ManyToManyField(GiftCard, blank=True, related_name="orders")
discount_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0,
)
discount = MoneyField(amount_field="discount_amount", currency_field="currency")
discount_name = models.CharField(max_length=255, default="", blank=True)
translated_discount_name = models.CharField(max_length=255, default="", blank=True)
display_gross_prices = models.BooleanField(default=True)
customer_note = models.TextField(blank=True, default="")
weight = MeasurementField(
measurement=Weight, unit_choices=WeightUnits.CHOICES, default=zero_weight
)
objects = OrderQueryset.as_manager()
class Meta:
ordering = ("-pk",)
permissions = (
(
"manage_orders",
pgettext_lazy("Permission description", "Manage orders."),
),
)
def save(self, *args, **kwargs):
if not self.token:
self.token = str(uuid4())
return super().save(*args, **kwargs)
def is_fully_paid(self):
total_paid = self._total_paid()
return total_paid.gross >= self.total.gross
def is_partly_paid(self):
total_paid = self._total_paid()
return total_paid.gross.amount > 0
def get_customer_email(self):
return self.user.email if self.user else self.user_email
def _total_paid(self):
payments = self.payments.filter(
charge_status__in=[
ChargeStatus.PARTIALLY_CHARGED,
ChargeStatus.FULLY_CHARGED,
ChargeStatus.PARTIALLY_REFUNDED,
]
)
total_captured = [payment.get_captured_amount() for payment in payments]
total_paid = sum(total_captured, zero_taxed_money())
return total_paid
def _index_billing_phone(self):
return self.billing_address.phone
def _index_shipping_phone(self):
return self.shipping_address.phone
def __iter__(self):
return iter(self.lines.all())
def __repr__(self):
return "<Order #%r>" % (self.id,)
def __str__(self):
return "#%d" % (self.id,)
def get_absolute_url(self):
return reverse("order:details", kwargs={"token": self.token})
def get_last_payment(self):
return max(self.payments.all(), default=None, key=attrgetter("pk"))
def get_payment_status(self):
last_payment = self.get_last_payment()
if last_payment:
return last_payment.charge_status
return ChargeStatus.NOT_CHARGED
def get_payment_status_display(self):
last_payment = self.get_last_payment()
if last_payment:
return last_payment.get_charge_status_display()
return dict(ChargeStatus.CHOICES).get(ChargeStatus.NOT_CHARGED)
def is_pre_authorized(self):
return (
self.payments.filter(
is_active=True, transactions__kind=TransactionKind.AUTH
)
.filter(transactions__is_success=True)
.exists()
)
@property
def quantity_fulfilled(self):
return sum([line.quantity_fulfilled for line in self])
def is_shipping_required(self):
return any(line.is_shipping_required for line in self)
def get_subtotal(self):
subtotal_iterator = (line.get_total() for line in self)
return sum(subtotal_iterator, zero_taxed_money())
def get_total_quantity(self):
return sum([line.quantity for line in self])
def is_draft(self):
return self.status == OrderStatus.DRAFT
def is_open(self):
statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
return self.status in statuses
def can_cancel(self):
return self.status not in {OrderStatus.CANCELED, OrderStatus.DRAFT}
def can_capture(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
order_status_ok = self.status not in {OrderStatus.DRAFT, OrderStatus.CANCELED}
return payment.can_capture() and order_status_ok
def can_charge(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
order_status_ok = self.status not in {OrderStatus.DRAFT, OrderStatus.CANCELED}
return payment.can_charge() and order_status_ok
def can_void(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
return payment.can_void()
def can_refund(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
return payment.can_refund()
def can_mark_as_paid(self):
return len(self.payments.all()) == 0
@property
def total_authorized(self):
payment = self.get_last_payment()
if payment:
return payment.get_authorized_amount()
return zero_money()
@property
def total_captured(self):
payment = self.get_last_payment()
if payment and payment.charge_status in (
ChargeStatus.PARTIALLY_CHARGED,
ChargeStatus.FULLY_CHARGED,
ChargeStatus.PARTIALLY_REFUNDED,
):
return Money(payment.captured_amount, payment.currency)
return zero_money()
@property
def total_balance(self):
return self.total_captured - self.total.gross
def get_total_weight(self):
return self.weight
def product_to_string(self):
str_product_result = ""
for line in self.lines.all():
str_product_result = str_product_result + " " + str(line.product_name) + " " + str(line.variant_name) + ", "
return str_product_result
def product_to_list(self):
list_product_result = []
for line in self.lines.all():
list_product_result.append(line.product_name)
return list_product_result
def variant_to_list(self):
list_variant_result = []
for line in self.lines.all():
list_variant_result.append({"variant": line.product_name + " " + line.variant_name, "quantity": line.variant.quantity, "quantity_allocated": line.variant.quantity_allocated})
return list_variant_result
def product_category_to_list(self):
list_product_category_result = []
for line in self.lines.all():
list_product_category_result.append(line.variant.product.category.name)
return list_product_category_result
def product_type_to_list(self):
list_product_type_result = []
for line in self.lines.all():
list_product_type_result.append(line.variant.product.product_type.name)
return list_product_type_result
def variant_to_string(self):
str_variant_result = ""
for line in self.lines.all():
str_variant_result = str_variant_result + " " + str(line.variant_name)
return str_variant_result
class OrderLineQueryset(models.QuerySet):
def digital(self):
for line in self.all():
if line.is_digital:
yield line
def physical(self):
for line in self.all():
if not line.is_digital:
yield line
class OrderLine(models.Model):
order = models.ForeignKey(
Order, related_name="lines", editable=False, on_delete=models.CASCADE
)
variant = models.ForeignKey(
"product.ProductVariant",
related_name="order_lines",
on_delete=models.SET_NULL,
blank=True,
null=True,
)
product_name = models.CharField(max_length=386)
variant_name = models.CharField(max_length=255, default="", blank=True)
translated_product_name = models.CharField(max_length=386, default="", blank=True)
translated_variant_name = models.CharField(max_length=255, default="", blank=True)
product_sku = models.CharField(max_length=32)
is_shipping_required = models.BooleanField()
quantity = models.IntegerField(validators=[MinValueValidator(1)])
quantity_fulfilled = models.IntegerField(
validators=[MinValueValidator(0)], default=0
)
currency = models.CharField(
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
default=settings.DEFAULT_CURRENCY,
)
unit_price_net_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
)
unit_price_net = MoneyField(
amount_field="unit_price_net_amount", currency_field="currency"
)
unit_price_gross_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
)
unit_price_gross = MoneyField(
amount_field="unit_price_gross_amount", currency_field="currency"
)
unit_price = TaxedMoneyField(
net_amount_field="unit_price_net_amount",
gross_amount_field="unit_price_gross_amount",
currency="currency",
)
tax_rate = models.DecimalField(
max_digits=5, decimal_places=2, default=Decimal("0.0")
)
objects = OrderLineQueryset.as_manager()
class Meta:
ordering = ("pk",)
def __str__(self):
return (
f"{self.product_name} ({self.variant_name})"
if self.variant_name
else self.product_name
)
def get_total(self):
return self.unit_price * self.quantity
@property
def quantity_unfulfilled(self):
return self.quantity - self.quantity_fulfilled
@property
def is_digital(self) -> bool:
is_digital = self.variant.is_digital()
has_digital = hasattr(self.variant, "digital_content")
return is_digital and has_digital
class Fulfillment(models.Model):
fulfillment_order = models.PositiveIntegerField(editable=False)
order = models.ForeignKey(
Order, related_name="fulfillments", editable=False, on_delete=models.CASCADE
)
status = models.CharField(
max_length=32,
default=FulfillmentStatus.FULFILLED,
choices=FulfillmentStatus.CHOICES,
)
tracking_number = models.CharField(max_length=255, default="", blank=True)
shipping_date = models.DateTimeField(default=now, editable=False)
def __str__(self):
return pgettext_lazy("Fulfillment str", "Fulfillment #%s") % (self.composed_id,)
def __iter__(self):
return iter(self.lines.all())
def save(self, *args, **kwargs):
if not self.pk:
groups = self.order.fulfillments.all()
existing_max = groups.aggregate(Max("fulfillment_order"))
existing_max = existing_max.get("fulfillment_order__max")
self.fulfillment_order = existing_max + 1 if existing_max is not None else 1
return super().save(*args, **kwargs)
@property
def composed_id(self):
return "%s-%s" % (self.order.id, self.fulfillment_order)
def can_edit(self):
return self.status != FulfillmentStatus.CANCELED
def get_total_quantity(self):
return sum([line.quantity for line in self])
class FulfillmentLine(models.Model):
order_line = models.ForeignKey(
OrderLine, related_name="+", on_delete=models.CASCADE
)
fulfillment = models.ForeignKey(
Fulfillment, related_name="lines", on_delete=models.CASCADE
)
quantity = models.PositiveIntegerField()
class OrderEvent(models.Model):
date = models.DateTimeField(default=now, editable=False)
type = models.CharField(
max_length=255,
choices=[
(type_name.upper(), type_name) for type_name, _ in OrderEvents.CHOICES
],
)
order = models.ForeignKey(Order, related_name="events", on_delete=models.CASCADE)
parameters = JSONField(blank=True, default=dict, encoder=CustomJsonEncoder)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="+",
)
class Meta:
ordering = ("date",)
def __repr__(self):
return f"{self.__class__.__name__}(type={self.type!r}, user={self.user!r})"
| true | true |
f71bd651d2401162ae8d5fd7fa574451af7b41e4 | 3,518 | py | Python | check_updates.py | kunansy/CheckDependencyUpdates | 56ef905b7baf568e38c66ce39ca71115438a758b | [
"MIT"
] | null | null | null | check_updates.py | kunansy/CheckDependencyUpdates | 56ef905b7baf568e38c66ce39ca71115438a758b | [
"MIT"
] | null | null | null | check_updates.py | kunansy/CheckDependencyUpdates | 56ef905b7baf568e38c66ce39ca71115438a758b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import asyncio
import re
import sys
from itertools import groupby
from pathlib import Path
from typing import Optional, NamedTuple, AsyncIterable
import aiofiles
import aiohttp
PYPI_URL = "https://pypi.org/pypi/{package_name}/json"
class Package(NamedTuple):
    """A pinned requirement paired with the newest release found on PyPI."""

    name: str
    required_version: str
    last_version: Optional[str] = None

    def __str__(self) -> str:
        return (
            f"Package: '{self.name}'\n"
            f"Required version: {self.required_version}\n"
            f"Last version: {self.last_version}"
        )
async def get_last_version(session: aiohttp.ClientSession,
                           package_name: str) -> Optional[str]:
    """Return the latest released version of *package_name* from PyPI.

    Returns None when the request errors out or the package is unknown
    (non-200 response). Network errors are printed to stderr instead of
    raised so one bad package does not abort the whole run.
    """
    url = PYPI_URL.format(package_name=package_name)
    try:
        # ``async with`` releases the response even if reading the JSON
        # body raises; the original closed it only on the happy path.
        async with session.get(url) as resp:
            if resp.status == 200:
                payload = await resp.json()
                return payload['info']['version']
    except Exception as e:
        print(f"{e.__class__.__name__}({e!r})", file=sys.stderr)
    return None
async def worker(args: asyncio.Queue,
                 results: asyncio.Queue,
                 session: aiohttp.ClientSession) -> None:
    """Consume packages from *args*, resolve their latest PyPI version and
    push the completed Package onto *results*.

    Runs forever; the caller is expected to cancel the task once
    ``args.join()`` completes.
    """
    while True:
        package = await args.get()
        last_version = await get_last_version(session, package.name)
        # NamedTuples are immutable; _replace builds the updated copy.
        await results.put(package._replace(last_version=last_version))
        args.task_done()
async def get_packages(project_path: Path) -> AsyncIterable[Package]:
    """Yield one Package per pinned requirement in ``requirements.txt``.

    Lines that carry no version specifier (blank lines, comments, bare
    package names) are skipped instead of crashing.
    """
    requirements_path = project_path / 'requirements.txt'
    if not requirements_path.exists():
        print("Requirements file not found", file=sys.stderr)
        return
    # Package name, then a 2-4 char comparator run (==, >=, <= ...), version.
    pattern = re.compile(r'([^<>= ]+)[<>= ]{2,4}(.+)')
    async with aiofiles.open(requirements_path) as r:
        async for requirement in r:
            match = pattern.search(requirement)
            if match is None:
                # Bug fix: the original called .groups() unconditionally and
                # raised AttributeError on any non-matching line.
                continue
            name, version = match.groups()
            yield Package(name=name, required_version=version)
async def bound(project_path: Path, n_workers: int = 5) -> list[Package]:
    """Resolve the newest PyPI version for every project requirement.

    Args:
        project_path: directory that contains requirements.txt.
        n_workers: number of concurrent resolver tasks; defaults to 5,
            the value that was previously hard-coded.

    Returns:
        list[Package]: all packages with 'last_version' filled in
        (order is whatever the workers produced).
    """
    timeout = aiohttp.ClientTimeout(60)
    args = asyncio.Queue(maxsize=-1)
    results = asyncio.Queue(maxsize=-1)
    async with aiohttp.ClientSession(timeout=timeout) as ses:
        async for package in get_packages(project_path):
            await args.put(package)

        tasks = [
            asyncio.create_task(worker(args, results, ses))
            for _ in range(n_workers)
        ]
        # Block until every queued package has been processed, then stop
        # the (otherwise endless) worker tasks.
        await args.join()
        for task in tasks:
            task.cancel()

    return [
        results.get_nowait()
        for _ in range(results.qsize())
    ]
def main() -> None:
    """CLI entry point: report which requirements have newer PyPI releases."""
    parser = argparse.ArgumentParser(
        description="Check updates of the requirements"
    )
    parser.add_argument(
        '--path',
        type=Path,
        help="Path to the project",
        default=Path('.'),
        dest='path'
    )
    arguments = parser.parse_args()
    packages = asyncio.run(bound(arguments.path))

    def needs_update(package: Package) -> bool:
        # A package needs updating when PyPI's latest differs from the pin.
        return package.last_version != package.required_version

    # Sort so groupby produces at most two runs: up-to-date first, stale second.
    packages.sort(key=needs_update)
    for has_update, group in groupby(packages, key=needs_update):
        if has_update:
            print("Packages with updates: ")
        else:
            print("Packages without updates: ")
        for num, package in enumerate(group, 1):
            print(f"{num}.\n{package}", end='\n-------------\n')
        print()


if __name__ == "__main__":
    main()
| 26.451128 | 69 | 0.610006 |
import argparse
import asyncio
import re
import sys
from itertools import groupby
from pathlib import Path
from typing import Optional, NamedTuple, AsyncIterable
import aiofiles
import aiohttp
PYPI_URL = "https://pypi.org/pypi/{package_name}/json"
class Package(NamedTuple):
name: str
required_version: str
last_version: Optional[str] = None
def __str__(self) -> str:
return f"Package: '{self.name}'\n" \
f"Required version: {self.required_version}\n" \
f"Last version: {self.last_version}"
async def get_last_version(session: aiohttp.ClientSession,
package_name: str) -> Optional[str]:
url = PYPI_URL.format(package_name=package_name)
try:
resp = await session.get(url)
except Exception as e:
print(f"{e.__class__.__name__}({e!r})", file=sys.stderr)
return
if resp.status == 200:
json = await resp.json()
resp.close()
return json['info']['version']
resp.close()
async def worker(args: asyncio.Queue,
results: asyncio.Queue,
session: aiohttp.ClientSession) -> None:
while True:
package = await args.get()
last_version = await get_last_version(session, package.name)
package = Package(
name=package.name,
required_version=package.required_version,
last_version=last_version
)
await results.put(package)
args.task_done()
async def get_packages(project_path: Path) -> AsyncIterable[Package]:
requirements_path = project_path / 'requirements.txt'
if not requirements_path.exists():
print("Requirements file not found", file=sys.stderr)
return
pattern = re.compile(r'([^<>= ]+)[<>= ]{2,4}(.+)')
async with aiofiles.open(requirements_path) as r:
async for requirement in r:
name, version = pattern.search(requirement).groups()
yield Package(name=name, required_version=version)
async def bound(project_path: Path) -> list[Package]:
timeout = aiohttp.ClientTimeout(60)
args = asyncio.Queue(maxsize=-1)
results = asyncio.Queue(maxsize=-1)
async with aiohttp.ClientSession(timeout=timeout) as ses:
async for package in get_packages(project_path):
await args.put(package)
tasks = []
for _ in range(5):
task = asyncio.create_task(worker(args, results, ses))
tasks += [task]
await args.join()
for task in tasks:
task.cancel()
return [
results.get_nowait()
for _ in range(results.qsize())
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Check updates of the requirements"
)
parser.add_argument(
'--path',
type=Path,
help="Path to the project",
default=Path('.'),
dest='path'
)
args = parser.parse_args()
packages = asyncio.run(bound(args.path))
key = lambda item: item.last_version != item.required_version
packages.sort(key=key)
for has_update, packages_ in groupby(packages, key=key):
if has_update:
print("Packages with updates: ")
else:
print("Packages without updates: ")
for num, package in enumerate(packages_, 1):
print(f"{num}.\n{package}", end='\n-------------\n')
print()
if __name__ == "__main__":
main()
| true | true |
f71bd6747f7fe25f8bd69690c6d90ecfa7797cb2 | 27,045 | py | Python | amos/observe/traits.py | WithPrecedent/amos | 35b2f5b8d493eac946b583dfcd9d0553e7565292 | [
"Apache-2.0"
] | null | null | null | amos/observe/traits.py | WithPrecedent/amos | 35b2f5b8d493eac946b583dfcd9d0553e7565292 | [
"Apache-2.0"
] | null | null | null | amos/observe/traits.py | WithPrecedent/amos | 35b2f5b8d493eac946b583dfcd9d0553e7565292 | [
"Apache-2.0"
] | null | null | null | """
traits: tools for examining classes, instances, and other python objects
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2021, Corey Rayburn Yung
License: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contents:
contains
contains_dict
contains_list
contains_set
contains_tuple
parallel_contains
serial_contains
get_annotations
get_attributes
get_contents
get_contents_types
get_methods
get_name
get_properties
get_signatures
get_types
get_variables
has_attributes
has_methods
has_properties
has_signatures
has_traits
has_types
has_types_dict
has_types_list
has_types_sequence
is_class_attribute
is_container
is_function
is_iterable
is_method
is_nested
is_sequence
name_attributes
name_methods
name_parameters
name_properties
name_variables
ToDo:
    Adding parsing functionality to signature-related functions to find
equivalence when one signature has subtypes of the other signature
(e.g., one type annotation is 'dict' and the other is 'MutableMapping').
It might be necessary to create a separate Signature-like class to
implement this functionality. This includes fixing or abandoning
'has_annotations' due to issues matching type annotations.
Add support for nagata Kinds once that system is complete.
Add support for types (using type annotations) in the 'contains' function so
that 'contains' can be applied to classes and not just instances.
Add 'dispatcher' framework to 'contains' once the dispatcher framework is
completed in the 'bobbie' package and the Kind system is completed in
the nagata package. This should replace existing usages of python's
        singledispatch, which doesn't properly deal with subtypes.
"""
from __future__ import annotations
from collections.abc import (
Container, Hashable, Iterable, Mapping, MutableSequence, Sequence, Set)
import functools
import inspect
import types
from typing import Any, Optional, Type, Union
from ..repair import modify
@functools.singledispatch
def contains(
    item: object,
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether 'item' holds only the type(s) in 'contents'.

    This is the generic fallback for the singledispatch family: concrete
    behavior lives in the overloads registered for Mapping, sequences,
    sets, tuples, and generic containers.

    Args:
        item (object): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check
            for in 'item' contents.

    Raises:
        TypeError: always, because no registered overload matched 'item'.
    """
    message = f'item {item} is not supported by {__name__}'
    raise TypeError(message)
@contains.register(Mapping)
def dict_contains(
    item: Mapping[Hashable, Any],
    contents: tuple[Union[Type[Any], tuple[Type[Any], ...]],
                    Union[Type[Any], tuple[Type[Any], ...]]]) -> bool:
    """Returns whether every key and value of 'item' matches 'contents'.

    Args:
        item (Mapping[Hashable, Any]): mapping to examine.
        contents: two-item spec; contents[0] is the type(s) required of
            the keys and contents[1] the type(s) required of the values.

    Returns:
        bool: whether all keys and values match their respective spec.
    """
    # Check keys first and bail early, mirroring the original 'and' chain.
    if not serial_contains(item = item.keys(), contents = contents[0]):
        return False
    return serial_contains(item = item.values(), contents = contents[1])
@contains.register(MutableSequence)
def list_contains(
    item: MutableSequence[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether every element of list 'item' matches 'contents'.

    Args:
        item (MutableSequence[Any]): mutable sequence to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): type(s) each
            element must be an instance of.

    Returns:
        bool: whether every element matches 'contents'.
    """
    # Mutable sequences are always checked element-by-element.
    return serial_contains(item = item, contents = contents)
@contains.register(Set)
def set_contains(
    item: Set[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether every member of set 'item' matches 'contents'.

    Args:
        item (Set[Any]): set to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): type(s) each
            member must be an instance of.

    Returns:
        bool: whether every member matches 'contents'.
    """
    # Sets are unordered, so a uniform element-wise check is the only option.
    return serial_contains(item = item, contents = contents)
@contains.register(tuple)
def tuple_contains(
    item: tuple[Any, ...],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether tuple 'item' matches 'contents'.

    Args:
        item (tuple[Any, ...]): tuple to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): either a single
            type spec applied to every element, or a tuple of types the
            same length as 'item' applied positionally.

    Returns:
        bool: whether 'item' matches 'contents'.
    """
    # A tuple of types with matching length is interpreted positionally
    # (element i must match contents[i]); anything else is a uniform check.
    if isinstance(contents, tuple) and len(contents) == len(item):
        return parallel_contains(item = item, contents = contents)
    return serial_contains(item = item, contents = contents)
@contains.register(Sequence)
def parallel_contains(
    item: Sequence[Any],
    contents: tuple[Type[Any], ...]) -> bool:
    """Returns whether each element of 'item' matches its paired type.

    Element i of 'item' is checked against contents[i].

    Args:
        item (Sequence[Any]): sequence to examine.
        contents (tuple[Type[Any], ...]): per-position type spec, one entry
            per element of 'item'.

    Returns:
        bool: whether every element matches its positional type.
    """
    # The original looped 'for i in enumerate(item)', which binds 'i' to an
    # (index, value) tuple and crashes on 'item[i]'; pair element and type
    # explicitly instead.
    return all(
        isinstance(element, kind)
        for element, kind in zip(item, contents))
@contains.register(Container)
def serial_contains(
    item: Container[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether every item in 'item' matches 'contents'.

    Args:
        item (Container[Any]): container to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): type(s) each
            element must be an instance of.

    Returns:
        bool: whether every element matches 'contents'.
    """
    for element in item:
        if not isinstance(element, contents):
            return False
    return True
def get_annotations(
    item: object,
    include_private: bool = False) -> dict[str, Type[Any]]:
    """Returns the annotated attributes of 'item' as a name-to-type dict.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: attribute names mapped to their type annotations.
    """
    annotations = item.__annotations__
    if not include_private:
        annotations = {
            name: kind for name, kind in annotations.items()
            if not name.startswith('_')}
    return annotations
def get_attributes(
    item: object,
    include_private: bool = False) -> dict[str, Any]:
    """Returns attributes of 'item' as a name-to-value dict.

    Args:
        item (Any): item to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: attribute names mapped to attribute values.
    """
    names = name_attributes(item = item, include_private = include_private)
    return {name: getattr(item, name) for name in names}
def get_methods(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> dict[str, types.MethodType]:
    """Returns dict of methods of 'item'.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, types.MethodType]: method names mapped to methods.
    """
    methods = name_methods(item = item, include_private = include_private)
    # The original returned a bare list here, contradicting both the return
    # annotation and the docstring; return a dict, mirroring get_properties,
    # get_signatures and get_attributes.
    return {m: getattr(item, m) for m in methods}
def get_name(item: Any, default: Optional[str] = None) -> Optional[str]:
    """Returns a str name representation of 'item'.

    Resolution order: 'item' itself if it is a str, then a str 'name'
    attribute on an instance, then a snakified '__name__', then the
    snakified class name, and finally 'default'.

    Args:
        item (Any): item to determine a str name for.
        default (Optional[str]): fallback name if every other strategy
            fails. Defaults to None.

    Returns:
        str: a name representation of 'item'.
    """
    if isinstance(item, str):
        return item
    usable_name_attr = (
        hasattr(item, 'name')
        and not inspect.isclass(item)
        and isinstance(item.name, str))
    if usable_name_attr:
        return item.name
    try:
        return modify.snakify(item.__name__)
    except AttributeError:
        if item.__class__.__name__ is not None:
            return modify.snakify(item.__class__.__name__)
        return default
def get_properties(
    item: object,
    include_private: bool = False) -> dict[str, Any]:
    """Returns properties of 'item' as a name-to-value dict.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: property names mapped to property values.
    """
    names = name_properties(item = item, include_private = include_private)
    return {name: getattr(item, name) for name in names}
def get_signatures(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> dict[str, inspect.Signature]:
    """Returns method signatures of 'item' as a name-to-Signature dict.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, inspect.Signature]: method names mapped to signatures.
    """
    methods = name_methods(item = item, include_private = include_private)
    return {
        name: inspect.signature(getattr(item, name)) for name in methods}
def get_variables(
    item: object,
    include_private: bool = False) -> dict[str, Any]:
    """Returns attributes of 'item' that are neither methods nor properties.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: plain-data attribute names mapped to their values.
    """
    attributes = name_attributes(item = item, include_private = include_private)
    # Exclude anything that is a method or a property.
    skip = set(name_methods(item = item, include_private = include_private))
    skip |= set(name_properties(item = item, include_private = include_private))
    return {
        name: getattr(item, name) for name in attributes if name not in skip}
def has_attributes(
    item: Union[object, Type[Any]],
    attributes: MutableSequence[str]) -> bool:
    """Returns whether every name in 'attributes' exists on 'item'.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        attributes (MutableSequence[str]): attribute names to look for.

    Returns:
        bool: whether all 'attributes' exist on 'item'.
    """
    for attribute in attributes:
        if not hasattr(item, attribute):
            return False
    return True
def has_methods(
    item: Union[object, Type[Any]],
    methods: Union[str, MutableSequence[str]]) -> bool:
    """Returns whether 'item' has all of 'methods' as actual methods.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        methods (Union[str, MutableSequence[str]]): name(s) that must exist
            on 'item' and be methods.

    Returns:
        bool: whether all 'methods' exist on 'item' and are methods.
    """
    # The original called 'convert.iterify', but 'convert' is never
    # imported in this module (NameError at runtime); wrap a lone string
    # inline instead.
    if isinstance(methods, str):
        methods = [methods]
    return all(is_method(item = item, attribute = m) for m in methods)
def has_properties(
    item: Union[object, Type[Any]],
    properties: Union[str, MutableSequence[str]]) -> bool:
    """Returns whether 'item' has all of 'properties' as actual properties.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        properties (Union[str, MutableSequence[str]]): name(s) that must
            exist on 'item' and be properties.

    Returns:
        bool: whether all 'properties' exist on 'item' and are properties.
    """
    # The original called 'convert.iterify', but 'convert' is never
    # imported in this module (NameError at runtime); wrap a lone string
    # inline instead.
    if isinstance(properties, str):
        properties = [properties]
    return all(is_property(item = item, attribute = p) for p in properties)
def has_signatures(
    item: Union[object, Type[Any]],
    signatures: Mapping[str, inspect.Signature]) -> bool:
    """Returns whether 'item' has methods matching 'signatures'.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        signatures (Mapping[str, inspect.Signature]): method names mapped
            to the signatures those methods must have.

    Returns:
        bool: whether every named method exists with the given signature.
    """
    item_signatures = get_signatures(item = item, include_private = True)
    for name, parameters in signatures.items():
        if name not in item_signatures:
            return False
        if item_signatures[name] != parameters:
            return False
    return True
def has_traits(
    item: Union[object, Type[Any]],
    attributes: Optional[MutableSequence[str]] = None,
    methods: Optional[MutableSequence[str]] = None,
    properties: Optional[MutableSequence[str]] = None,
    signatures: Optional[Mapping[str, inspect.Signature]] = None) -> bool:
    """Returns whether 'item' has every requested trait.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine
            (instances are examined via their class).
        attributes (MutableSequence[str]): attribute names that must exist.
        methods (MutableSequence[str]): names that must be methods.
        properties (MutableSequence[str]): names that must be properties.
        signatures (Mapping[str, inspect.Signature]): method names mapped
            to required signatures.

    Returns:
        bool: whether every passed trait is present on 'item'.
    """
    if not inspect.isclass(item):
        item = item.__class__
    # Guard-clause chain preserves the original short-circuit order.
    if not has_attributes(item = item, attributes = attributes or []):
        return False
    if not has_methods(item = item, methods = methods or []):
        return False
    if not has_properties(item = item, properties = properties or []):
        return False
    return has_signatures(item = item, signatures = signatures or {})
@functools.singledispatch
def has_types(item: object) -> Optional[Union[
    tuple[Type[Any], ...],
    tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]]:
    """Returns the types contained in 'item'.

    This is the generic fallback for the singledispatch family: concrete
    behavior lives in the overloads registered for Mapping, mutable
    sequences, and generic sequences.

    Args:
        item (object): item to examine.

    Raises:
        TypeError: always, because no registered overload matched 'item'.
    """
    message = f'item {item} is not supported by {__name__}'
    raise TypeError(message)
@has_types.register(Mapping)
def has_types_dict(
    item: Mapping[Hashable, Any]) -> Optional[
        tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]:
    """Returns the key types and value types contained in mapping 'item'.

    Args:
        item (Mapping[Hashable, Any]): mapping to examine.

    Returns:
        Optional[tuple]: (key types, value types), or None if 'item' is
        not a Mapping.
    """
    if isinstance(item, Mapping):
        # Dict views are not Sequences, so passing them straight to
        # 'has_types_sequence' (as the original did) always yielded None;
        # materialize them as tuples first.
        key_types = has_types_sequence(item = tuple(item.keys()))
        value_types = has_types_sequence(item = tuple(item.values()))
        return tuple([key_types, value_types])
    else:
        return None
@has_types.register(MutableSequence)
def has_types_list(item: list[Any]) -> Optional[tuple[Type[Any], ...]]:
    """Returns the element types contained in list 'item'.

    Args:
        item (list[Any]): list to examine.

    Returns:
        Optional[tuple[Type[Any], ...]]: the distinct element types, or
        None if 'item' is not a list.
    """
    if isinstance(item, list):
        # The original copy-pasted the Mapping overload and called
        # .keys()/.values() on a list (AttributeError); a list is simply
        # a sequence of elements.
        return has_types_sequence(item = item)
    else:
        return None
@has_types.register(Sequence)
def has_types_sequence(item: Sequence[Any]) -> Optional[tuple[Type[Any], ...]]:
    """Returns the element types contained in sequence 'item'.

    Args:
        item (Sequence[Any]): sequence to examine.

    Returns:
        Optional[tuple[Type[Any], ...]]: the distinct element types in
        first-seen order, or None if 'item' is not a Sequence.
    """
    if not isinstance(item, Sequence):
        return None
    seen = []
    for element in item:
        kind = type(element)
        # Preserve first-seen order while deduplicating.
        if kind not in seen:
            seen.append(kind)
    return tuple(seen)
def is_class_attribute(item: Union[object, Type[Any]], attribute: str) -> bool:
    """Returns whether 'attribute' names a plain class attribute of 'item'
    (present, but neither a method nor a property)."""
    owner = item if inspect.isclass(item) else item.__class__
    if not hasattr(owner, attribute):
        return False
    if is_method(item = owner, attribute = attribute):
        return False
    return not is_property(item = owner, attribute = attribute)
def is_container(item: Union[object, Type[Any]]) -> bool:
    """Returns whether 'item' is a container type other than str.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.

    Returns:
        bool: whether 'item' is a container but not a str.
    """
    kind = item if inspect.isclass(item) else item.__class__
    return issubclass(kind, Container) and not issubclass(kind, str)
def is_function(item: Union[object, Type[Any]], attribute: Any) -> bool:
    """Returns whether 'attribute' (a name or object) is a plain function
    found on 'item'."""
    if isinstance(attribute, str):
        # Resolve the name on 'item'; a missing attribute means False.
        attribute = getattr(item, attribute, None)
        if attribute is None:
            return False
    return isinstance(attribute, types.FunctionType)
def is_iterable(item: Union[object, Type[Any]]) -> bool:
    """Returns whether 'item' is iterable and is NOT a str type.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.

    Returns:
        bool: whether 'item' is iterable but not a str.
    """
    kind = item if inspect.isclass(item) else item.__class__
    return issubclass(kind, Iterable) and not issubclass(kind, str)
def is_method(item: Union[object, Type[Any]], attribute: Any) -> bool:
    """Returns whether 'attribute' (a name or object) is a bound method
    of 'item'."""
    if isinstance(attribute, str):
        missing = object()
        attribute = getattr(item, attribute, missing)
        if attribute is missing:
            return False
    return inspect.ismethod(attribute)
def is_nested(item: Mapping[Any, Any]) -> bool:
    """Returns whether 'item' is a mapping nested at least one level deep.

    Args:
        item (Mapping[Any, Any]): item to examine.

    Returns:
        bool: whether 'item' is a mapping with at least one mapping value.
    """
    if not isinstance(item, Mapping):
        return False
    return any(isinstance(value, Mapping) for value in item.values())
def is_property(item: Union[object, Type[Any]], attribute: Any) -> bool:
    """Returns whether 'attribute' (a name or object) is a property of
    'item' (instances are examined via their class)."""
    owner = item if inspect.isclass(item) else item.__class__
    if isinstance(attribute, str):
        try:
            attribute = getattr(owner, attribute)
        except AttributeError:
            return False
    return isinstance(attribute, property)
def is_sequence(item: Union[object, Type[Any]]) -> bool:
    """Returns whether 'item' is a sequence and is NOT a str type.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.

    Returns:
        bool: whether 'item' is a sequence but not a str.
    """
    kind = item if inspect.isclass(item) else item.__class__
    return issubclass(kind, Sequence) and not issubclass(kind, str)
def is_variable(item: Union[object, Type[Any]], attribute: str) -> bool:
    """Returns whether 'attribute' is a simple data attribute of 'item'
    (present, but neither a function nor a property).

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        attribute (str): name to look up on 'item'.

    Returns:
        bool: whether 'attribute' is a plain data attribute.
    """
    if not hasattr(item, attribute):
        return False
    if is_function(item = item, attribute = attribute):
        return False
    return not is_property(item = item, attribute = attribute)
def name_attributes(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
    """Returns the attribute names of 'item'.

    Args:
        item (Union[object, Type[Any]]): item to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        list[str]: names of attributes in 'item'.
    """
    names = dir(item)
    if include_private:
        return names
    return modify.drop_privates(item = names)
def name_methods(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
    """Returns the method names of 'item'.

    Args:
        item (Union[object, Type[Any]]): item to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        list[str]: names of methods in 'item'.
    """
    found = [
        name for name in dir(item)
        if is_method(item = item, attribute = name)]
    if include_private:
        return found
    return modify.drop_privates(item = found)
def name_parameters(item: Type[Any]) -> list[str]:
    """Returns a list of parameter names from the annotations of 'item'.

    Args:
        item (Type[Any]): class to get parameter names from.

    Returns:
        list[str]: annotated attribute names of 'item', in declaration order.
    """
    return [*item.__annotations__]
def name_properties(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
    """Returns the property names of 'item'.

    Args:
        item (Union[object, Type[Any]]): item to examine (instances are
            examined via their class).
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        list[str]: names of properties in 'item'.
    """
    owner = item if inspect.isclass(item) else item.__class__
    found = [
        name for name in dir(owner)
        if is_property(item = owner, attribute = name)]
    if include_private:
        return found
    return modify.drop_privates(item = found)
def name_variables(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
    """Returns names of attributes of 'item' that are neither methods
    nor properties.

    Args:
        item (Union[object, Type[Any]]): item to examine.
        include_private (bool): whether to include names that begin with
            '_' (True) or exclude them (False). Defaults to False.

    Returns:
        list[str]: plain-data attribute names of 'item'.
    """
    found = [
        name for name in dir(item)
        if is_variable(item = item, attribute = name)]
    if include_private:
        return found
    return modify.drop_privates(item = found)
# def has_annotations(
# item: Union[object, Type[Any]],
# attributes: Mapping[str, Type[Any]]) -> bool:
# """Returns whether 'attributes' exist in 'item' and are the right type.
# Args:
# item (Union[object, Type[Any]]): class or instance to examine.
# attributes (dict[str, Type[Any]]): dict where keys are the attribute
# names and values are the expected types of whose named attributes.
# Returns
# bool: whether all of the 'attributes' exist in 'item' and are of the
# proper type.
# """
# matched = True
# if inspect.isclass(item):
# for attribute, value in attributes.items():
# if value is not None:
# try:
# testing = getattr(item, attribute)
# testing = item.__annotations__[testing]
# except AttributeError:
# return False
# try:
# if not issubclass(testing, value):
# return False
# except TypeError:
# pass
# else:
# for attribute, value in attributes.items():
# if value is not None:
# try:
# testing = getattr(item, attribute)
# except AttributeError:
# return False
# try:
# if not isinstance(testing, value):
# return False
# except TypeError:
# pass
# return matched
| 34.147727 | 80 | 0.614346 | from __future__ import annotations
from collections.abc import (
Container, Hashable, Iterable, Mapping, MutableSequence, Sequence, Set)
import functools
import inspect
import types
from typing import Any, Optional, Type, Union
from ..repair import modify
@functools.singledispatch
def contains(
item: object,
contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
raise TypeError(f'item {item} is not supported by {__name__}')
@contains.register(Mapping)
def dict_contains(
item: Mapping[Hashable, Any],
contents: tuple[Union[Type[Any], tuple[Type[Any], ...]],
Union[Type[Any], tuple[Type[Any], ...]]]) -> bool:
return (
serial_contains(item = item.keys(), contents = contents[0])
and serial_contains(item = item.values(), contents = contents[1]))
@contains.register(MutableSequence)
def list_contains(
item: MutableSequence[Any],
contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
return serial_contains(item = item, contents = contents)
@contains.register(Set)
def set_contains(
item: Set[Any],
contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
return serial_contains(item = item, contents = contents)
@contains.register(tuple)
def tuple_contains(
item: tuple[Any, ...],
contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
if isinstance(contents, tuple) and len(item) == len(contents):
technique = parallel_contains
else:
technique = serial_contains
return technique(item = item, contents = contents)
@contains.register(Sequence)
def parallel_contains(
item: Sequence[Any],
contents: tuple[Type[Any], ...]) -> bool:
return all(isinstance(item[i], contents[i]) for i in enumerate(item))
@contains.register(Container)
def serial_contains(
item: Container[Any],
contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
return all(isinstance(i, contents) for i in item)
def get_annotations(
item: object,
include_private: bool = False) -> dict[str, Type[Any]]:
annotations = item.__annotations__
if include_private:
return annotations
else:
return {k: v for k, v in annotations.items() if not k.startswith('_')}
def get_attributes(
item: object,
include_private: bool = False) -> dict[str, Any]:
attributes = name_attributes(item = item, include_private = include_private)
values = [getattr(item, m) for m in attributes]
return dict(zip(attributes, values))
def get_methods(
item: Union[object, Type[Any]],
include_private: bool = False) -> dict[str, types.MethodType]:
methods = name_methods(item = item, include_private = include_private)
return [getattr(item, m) for m in methods]
def get_name(item: Any, default: Optional[str] = None) -> Optional[str]:
if isinstance(item, str):
return item
elif (
hasattr(item, 'name')
and not inspect.isclass(item)
and isinstance(item.name, str)):
return item.name
else:
try:
return modify.snakify(item.__name__)
except AttributeError:
if item.__class__.__name__ is not None:
return modify.snakify(item.__class__.__name__)
else:
return default
def get_properties(
item: object,
include_private: bool = False) -> dict[str, Any]:
properties = name_properties(item = item, include_private = include_private)
values = [getattr(item, p) for p in properties]
return dict(zip(properties, values))
def get_signatures(
item: Union[object, Type[Any]],
include_private: bool = False) -> dict[str, inspect.Signature]:
methods = name_methods(item = item, include_private = include_private)
signatures = [inspect.signature(getattr(item, m)) for m in methods]
return dict(zip(methods, signatures))
def get_variables(
item: object,
include_private: bool = False) -> dict[str, Any]:
attributes = name_attributes(item = item, include_private = include_private)
methods = name_methods(item = item, include_private = include_private)
properties = name_properties(item = item, include_private = include_private)
variables = [
a for a in attributes if a not in methods and a not in properties]
values = [getattr(item, m) for m in variables]
return dict(zip(variables, values))
def has_attributes(
item: Union[object, Type[Any]],
attributes: MutableSequence[str]) -> bool:
return all(hasattr(item, a) for a in attributes)
def has_methods(
item: Union[object, Type[Any]],
methods: Union[str, MutableSequence[str]]) -> bool:
methods = list(convert.iterify(methods))
return all(is_method(item = item, attribute = m) for m in methods)
def has_properties(
item: Union[object, Type[Any]],
properties: Union[str, MutableSequence[str]]) -> bool:
properties = list(convert.iterify(properties))
return all(is_property(item = item, attribute = p) for p in properties)
def has_signatures(
item: Union[object, Type[Any]],
signatures: Mapping[str, inspect.Signature]) -> bool:
item_signatures = get_signatures(item = item, include_private = True)
pass_test = True
for name, parameters in signatures.items():
if (name not in item_signatures or item_signatures[name] != parameters):
pass_test = False
return pass_test
def has_traits(
    item: Union[object, Type[Any]],
    attributes: Optional[MutableSequence[str]] = None,
    methods: Optional[MutableSequence[str]] = None,
    properties: Optional[MutableSequence[str]] = None,
    signatures: Optional[Mapping[str, inspect.Signature]] = None) -> bool:
  """Returns whether 'item' exhibits every requested trait group.

  Omitted groups default to empty and are trivially satisfied.
  """
  if not inspect.isclass(item):
    item = item.__class__
  checks = (
    has_attributes(item = item, attributes = attributes or []),
    has_methods(item = item, methods = methods or []),
    has_properties(item = item, properties = properties or []),
    has_signatures(item = item, signatures = signatures or {}))
  return all(checks)
@functools.singledispatch
def has_types(item: object) -> Optional[Union[
    tuple[Type[Any], ...],
    tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]]:
  """Returns the types contained in 'item' (generic dispatcher).

  Concrete behavior is supplied by the registered overloads below; any
  unsupported type falls through to this base and raises TypeError.
  """
  raise TypeError(f'item {item} is not supported by {__name__}')
@has_types.register(Mapping)
def has_types_dict(
    item: Mapping[Hashable, Any]) -> Optional[
      tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]:
  """Returns the key types and value types contained in a Mapping.

  Returns a 2-tuple (key types, value types), or None for non-Mappings.
  """
  if isinstance(item, Mapping):
    # Dict views are not collections.abc.Sequence instances, so they must
    # be materialized before has_types_sequence will inspect them.
    key_types = has_types_sequence(item = tuple(item.keys()))
    value_types = has_types_sequence(item = tuple(item.values()))
    return tuple([key_types, value_types])
  else:
    return None
@has_types.register(MutableSequence)
def has_types_list(item: list[Any]) -> Optional[tuple[Type[Any], ...]]:
  """Returns the element types contained in a list, in first-seen order.

  The previous implementation called '.keys()'/'.values()' on the list (a
  copy/paste from the Mapping overload) and raised AttributeError for every
  input; lists are inspected element-wise instead.
  """
  if isinstance(item, list):
    all_types = []
    for thing in item:
      kind = type(thing)
      if kind not in all_types:
        all_types.append(kind)
    return tuple(all_types)
  else:
    return None
@has_types.register(Sequence)
def has_types_sequence(item: Sequence[Any]) -> Optional[tuple[Type[Any], ...]]:
  """Returns the element types contained in a Sequence, in first-seen order."""
  if isinstance(item, Sequence):
    # dict.fromkeys deduplicates while preserving first-seen order.
    ordered = dict.fromkeys(type(thing) for thing in item)
    return tuple(ordered)
  else:
    return None
def is_class_attribute(item: Union[object, Type[Any]], attribute: str) -> bool:
  """Returns whether 'attribute' names a plain class attribute of 'item'."""
  if not inspect.isclass(item):
    item = item.__class__
  if not hasattr(item, attribute):
    return False
  # Methods and properties are excluded: only plain data attributes qualify.
  return (
    not is_method(item = item, attribute = attribute)
    and not is_property(item = item, attribute = attribute))
def is_container(item: Union[object, Type[Any]]) -> bool:
  """Returns whether 'item' (class or instance) is a Container other than str."""
  kind = item if inspect.isclass(item) else item.__class__
  return issubclass(kind, Container) and not issubclass(kind, str)
def is_function(item: Union[object, Type[Any]], attribute: Any) -> bool:
  """Returns whether 'attribute' (name or object) is a plain function on 'item'."""
  if isinstance(attribute, str):
    # A missing attribute cannot be a function.
    attribute = getattr(item, attribute, None)
    if attribute is None:
      return False
  return isinstance(attribute, types.FunctionType)
def is_iterable(item: Union[object, Type[Any]]) -> bool:
  """Returns whether 'item' (class or instance) is Iterable, excluding str."""
  kind = item if inspect.isclass(item) else item.__class__
  return issubclass(kind, Iterable) and not issubclass(kind, str)
def is_method(item: Union[object, Type[Any]], attribute: Any) -> bool:
  """Returns whether 'attribute' (name or object) is a bound method of 'item'."""
  if isinstance(attribute, str):
    if not hasattr(item, attribute):
      return False
    attribute = getattr(item, attribute)
  return inspect.ismethod(attribute)
def is_nested(item: Mapping[Any, Any]) -> bool:
  """Returns whether 'item' is a Mapping with at least one Mapping value."""
  if not isinstance(item, Mapping):
    return False
  for value in item.values():
    if isinstance(value, Mapping):
      return True
  return False
def is_property(item: Union[object, Type[Any]], attribute: Any) -> bool:
  """Returns whether 'attribute' (name or object) is a property of 'item'."""
  if not inspect.isclass(item):
    # Properties live on the class, not the instance.
    item = item.__class__
  if isinstance(attribute, str):
    if not hasattr(item, attribute):
      return False
    attribute = getattr(item, attribute)
  return isinstance(attribute, property)
def is_sequence(item: Union[object, Type[Any]]) -> bool:
  """Returns whether 'item' (class or instance) is a Sequence, excluding str."""
  kind = item if inspect.isclass(item) else item.__class__
  return issubclass(kind, Sequence) and not issubclass(kind, str)
def is_variable(item: Union[object, Type[Any]], attribute: str) -> bool:
  """Returns whether 'attribute' names a plain data attribute of 'item'."""
  if not hasattr(item, attribute):
    return False
  return (
    not is_function(item = item, attribute = attribute)
    and not is_property(item = item, attribute = attribute))
def name_attributes(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
  """Returns attribute names of 'item', optionally keeping private names."""
  names = dir(item)
  if include_private:
    return names
  return modify.drop_privates(item = names)
def name_methods(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
  """Returns names of bound methods of 'item', optionally keeping private names."""
  methods = [
    name for name in dir(item)
    if is_method(item = item, attribute = name)]
  if include_private:
    return methods
  return modify.drop_privates(item = methods)
def name_parameters(item: Type[Any]) -> list[str]:
  """Returns the annotated attribute names declared on class 'item'."""
  return [*item.__annotations__]
def name_properties(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
  """Returns names of properties of 'item', optionally keeping private names."""
  if not inspect.isclass(item):
    # Properties live on the class, not the instance.
    item = item.__class__
  properties = [
    name for name in dir(item)
    if is_property(item = item, attribute = name)]
  if include_private:
    return properties
  return modify.drop_privates(item = properties)
def name_variables(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> list[str]:
  """Returns names of plain data attributes of 'item', optionally with privates."""
  names = [
    name for name in dir(item)
    if is_variable(item = item, attribute = name)]
  if include_private:
    return names
  return modify.drop_privates(item = names)
# Args:
#   item (Union[object, Type[Any]]): class or instance to examine.
#   attributes (dict[str, Type[Any]]): dict where keys are the attribute
#     names and values are the expected types of those named attributes.
# Returns:
#   bool: whether all of the 'attributes' exist in 'item' and are of the
#     proper type.
| true | true |
f71bd6d48a0ed9d4e130a00c843f740473f02ef1 | 14,332 | py | Python | TaskList/Task.py | CaptainDesAstres/Blender-Render-Manager | 39082e7833383bbe7dd414381f1b295e3b778439 | [
"MIT"
] | 5 | 2015-07-22T03:02:17.000Z | 2018-10-11T10:07:42.000Z | TaskList/Task.py | CaptainDesAstres/Blender-Render-Manager | 39082e7833383bbe7dd414381f1b295e3b778439 | [
"MIT"
] | null | null | null | TaskList/Task.py | CaptainDesAstres/Blender-Render-Manager | 39082e7833383bbe7dd414381f1b295e3b778439 | [
"MIT"
] | 1 | 2018-10-11T10:07:43.000Z | 2018-10-11T10:07:43.000Z | #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage task settings'''
import xml.etree.ElementTree as xmlMod
import os, uuid, subprocess, shlex, time, datetime, threading
from save import *
from usefullFunctions import *
from Preferences.PresetList.Preset.Preset import *
from TaskList.FileInfo.FileInfo import *
from TaskList.TaskLog.TaskLog import *
class Task:
    '''Settings and life cycle of one Blender rendering task.

    A task binds a .blend file path to a scene, a rendering preset and the
    file's parsed FileInfo; it tracks a status string and, once started, a
    TaskLog. Tasks serialize to/from XML and drive Blender subprocesses.
    '''
    def __init__(self, path = None, scene = None, preset = None,\
                fileInfo = None, xml = None):
        '''Initialize from explicit settings, or from a saved <task> XML node.'''
        # 'running' is transient runtime state and is never serialized.
        self.running = False
        if xml is None:
            self.defaultInit(path, scene, preset, fileInfo)
        else:
            self.fromXml(xml)
    def defaultInit(self, path, scene, preset, fileInfo):
        '''Initialize Task object with default settings for a fresh task.'''
        self.path = path
        self.scene = scene
        self.preset = preset
        self.info = fileInfo
        # Unique id used to name generated scripts and match socket messages.
        self.uid = uuid.uuid4().hex
        self.log = None
        self.status = 'waiting'
        # self.status possible values:
        # waiting    > the task have been set and is waiting to be run
        # lock       > the task is protected against running
        # pendinglock> same thing for a task that already have been started
        # ready      > the task have been run once and task.log is set
        # running    > the task is running
        # pause      > the task have been started but is now waiting to be continued
        # ended      > the task have been totaly rendered
        # erased     > the task have been erased
    def fromXml(self, xml):
        '''Initialize Task object from a saved <task> XML element.'''
        self.path = xml.get('path')
        self.scene = xml.get('scene')
        self.preset = xml.get('preset')
        # Older saves may lack a uid: generate a fresh one on the fly.
        self.uid = xml.get('uid', uuid.uuid4().hex)
        self.status = xml.get('status')
        self.info = FileInfo(xml.find('fileInfo'))
        # A <log> child only exists for tasks that have already been started.
        node = xml.find('log')
        if node is not None:
            self.log = TaskLog(xml = node)
        else:
            self.log = None
    def toXml(self):
        '''Export task settings into an XML-syntaxed string (<task> element).'''
        # path/scene may contain XML-reserved characters: encode them.
        xml = '<task path="'+XML.encode(self.path)+'" scene="'+XML.encode(self.scene)\
            +'" preset="'+self.preset+'" uid="'+self.uid\
            +'" status="'+self.status+'" >\n'\
            +self.info.toXml()
        if self.log is not None:
            xml += self.log.toXml()
        xml += '</task>\n'
        return xml
    def menu(self, log, index, tasks, preferences):
        '''Interactive menu to edit this task's settings.

        Returns True when anything was changed (caller persists the list).
        Editing options (scene/preset/renderlayers) are hidden once the task
        has been started, i.e. once self.log exists.
        '''
        log.menuIn('Task n°'+str(index))
        change = False
        started = self.log is not None
        if started:
            menu = '''
    Menu :
    (TASK ALREADY STARTED : SOME OPTIONS IS NOT AVAILABLE!)
    5- Change list row
    6- Lock/Unlock task
    7- Erase task
    8- Copy task
    9- See Rendering Log
    0- Quit and save
    '''
        else:
            menu = '''
    Menu :
    1- Change scene
    2- Change preset
    3- Edit preset
    4- Active/desactive Renderlayer
    5- Change list row
    6- Lock/Unlock task
    7- Erase task
    8- Copy task
    0- Quit and save
    '''
        while True:
            log.print()
            print('\n        Edit Task n°'+str(index)+' :')
            self.print()
            print(menu)
            choice = input('action : ').strip().lower()
            if choice in ['0', 'q', 'quit', 'cancel']:
                log.menuOut()
                return change
            elif choice == '1' and not started:
                # change scene (only before first run)
                scene = self.info.sceneChoice(log, allChoice = False)
                if scene is not None:
                    self.scene = scene[0]
                    log.write('Task n°'+str(index)+' : Scene set to «'+self.scene+'»')
                    change = True
            elif choice == '2' and not started:
                # change preset (only before first run)
                preset = Task.presetChoice(log, preferences)
                if preset is not None :
                    self.preset = preset
                    log.write('Task n°'+str(index)+' : Preset set to «'+self.preset+'»')
                    change = True
            elif choice == '3' and not started:
                self.editPreset(log, preferences)
            elif choice == '4' and not started:
                confirm = self.info.scenes[self.scene].renderlayerActivator(log)
                if confirm:
                    log.write('change task n°'+str(index)+' active renderlayer')
                    change = True
            elif choice == '5':
                # move the task in the list; track its new index
                confirm, select = tasks.move(log, [index])
                if confirm:
                    change = True
                    index = select[0]
            elif choice == '6':
                # toggle lock; started tasks use the 'pendinglock' variant
                if self.status in ['ready', 'pause']:
                    self.status = 'pendinglock'
                    change = True
                    log.write('Task n°'+str(index)+' locked')
                elif self.status == 'waiting':
                    self.status = 'lock'
                    change = True
                    log.write('Task n°'+str(index)+' locked')
                elif self.status == 'pendinglock':
                    self.status = 'pause'
                    change = True
                    log.write('Task n°'+str(index)+' unlocked')
                elif self.status == 'lock':
                    self.status = 'waiting'
                    change = True
                    log.write('Task n°'+str(index)+' unlocked')
                else:
                    log.error('Task n°'+str(index)+' is not lockable/unlockable')
            elif choice == '7':
                if tasks.remove(log, [index]):
                    log.menuOut()
                    log.write('Task n°'+str(index)+' removed')
                    return True
            elif choice == '8':
                # duplicate as a fresh, never-started task at list bottom
                new = self.copy()
                new.status = 'waiting'
                new.log = None
                tasks.tasks.append(new)
                log.write('a copy of the task n°'+str(index)+' have been added at the bottom of the task list')
                change = True
            elif choice == '9' and started:
                self.log.menu(log, index)
            else:
                log.error('Unknow request!', False)
    def menuArchive(self, log, index, tasks):
        '''Interactive menu for an archived task: view log, re-queue, or erase.

        Returns True when the archive/task list changed (caller persists it).
        '''
        log.menuIn('Archived Task n°'+str(index))
        change = False
        while True:
            log.print()
            print('\n        Task n°'+str(index)+' Log :')
            self.print()
            choice = input('''
    Menu :
    1- See Rendering Log
    2- Copy Task In Rendering List
    3- Erase Archived Task
    0- Quit and save
    action : ''').strip().lower()
            if choice in ['0', 'q', 'quit', 'cancel']:
                log.menuOut()
                return change
            elif choice == '1':
                self.log.menu(log, index)
            elif choice == '2':
                # re-queue a fresh, never-started copy in the pending list
                new = self.copy()
                new.status = 'waiting'
                new.log = None
                tasks.tasks.append(new)
                log.write('A copy of the archived task n°'+str(index)+' have been added at the bottom of the pending task list.')
                change = True
            elif choice == '3':
                # destructive: require explicit confirmation
                conf = input('\n\nThe task gone be definitly erased. Confirm action (y) :').strip().lower()
                if conf in ['y', 'yes']:
                    tasks.archive.pop(index)
                    log.write('The archived task n°'+str(index)+' have been erased.')
                    log.menuOut()
                    return True
            else:
                log.error('Unknow request!', False)
    def print(self):
        '''Print task information: status, file, scene, preset, renderlayers.'''
        print('\n\nStatus :              '+self.status)
        print('Path :                '+self.path)
        print('File Name :           '+self.path.split('/').pop())
        print('Scene :               '+self.scene)
        print('Preset :              '+self.preset+'\n')
        print('\033[4mActive Renderlayer :\033[0m')
        self.info.scenes[self.scene].printActiveRenderlayer()
        print('\n')
def renamePreset(self, old, new):
'''a method to rename used preset'''
if self.preset == old:
self.preset = new
def erasePreset(self, preset):
'''a method to stop using preset'''
if self.preset == preset:
self.preset = '[default]'
def getRow(self):
'''A method to get row to print task list'''
name = self.path.split('/').pop()
return columnLimit(' '+name, 25, 5)\
+columnLimit(' '+self.scene, 25, 5)\
+columnLimit(' '+self.preset, 25, 5)
def presetChoice(log, preferences):
'''A method to choose a preset'''
# preset choice
log.menuIn('Preset Choice')
log.print()
print('\n\n \033[4mPreset Choice :\033[0m\n\n')
confirm = input('Use «'+preferences.presets.default+'» default preset? (type anything else that y or yes to choose another one)')
if confirm in ['', 'y', 'yes']:
log.menuOut()
return '[default]'
else:
preset = preferences.presets.choose(log)
log.menuOut()
return preset
    def editPreset(self, log, preferences):
        '''Open the edit menu of the preset (or default preset) used by the task.

        Changes are saved to preferences and affect every task using it.
        '''
        log.error('Warning : all change made to the preset will be effective for all task that use it…')
        if self.preset == '[default]' :
            name = preferences.presets.default
            preset = preferences.presets.presets[name]
        else:
            name = self.preset
            preset = preferences.presets.presets[name]
        # Presets and metapresets expose different menu() signatures.
        if type(preset) is Preset:
            confirm = preset.menu(log, name, preferences.blenderVersion)
        else:
            confirm = preset.menu(log, name, preferences.presets)
        if confirm:
            savePreferences(preferences)
    def copy(self):
        '''Return a deep copy of the task (via XML round-trip) with a fresh uid.'''
        xml = '<?xml version="1.0" encoding="UTF-8"?>\n'+self.toXml()
        xml = xmlMod.fromstring(xml)
        copy = Task(xml = xml)
        # The copy must not share the original's uid (used for scripts/sockets).
        copy.uid = uuid.uuid4().hex
        return copy
    def printRunMenu(self, index, count, log):
        '''Print the current running state (header, task log, run shortcuts).'''
        log.print()
        print('\n\nRun task n°'+str(index)+' of '+str(count)+' :\n\n')
        if self.log is not None:
            self.log.print()
        log.runPrint()
    def run(self, index, taskList, scriptPath, log, preferences):
        '''Execute the task: generate per-version scripts and spawn Blender.

        One Blender subprocess is launched per engine version that still has
        remaining frames; a listener thread collects progress via a socket.
        Always returns True.
        '''
        log.menuIn('run Task '+str(index)+' from '+str(len(taskList.tasks)))
        if self.log is None:
            # task never have been run before
            self.log = TaskLog(pref = preferences, task = self)
        preferences.output.checkAndCreate(self, preferences, taskList)
        self.printRunMenu(index, len(taskList.tasks), log)
        metapreset = self.log.preset
        if type(metapreset) is Preset:
            # Simple preset: a single engine version renders the whole task.
            if self.log.groups[0].remaining() > 0:
                versions = { metapreset.engine.version : '[default]' }
        else:
            # Metapreset: map each engine version to its remaining groups.
            versions = {}
            for group in self.log.groups:
                if group.remaining() > 0:
                    if group.preset.engine.version in versions.keys():
                        versions[group.preset.engine.version].append(group.name)
                    else:
                        versions[group.preset.engine.version] = [group.name]
        # NOTE(review): in the Preset branch, 'versions' is unassigned when no
        # frames remain — presumably that case never reaches run(); confirm.
        scripts = self.createTaskScript(scriptPath, preferences, versions, metapreset)
        results = ''
        for version in versions.keys():
            try:
                # Listener thread accepts the subprocess socket connection.
                l = threading.Thread(target = self.socketAcceptClient,
                            args=(taskList, index, log))
                l.start()
                taskList.listenerThreads.append(l)
                sub = subprocess.Popen(\
                        shlex.split(\
                            preferences.blenderVersion.getVersionPath(version)\
                            +' -b "'+self.path+'" -P "'\
                            +scripts[version]+'"'),\
                        stdout = subprocess.PIPE,\
                        stdin = subprocess.PIPE,\
                        stderr = subprocess.PIPE)
                taskList.renderingSubprocess.append(sub)
                result = sub.communicate()
                taskList.renderingSubprocess.remove(sub)
                results += result[0].decode()+result[1].decode()+'\n\n\n'
            except FileNotFoundError:
                log.write('\033[31mTask n°'+str(index)+' : Blender version call error! Try to verify the path of «'+version+'» blender version!\033[0m')
            if taskList.runningMode in [taskList.UNTIL_GROUP_END,\
                        taskList.UNTIL_FRAME_END,\
                        taskList.STOP_NOW,\
                        taskList.STOP_FORCED]:
                # A stop was requested: don't launch further versions.
                break
        self.eraseTaskScript(scripts)
        log.menuOut()
        return True
    def socketAcceptClient(self, taskList, index, log):
        '''Accept one rendering-subprocess connection and pump its messages.

        Runs in a listener thread until the subprocess reports the end of the
        version's rendering or a STOP mode is requested.
        '''
        client = taskList.socket.accept()[0]
        taskList.listenerSockets.append(
                {
                    'socket':client,
                    'uid':self.uid
                }
            )
        msg = ''
        while taskList.runningMode < taskList.STOP_NOW:
            msg += client.recv(1024).decode()
            if msg == '':
                # Nothing buffered: avoid busy-waiting on the socket.
                time.sleep(1)
            elif msg == self.uid+' VersionEnded EOS':
                # The subprocess finished every group for this version.
                break
            else:
                # treatSocketMessage returns any incomplete trailing fragment
                # so it keeps accumulating until the ' EOS' terminator arrives.
                msg = self.treatSocketMessage(msg, taskList, index, log)
        client.close()
def treatSocketMessage(self, msg, taskList, index, log):
'''a method to interpret socket message'''
if msg[-4:] != ' EOS':
return msg
messages = msg.split(' EOS')
messages.pop()
for m in messages:
# normally, the message is to confirm the rendering of a frame, it must follow this sytaxe:
#uid action(group,frame,date,computingTime) EOS
#fc9b9d6fd2af4e0fb3f09066f9902f90 ConfirmFrame(groupe1,15,10:09:2014:10:30:40,11111111111111) EOS
uid = m[0:32]
action = m[33:m.find('(')]
info = m[46:-1]
if uid == self.uid and action == 'debugMsg':
log.write(info)
elif uid == self.uid and action == 'ConfirmFrame':
info = info.split(',')
group = info[0]
frame = int(info[1])
computingTime = float(info[3])
date = info[2].split(':')
date = datetime.datetime(
year = int(date[2]),
month = int(date[1]),
day = int(date[0]),
hour = int(date[3]),
minute = int(date[4]),
second = int(date[5])
)
self.log.getGroup(group).confirmFrame(frame, date, computingTime)
self.printRunMenu(index, len(taskList.tasks), log)
if messages[-1] == self.uid+' VersionEnded':
return messages[-1]+' EOS'
else:
return ''
    def createTaskScript(self, scriptPath, preferences, versions, preset):
        '''Write one Python launch script per Blender version; return their paths.

        The generated script re-imports this package inside Blender and
        replays the task: preferences and the task itself are embedded as
        XML string literals, and 'groups' lists the groups to render.
        '''
        start = '''#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
''\'module to manage metapreset''\'
import sys
sys.path.append("'''+scriptPath+'''")
import xml.etree.ElementTree as xmlMod
from Preferences.Preferences import *
from Preferences.PresetList.Preset.Preset import *
from Preferences.PresetList.Preset.Metapreset import *
from TaskList.RenderingTask.RenderingTask import *
from TaskList.Task import *
preferences = Preferences( xml = xmlMod.fromstring(''\''''+preferences.toXml(False)+'''''\') )
task = Task( xml = xmlMod.fromstring(''\'<?xml version="1.0" encoding="UTF-8"?>\n'''+self.toXml()+'''''\'))
'''
        end = '\nRenderingTask(task, preferences, groups)'
        paths = {}
        for v, g in versions.items():
            script = start\
                +'groups = ["'+('", "'.join(g) )+'"]\n'\
                +end
            # One script file per version, named after the task uid.
            paths[v] = scriptPath+'/TaskList/RenderingTask/TaskScripts/'+self.uid+'-'+v+'.py'
            with open(paths[v],'w') as taskScriptFile:
                taskScriptFile.write( script )
        return paths
def eraseTaskScript(self, scripts):
'''erase Task Script files'''
for path in scripts.values():
os.remove(path)
def getUsefullGroup(self, groups, preferences):
'''return only usefull group from the list, excluding those who have no renderlayer in this task'''
renderlayers = self.info.scenes[self.scene].getActiveRenderlayers()
confirmed = []
for group in groups:
for RL in renderlayers:
if preferences.presets.renderlayers.groups[group].belongTo(RL.name):
confirmed.append(group)
break
return confirmed
| 25.321555 | 140 | 0.629431 |
import xml.etree.ElementTree as xmlMod
import os, uuid, subprocess, shlex, time, datetime, threading
from save import *
from usefullFunctions import *
from Preferences.PresetList.Preset.Preset import *
from TaskList.FileInfo.FileInfo import *
from TaskList.TaskLog.TaskLog import *
class Task:
def __init__(self, path = None, scene = None, preset = None,\
fileInfo = None, xml= None):
self.running = False
if xml is None:
self.defaultInit(path, scene, preset, fileInfo)
else:
self.fromXml(xml)
def defaultInit(self, path, scene, preset, fileInfo):
self.path = path
self.scene = scene
self.preset = preset
self.info = fileInfo
self.uid = uuid.uuid4().hex
self.log = None
self.status = 'waiting'
def fromXml(self, xml):
self.path = xml.get('path')
self.scene = xml.get('scene')
self.preset = xml.get('preset')
self.uid = xml.get('uid', uuid.uuid4().hex)
self.status = xml.get('status')
self.info = FileInfo(xml.find('fileInfo'))
node = xml.find('log')
if node is not None:
self.log = TaskLog(xml = node)
else:
self.log = None
def toXml(self):
xml = '<task path="'+XML.encode(self.path)+'" scene="'+XML.encode(self.scene)\
+'" preset="'+self.preset+'" uid="'+self.uid\
+'" status="'+self.status+'" >\n'\
+self.info.toXml()
if self.log is not None:
xml += self.log.toXml()
xml += '</task>\n'
return xml
def menu(self, log, index, tasks, preferences):
log.menuIn('Task n°'+str(index))
change = False
started = self.log is not None
if started:
menu = '''
Menu :
(TASK ALREADY STARTED : SOME OPTIONS IS NOT AVAILABLE!)
5- Change list row
6- Lock/Unlock task
7- Erase task
8- Copy task
9- See Rendering Log
0- Quit and save
'''
else:
menu = '''
Menu :
1- Change scene
2- Change preset
3- Edit preset
4- Active/desactive Renderlayer
5- Change list row
6- Lock/Unlock task
7- Erase task
8- Copy task
0- Quit and save
'''
while True:
log.print()
print('\n Edit Task n°'+str(index)+' :')
self.print()
print(menu)
choice= input('action : ').strip().lower()
if choice in ['0', 'q', 'quit', 'cancel']:
log.menuOut()
return change
elif choice == '1' and not started:
scene = self.info.sceneChoice(log, allChoice = False)
if scene is not None:
self.scene = scene[0]
log.write('Task n°'+str(index)+' : Scene set to «'+self.scene+'»')
change = True
elif choice == '2' and not started:
preset = Task.presetChoice(log, preferences)
if preset is not None :
self.preset = preset
log.write('Task n°'+str(index)+' : Preset set to «'+self.preset+'»')
change = True
elif choice == '3' and not started:
self.editPreset(log, preferences)
elif choice == '4' and not started:
confirm = self.info.scenes[self.scene].renderlayerActivator(log)
if confirm:
log.write('change task n°'+str(index)+' active renderlayer')
change = True
elif choice == '5':
confirm, select = tasks.move(log, [index])
if confirm:
change = True
index = select[0]
elif choice == '6':
if self.status in ['ready', 'pause']:
self.status = 'pendinglock'
change = True
log.write('Task n°'+str(index)+' locked')
elif self.status == 'waiting':
self.status = 'lock'
change = True
log.write('Task n°'+str(index)+' locked')
elif self.status == 'pendinglock':
self.status = 'pause'
change = True
log.write('Task n°'+str(index)+' unlocked')
elif self.status == 'lock':
self.status = 'waiting'
change = True
log.write('Task n°'+str(index)+' unlocked')
else:
log.error('Task n°'+str(index)+' is not lockable/unlockable')
elif choice == '7':
if tasks.remove(log, [index]):
log.menuOut()
log.write('Task n°'+str(index)+' removed')
return True
elif choice == '8':
new = self.copy()
new.status = 'waiting'
new.log = None
tasks.tasks.append(new)
log.write('a copy of the task n°'+str(index)+' have been added at the bottom of the task list')
change = True
elif choice == '9' and started:
self.log.menu(log, index)
else:
log.error('Unknow request!', False)
def menuArchive(self, log, index, tasks):
log.menuIn('Archived Task n°'+str(index))
change = False
while True:
log.print()
print('\n Task n°'+str(index)+' Log :')
self.print()
choice = input('''
Menu :
1- See Rendering Log
2- Copy Task In Rendering List
3- Erase Archived Task
0- Quit and save
action : ''').strip().lower()
if choice in ['0', 'q', 'quit', 'cancel']:
log.menuOut()
return change
elif choice == '1':
self.log.menu(log, index)
elif choice == '2':
new = self.copy()
new.status = 'waiting'
new.log = None
tasks.tasks.append(new)
log.write('A copy of the archived task n°'+str(index)+' have been added at the bottom of the pending task list.')
change = True
elif choice == '3':
conf = input('\n\nThe task gone be definitly erased. Confirm action (y) :').strip().lower()
if conf in ['y', 'yes']:
tasks.archive.pop(index)
log.write('The archived task n°'+str(index)+' have been erased.')
log.menuOut()
return True
else:
log.error('Unknow request!', False)
def print(self):
print('\n\nStatus : '+self.status)
print('Path : '+self.path)
print('File Name : '+self.path.split('/').pop())
print('Scene : '+self.scene)
print('Preset : '+self.preset+'\n')
print('\033[4mActive Renderlayer :\033[0m')
self.info.scenes[self.scene].printActiveRenderlayer()
print('\n')
def renamePreset(self, old, new):
if self.preset == old:
self.preset = new
def erasePreset(self, preset):
if self.preset == preset:
self.preset = '[default]'
def getRow(self):
name = self.path.split('/').pop()
return columnLimit(' '+name, 25, 5)\
+columnLimit(' '+self.scene, 25, 5)\
+columnLimit(' '+self.preset, 25, 5)
def presetChoice(log, preferences):
log.menuIn('Preset Choice')
log.print()
print('\n\n \033[4mPreset Choice :\033[0m\n\n')
confirm = input('Use «'+preferences.presets.default+'» default preset? (type anything else that y or yes to choose another one)')
if confirm in ['', 'y', 'yes']:
log.menuOut()
return '[default]'
else:
preset = preferences.presets.choose(log)
log.menuOut()
return preset
def editPreset(self, log, preferences):
log.error('Warning : all change made to the preset will be effective for all task that use it…')
if self.preset == '[default]' :
name = preferences.presets.default
preset = preferences.presets.presets[name]
else:
name = self.preset
preset = preferences.presets.presets[name]
if type(preset) is Preset:
confirm = preset.menu(log, name, preferences.blenderVersion)
else:
confirm = preset.menu(log, name, preferences.presets)
if confirm:
savePreferences(preferences)
def copy(self):
xml = '<?xml version="1.0" encoding="UTF-8"?>\n'+self.toXml()
xml = xmlMod.fromstring(xml)
copy = Task(xml = xml)
copy.uid = uuid.uuid4().hex
return copy
def printRunMenu(self, index, count, log):
log.print()
print('\n\nRun task n°'+str(index)+' of '+str(count)+' :\n\n')
if self.log is not None:
self.log.print()
log.runPrint()
def run(self, index, taskList, scriptPath, log, preferences):
log.menuIn('run Task '+str(index)+' from '+str(len(taskList.tasks)))
if self.log is None:
self.log = TaskLog(pref = preferences, task = self)
preferences.output.checkAndCreate(self, preferences, taskList)
self.printRunMenu(index, len(taskList.tasks), log)
metapreset = self.log.preset
if type(metapreset) is Preset:
if self.log.groups[0].remaining() > 0:
versions = { metapreset.engine.version : '[default]' }
else:
versions = {}
for group in self.log.groups:
if group.remaining() > 0:
if group.preset.engine.version in versions.keys():
versions[group.preset.engine.version].append(group.name)
else:
versions[group.preset.engine.version] = [group.name]
scripts = self.createTaskScript(scriptPath, preferences, versions, metapreset)
results = ''
for version in versions.keys():
try:
l = threading.Thread(target = self.socketAcceptClient,
args=(taskList, index, log))
l.start()
taskList.listenerThreads.append(l)
sub = subprocess.Popen(\
shlex.split(\
preferences.blenderVersion.getVersionPath(version)\
+' -b "'+self.path+'" -P "'\
+scripts[version]+'"'),\
stdout = subprocess.PIPE,\
stdin = subprocess.PIPE,\
stderr = subprocess.PIPE)
taskList.renderingSubprocess.append(sub)
result = sub.communicate()
taskList.renderingSubprocess.remove(sub)
results += result[0].decode()+result[1].decode()+'\n\n\n'
except FileNotFoundError:
log.write('\033[31mTask n°'+str(index)+' : Blender version call error! Try to verify the path of «'+version+'» blender version!\033[0m')
if taskList.runningMode in [taskList.UNTIL_GROUP_END,\
taskList.UNTIL_FRAME_END,\
taskList.STOP_NOW,\
taskList.STOP_FORCED]:
break
self.eraseTaskScript(scripts)
log.menuOut()
return True
def socketAcceptClient(self, taskList, index, log):
client = taskList.socket.accept()[0]
taskList.listenerSockets.append(
{
'socket':client,
'uid':self.uid
}
)
msg = ''
while taskList.runningMode < taskList.STOP_NOW:
msg += client.recv(1024).decode()
if msg == '':
time.sleep(1)
elif msg == self.uid+' VersionEnded EOS':
break
else:
msg = self.treatSocketMessage(msg, taskList, index, log)
client.close()
def treatSocketMessage(self, msg, taskList, index, log):
if msg[-4:] != ' EOS':
return msg
messages = msg.split(' EOS')
messages.pop()
for m in messages:
uid = m[0:32]
action = m[33:m.find('(')]
info = m[46:-1]
if uid == self.uid and action == 'debugMsg':
log.write(info)
elif uid == self.uid and action == 'ConfirmFrame':
info = info.split(',')
group = info[0]
frame = int(info[1])
computingTime = float(info[3])
date = info[2].split(':')
date = datetime.datetime(
year = int(date[2]),
month = int(date[1]),
day = int(date[0]),
hour = int(date[3]),
minute = int(date[4]),
second = int(date[5])
)
self.log.getGroup(group).confirmFrame(frame, date, computingTime)
self.printRunMenu(index, len(taskList.tasks), log)
if messages[-1] == self.uid+' VersionEnded':
return messages[-1]+' EOS'
else:
return ''
def createTaskScript(self, scriptPath, preferences, versions, preset):
start = '''#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
''\'module to manage metapreset''\'
import sys
sys.path.append("'''+scriptPath+'''")
import xml.etree.ElementTree as xmlMod
from Preferences.Preferences import *
from Preferences.PresetList.Preset.Preset import *
from Preferences.PresetList.Preset.Metapreset import *
from TaskList.RenderingTask.RenderingTask import *
from TaskList.Task import *
preferences = Preferences( xml = xmlMod.fromstring(''\''''+preferences.toXml(False)+'''''\') )
task = Task( xml = xmlMod.fromstring(''\'<?xml version="1.0" encoding="UTF-8"?>\n'''+self.toXml()+'''''\'))
'''
end = '\nRenderingTask(task, preferences, groups)'
paths = {}
for v, g in versions.items():
script = start\
+'groups = ["'+('", "'.join(g) )+'"]\n'\
+end
paths[v] = scriptPath+'/TaskList/RenderingTask/TaskScripts/'+self.uid+'-'+v+'.py'
with open(paths[v],'w') as taskScriptFile:
taskScriptFile.write( script )
return paths
def eraseTaskScript(self, scripts):
for path in scripts.values():
os.remove(path)
def getUsefullGroup(self, groups, preferences):
renderlayers = self.info.scenes[self.scene].getActiveRenderlayers()
confirmed = []
for group in groups:
for RL in renderlayers:
if preferences.presets.renderlayers.groups[group].belongTo(RL.name):
confirmed.append(group)
break
return confirmed
| true | true |
f71bd874cf71759c3ef1a835fde21dc3e3e05f61 | 3,201 | py | Python | Finetune/cosface_finetune.py | yuhaoooo/FaceAdv | 73e27b7ca01243a9a3d115f5fabd1008b2afb34a | [
"MIT"
] | 1 | 2022-03-04T09:18:04.000Z | 2022-03-04T09:18:04.000Z | Finetune/cosface_finetune.py | yuhaoooo/FaceAdv | 73e27b7ca01243a9a3d115f5fabd1008b2afb34a | [
"MIT"
] | null | null | null | Finetune/cosface_finetune.py | yuhaoooo/FaceAdv | 73e27b7ca01243a9a3d115f5fabd1008b2afb34a | [
"MIT"
] | null | null | null | import os
import torch
import random
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from module.units.cosface_module import CosFace
def accuracy(logits, y):
    """Mean top-1 accuracy of `logits` (N, C) against integer labels `y` (N,)."""
    top_indices = torch.max(logits, 1)[1]
    return (top_indices == y).float().mean()
if __name__ == "__main__":
    # Script entry: finetune the CosFace classifier head on the clipped-face
    # dataset and save the best-performing weights.
    import copy  # local import: needed to snapshot the best weights

    # Fix all RNG seeds for reproducible splits and training.
    random.seed(117)
    np.random.seed(117)
    torch.manual_seed(117)
    torch.cuda.manual_seed(117)

    transform = transforms.Compose([
        transforms.Resize((112, 96)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    cosface = CosFace(classnum=156, pretrained=r'..\Auxiliary\PretrainedFeatureExtractor\ACC99.28.pth').to(device)

    dataset_dir = r'..\Auxiliary\ClippedFaceBank'
    dataset = datasets.ImageFolder(
        dataset_dir, transform=transform)
    # 80/20 train/test split.
    len_imgs = int(len(dataset) * 0.2)
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - len_imgs, len_imgs])

    batch_size = 32
    workers = 0 if os.name == 'nt' else 8
    epochs = 20
    train_loader = DataLoader(
        train_dataset,
        num_workers=workers,
        batch_size=batch_size,
        shuffle=True
    )
    test_loader = DataLoader(
        test_dataset,
        num_workers=workers,
        batch_size=1,
        shuffle=False
    )

    # Only the classifier head is finetuned; the backbone stays frozen in eval.
    optimizer = optim.Adam(cosface.logits.parameters(), lr=1e-3)
    loss_fn = torch.nn.CrossEntropyLoss()

    cosface.backbone.eval()
    best_acc, best_state_dict = 0., {}
    for epoch in range(epochs):
        print('\nEpoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        # --- training pass (head only) ---
        cosface.logits.train()
        loss = 0.0
        acc = 0.0
        for i_batch, (x, y) in enumerate(train_loader):
            x = x.to(device)
            y = y.to(device)
            optimizer.zero_grad()
            y_pred = cosface(x)
            loss_batch = loss_fn(y_pred, y)

            loss_batch.backward()
            optimizer.step()

            loss += loss_batch.detach().cpu().numpy()
            acc += accuracy(y_pred, y).detach().cpu().numpy()
        loss /= (i_batch + 1)
        acc /= (i_batch + 1)
        print('The train loss is {}, The accuracy is {}'.format(loss, acc))

        # --- evaluation pass ---
        cosface.logits.eval()
        loss, acc = 0.0, 0.0
        with torch.no_grad():  # no gradients needed during evaluation
            for i_batch, (x, y) in enumerate(test_loader):
                x = x.to(device)
                y = y.to(device)
                y_pred = cosface(x)
                loss_batch = loss_fn(y_pred, y)

                loss += loss_batch.detach().cpu().numpy()
                acc += accuracy(y_pred, y).detach().cpu().numpy()
        loss /= (i_batch + 1)
        acc /= (i_batch + 1)
        print('The test loss is {}, The accuracy is {}'.format(loss, acc))

        if best_acc < acc:
            best_acc = acc
            # Deep-copy the snapshot: state_dict() returns references to the
            # live tensors, so without a copy later epochs would silently
            # overwrite the "best" weights before they are saved.
            best_state_dict = copy.deepcopy(cosface.state_dict())
    os.makedirs(r'..\Auxiliary\PretrainedFaceRecognizer', exist_ok=True)
    torch.save(best_state_dict, r'..\Auxiliary\PretrainedFaceRecognizer\finetuned_cosface.pt')
| 31.07767 | 114 | 0.590753 | import os
import torch
import random
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from module.units.cosface_module import CosFace
def accuracy(logits, y):
    """Mean classification accuracy.

    Takes the per-row maximum of `logits` along dim 1 and returns the
    fraction of rows whose winning index equals the label in `y`.
    """
    preds = torch.max(logits, dim=1).indices
    hits = torch.eq(preds, y).float()
    return hits.mean()
if __name__ == "__main__":
    # Fix all RNG seeds so the dataset split and training run are reproducible.
    random.seed(117)
    np.random.seed(117)
    torch.manual_seed(117)
    torch.cuda.manual_seed(117)
    # Input pipeline: resize to the 112x96 crop CosFace expects and map
    # pixels from [0, 1] to [-1, 1] via Normalize(0.5, 0.5).
    transform = transforms.Compose([
        transforms.Resize((112, 96)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    cosface = CosFace(classnum=156, pretrained=r'..\Auxiliary\PretrainedFeatureExtractor\ACC99.28.pth').to(device)
    dataset_dir = r'..\Auxiliary\ClippedFaceBank'
    dataset = datasets.ImageFolder(
        dataset_dir, transform=transform)
    # Hold out 20% of the images as the evaluation split.
    len_imgs = int(len(dataset) * 0.2)
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - len_imgs, len_imgs])
    batch_size = 32
    # Use single-process data loading on Windows ('nt'), 8 workers elsewhere.
    workers = 0 if os.name == 'nt' else 8
    epochs = 20
    train_loader = DataLoader(
        train_dataset,
        num_workers=workers,
        batch_size=batch_size,
        shuffle=True
    )
    test_loader = DataLoader(
        test_dataset,
        num_workers=workers,
        batch_size=1,
        shuffle=False
    )
    # Fine-tune only the classification head: the optimizer is given just
    # cosface.logits parameters, and the backbone is kept in eval mode.
    optimizer = optim.Adam(cosface.logits.parameters(), lr=1e-3)
    loss_fn = torch.nn.CrossEntropyLoss()
    cosface.backbone.eval()
    best_acc, best_state_dict = 0., {}
    for epoch in range(epochs):
        print('\nEpoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)
        # Training pass over the 80% split.
        cosface.logits.train()
        loss = 0.0
        acc = 0.0
        for i_batch, (x, y) in enumerate(train_loader):
            x = x.to(device)
            y = y.to(device)
            optimizer.zero_grad()
            y_pred = cosface(x)
            loss_batch = loss_fn(y_pred, y)
            loss_batch.backward()
            optimizer.step()
            loss += loss_batch.detach().cpu().numpy()
            acc += accuracy(y_pred, y).detach().cpu().numpy()
        # NOTE(review): i_batch leaks out of the loop; this raises NameError
        # if the loader is empty -- assumes the dataset is never empty.
        loss /= (i_batch + 1)
        acc /= (i_batch + 1)
        print('The train loss is {}, The accuracy is {}'.format(loss, acc))
        # Evaluation pass over the held-out split (batch size 1).
        cosface.logits.eval()
        loss, acc = 0.0, 0.0
        for i_batch, (x, y) in enumerate(test_loader):
            x = x.to(device)
            y = y.to(device)
            y_pred = cosface(x)
            loss_batch = loss_fn(y_pred, y)
            loss += loss_batch.detach().cpu().numpy()
            acc += accuracy(y_pred, y).detach().cpu().numpy()
        loss /= (i_batch + 1)
        acc /= (i_batch + 1)
        print('The test loss is {}, The accuracy is {}'.format(loss, acc))
        # Keep a snapshot of the weights from the best test-accuracy epoch.
        if best_acc < acc:
            best_acc = acc
            best_state_dict = cosface.state_dict()
    os.makedirs(r'..\Auxiliary\PretrainedFaceRecognizer', exist_ok=True)
    torch.save(best_state_dict, r'..\Auxiliary\PretrainedFaceRecognizer\finetuned_cosface.pt')
| true | true |
f71bd9829999b0a992bd60e9355172969776fbbe | 881 | py | Python | tests.py | gmr/ecs-pipeline-deploy | 1e3e93a2497f4697c01de093c73a5cd78eebad63 | [
"BSD-3-Clause"
] | 2 | 2018-08-13T17:48:58.000Z | 2018-10-29T20:10:11.000Z | tests.py | gmr/ecs-pipeline-deploy | 1e3e93a2497f4697c01de093c73a5cd78eebad63 | [
"BSD-3-Clause"
] | null | null | null | tests.py | gmr/ecs-pipeline-deploy | 1e3e93a2497f4697c01de093c73a5cd78eebad63 | [
"BSD-3-Clause"
] | 1 | 2018-08-09T18:34:58.000Z | 2018-08-09T18:34:58.000Z | # coding=utf-8
import unittest
from ecs_pipeline_deploy import cli
class TestImageParsing(unittest.TestCase):
    """Tests for cli.ECSPipeline.parse_image.

    Per the expectations below, parse_image splits a Docker image
    reference into a (registry, repository, tag) tuple: the registry is
    None when the reference has no registry component and the tag
    defaults to "latest" when omitted.
    """

    # Image reference -> expected (registry, repository, tag) tuple.
    IMAGES = {
        'alpine': (None, 'alpine', 'latest'),
        'alpine:3.7': (None, 'alpine', '3.7'),
        'docker.aweber.io/_/alpine:3.7':
            ('docker.aweber.io', '_/alpine', '3.7'),
        'docker.aweber.io/pse/anabroker:0.1.0':
            ('docker.aweber.io', 'pse/anabroker', '0.1.0'),
        'docker.aweber.io:8000/pse/anabroker:latest':
            ('docker.aweber.io:8000', 'pse/anabroker', 'latest')
    }
    def test_parsing_expectations(self):
        """Every reference in IMAGES parses to its expected tuple."""
        for image, expectation in self.IMAGES.items():
            result = cli.ECSPipeline.parse_image(image)
            self.assertEqual(result, expectation)
    def test_parsing_exceptions(self):
        """Parsing a None image reference raises ValueError."""
        with self.assertRaises(ValueError):
            cli.ECSPipeline.parse_image(None)
| 32.62963 | 64 | 0.61521 |
import unittest
from ecs_pipeline_deploy import cli
class TestImageParsing(unittest.TestCase):
IMAGES = {
'alpine': (None, 'alpine', 'latest'),
'alpine:3.7': (None, 'alpine', '3.7'),
'docker.aweber.io/_/alpine:3.7':
('docker.aweber.io', '_/alpine', '3.7'),
'docker.aweber.io/pse/anabroker:0.1.0':
('docker.aweber.io', 'pse/anabroker', '0.1.0'),
'docker.aweber.io:8000/pse/anabroker:latest':
('docker.aweber.io:8000', 'pse/anabroker', 'latest')
}
def test_parsing_expectations(self):
for image, expectation in self.IMAGES.items():
result = cli.ECSPipeline.parse_image(image)
self.assertEqual(result, expectation)
def test_parsing_exceptions(self):
with self.assertRaises(ValueError):
cli.ECSPipeline.parse_image(None)
| true | true |
f71bd9cfe2794743457c4c581ce15146ca56acf0 | 3,438 | py | Python | trac_captcha/release.py | FelixSchwarz/trac-captcha | 90eb4d3b4dae297e23f09a99a91bcfabcd099dc6 | [
"MIT"
] | 1 | 2020-10-23T14:59:42.000Z | 2020-10-23T14:59:42.000Z | trac_captcha/release.py | FelixSchwarz/trac-captcha | 90eb4d3b4dae297e23f09a99a91bcfabcd099dc6 | [
"MIT"
] | null | null | null | trac_captcha/release.py | FelixSchwarz/trac-captcha | 90eb4d3b4dae297e23f09a99a91bcfabcd099dc6 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
name = 'TracCaptcha'
version = '0.4dev'
description = 'pluggable captcha infrastructure for Trac with reCAPTCHA included'
long_description = '''
TracCaptcha is a Trac plugin to embed a captcha in the ticket page in addition
to Trac's regular permission checks so that spammers are kept out.
**"It just works":** Installation and configuration is very simple, just
install the egg and put two configuration options in your trac.ini. No
database changes required.
**Batteries included:** The popular reCAPTCHA system is supported out of the box.
Technically it's a plugin - if you don't like it you're free to use any other
plugin while still leverage the benefits from the general captcha
infrastructure.
**Does not annoy users:** After the user entered the captcha once, he does not have
to solve the captcha again for the same ticket when he just clicks 'preview'.
Also you can configure exempt certain users or groups (e.g. 'all authenticated
users') from the captchas just by using Trac's permission system.
**Easy to extend:** Protecting an additional page with a captcha is very
simple. Implementing captchas for the ticket module took only 20 lines of code!
Captchas for the DiscussionPlugin needed 21 lines of code!
**Easy to write custom captchas:** If you don't like reCAPTCHA, you can still
use the generic infrastructure with all its features: You implement the code to
generate the captcha and validate the user's input. TracCaptcha will take care
of displaying your plugin in all supported pages!
Changelog
******************************
0.?.? (??.??.2012)
====================
- fixed a compatibility issue for Python 2.4 with separately installed
hashlib module
- reCAPTCHA: sometimes scripts were included with HTTP even though the page was
served with HTTPS
0.3.1 (30.03.2011)
====================
- add config option to omit noscript section for recaptcha to enforce Javascript
support for users
0.3 (25.03.2011)
====================
- add more debug logging about CAPTCHA display and accepted/rejected solutions
to identify better how spammers managed to file a spam ticket
0.2.2 (04.02.2011)
====================
- fix tests on current Trac trunk (0.13dev)
- fix: TICKET_ADMIN looses other ticket-related permissions on Trac < 0.13
thanks to Anton V. for reporting
0.2.1 (10.11.2010)
====================
- fix "installation" as egg file in Trac plugins folder
0.2 (10.07.2010)
====================
- integration in 3rd party Trac plugins: TracDiscussionPlugin and
AccountManager (registration only)
- reCAPTCHA: select widget theme via trac.ini (requires simplejson for
Python 2.3-2.5)
- reCAPTCHA: display the widget in the user's locale (if translation is provided
by the reCAPTCHA service)
- reCAPTCHA: use HTTPS to include script files if Trac page was served with
HTTPS
- reCAPTCHA: show link for reCAPTCHA signup if no keys configured
- reCAPTCHA: use new Google URLs
0.1 (25.06.2010)
==================
- initial release
'''
author = 'Felix Schwarz'
author_email = 'felix.schwarz@oss.schwarz.eu'
url = 'http://www.schwarz.eu/opensource/projects/trac_captcha/'
download_url = 'http://www.schwarz.eu/opensource/projects/trac_captcha/download/%(version)s/%(name)s-%(version)s.tar.gz' % dict(name=name, version=version)
# prefix it with '_' so the symbol is not passed to setuptools.setup()
_copyright = u'2010 Felix Schwarz'
license='MIT'
| 38.629213 | 155 | 0.726294 |
name = 'TracCaptcha'
version = '0.4dev'
description = 'pluggable captcha infrastructure for Trac with reCAPTCHA included'
long_description = '''
TracCaptcha is a Trac plugin to embed a captcha in the ticket page in addition
to Trac's regular permission checks so that spammers are kept out.
**"It just works":** Installation and configuration is very simple, just
install the egg and put two configuration options in your trac.ini. No
database changes required.
**Batteries included:** The popular reCAPTCHA system is supported out of the box.
Technically it's a plugin - if you don't like it you're free to use any other
plugin while still leverage the benefits from the general captcha
infrastructure.
**Does not annoy users:** After the user entered the captcha once, he does not have
to solve the captcha again for the same ticket when he just clicks 'preview'.
Also you can configure exempt certain users or groups (e.g. 'all authenticated
users') from the captchas just by using Trac's permission system.
**Easy to extend:** Protecting an additional page with a captcha is very
simple. Implementing captchas for the ticket module took only 20 lines of code!
Captchas for the DiscussionPlugin needed 21 lines of code!
**Easy to write custom captchas:** If you don't like reCAPTCHA, you can still
use the generic infrastructure with all its features: You implement the code to
generate the captcha and validate the user's input. TracCaptcha will take care
of displaying your plugin in all supported pages!
Changelog
******************************
0.?.? (??.??.2012)
====================
- fixed a compatibility issue for Python 2.4 with separately installed
hashlib module
- reCAPTCHA: sometimes scripts were included with HTTP even though the page was
served with HTTPS
0.3.1 (30.03.2011)
====================
- add config option to omit noscript section for recaptcha to enforce Javascript
support for users
0.3 (25.03.2011)
====================
- add more debug logging about CAPTCHA display and accepted/rejected solutions
to identify better how spammers managed to file a spam ticket
0.2.2 (04.02.2011)
====================
- fix tests on current Trac trunk (0.13dev)
- fix: TICKET_ADMIN looses other ticket-related permissions on Trac < 0.13
thanks to Anton V. for reporting
0.2.1 (10.11.2010)
====================
- fix "installation" as egg file in Trac plugins folder
0.2 (10.07.2010)
====================
- integration in 3rd party Trac plugins: TracDiscussionPlugin and
AccountManager (registration only)
- reCAPTCHA: select widget theme via trac.ini (requires simplejson for
Python 2.3-2.5)
- reCAPTCHA: display the widget in the user's locale (if translation is provided
by the reCAPTCHA service)
- reCAPTCHA: use HTTPS to include script files if Trac page was served with
HTTPS
- reCAPTCHA: show link for reCAPTCHA signup if no keys configured
- reCAPTCHA: use new Google URLs
0.1 (25.06.2010)
==================
- initial release
'''
author = 'Felix Schwarz'
author_email = 'felix.schwarz@oss.schwarz.eu'
url = 'http://www.schwarz.eu/opensource/projects/trac_captcha/'
download_url = 'http://www.schwarz.eu/opensource/projects/trac_captcha/download/%(version)s/%(name)s-%(version)s.tar.gz' % dict(name=name, version=version)
_copyright = u'2010 Felix Schwarz'
license='MIT'
| true | true |
f71bdaca1e11f3f5800e7701806de0a2ef37a0d7 | 4,092 | py | Python | setup.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 867 | 2016-12-05T20:24:23.000Z | 2022-02-18T09:07:14.000Z | setup.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 90 | 2017-01-14T22:46:23.000Z | 2021-02-09T13:32:27.000Z | setup.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 135 | 2017-01-18T19:21:20.000Z | 2022-01-24T16:57:59.000Z | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
###############################################################################
from __future__ import print_function
import sys
###############################################################################
def error_message(msg):
    """Print a centered error banner to stderr and exit with status 1.

    Each line of the banner is centered in a 60-character field.

    Arguments:
        msg: one-line description of what went wrong.
    """
    line_width = 60
    # Produces '{: ^60}': center each line in a 60-char field.
    format_spec = '{{: ^{width}}}'.format(width=line_width)
    lines = [
        '', '',
        '='*line_width, '',
        'ERROR', '',
        # BUG FIX: a missing comma after '' fused it with the next literal
        # ('' 'See our ...'), which dropped the intended blank line here.
        msg, '',
        'See our troubleshooting page to get started:', '',
        'https://kur.deepgram.com/troubleshooting.html#installation', '',
        '='*line_width, '',
        "Uh, oh. There was an error. Look up there ^^^^ and you'll be",
        'training awesome models in no time!'
    ]
    for line in lines:
        print(format_spec.format(line), file=sys.stderr)
    sys.exit(1)
###############################################################################
# Fail fast with the friendly banner before any Python-3-only code runs.
if sys.version_info < (3, 4):
    error_message('Kur requires Python 3.4 or later.')
###############################################################################
# pylint: disable=wrong-import-position
import os
from setuptools import setup, find_packages
# pylint: enable=wrong-import-position
################################################################################
def readme():
    """ Return the README text.

    Reads README.rst and trims it to the part meant for the package
    description: everything before the ``.. package_readme_ends_here``
    sentinel and after the ``.. package_readme_starts_here`` sentinel,
    minus any paragraph pair flagged by ``.. package_readme_ignore``.
    """
    # Read bytes and decode explicitly so the result is unicode text.
    with open('README.rst', 'rb') as fh:
        result = fh.read()
    result = result.decode('utf-8')
    # Drop everything from the end marker onwards (if present).
    token = '.. package_readme_ends_here'
    mark = result.find(token)
    if mark >= 0:
        result = result[:mark]
    # Drop everything up to and including the start marker (if present).
    token = '.. package_readme_starts_here'
    mark = result.find(token)
    if mark >= 0:
        result = result[mark+len(token):]
    chunks = []
    skip = False
    for chunk in result.split('\n\n'):
        if not chunk:
            pass
        elif chunk.strip().startswith('.. package_readme_ignore'):
            # The marker paragraph itself is dropped, and `skip` drops
            # the single paragraph that follows it.
            skip = True
        elif skip:
            skip = False
        else:
            chunks.append(chunk)
    result = '\n\n'.join(chunks)
    return result
################################################################################
def get_version():
    """Read the package version out of kur/version.py.

    Scans the file for the ``__version__`` assignment instead of importing
    the package, and returns the unquoted version string.

    Raises:
        ValueError: if no ``__version__`` line is found.
    """
    version_py = os.path.join(os.path.dirname(__file__), 'kur', 'version.py')
    with open(version_py, 'r') as source:
        for raw_line in source:
            if not raw_line.startswith('__version__'):
                continue
            return raw_line.split('=')[-1].strip().replace('"', '')
    raise ValueError('Failed to parse version from: {}'.format(version_py))
################################################################################
setup(
# Package information
name='kur',
version=get_version(),
description='Descriptive deep learning',
long_description=readme(),
keywords='deep learning',
classifiers=[
],
# Author information
url='https://github.com/deepgram/kur',
author='Adam Sypniewski',
author_email='adam@deepgram.com',
license='Apache Software License '
'(http://www.apache.org/licenses/LICENSE-2.0)',
# What is packaged here.
packages=find_packages(),
# What to include.
package_data={
'': ['*.txt', '*.rst', '*.md']
},
# Dependencies
install_requires=[
'pyyaml>=3.12',
'jinja2>=2.8',
'numpy>=1.11.2',
'tqdm>=4.10.0',
# Keras - the default backend (with Theano)
'keras>=1.2.2',
'theano>=0.8.2',
'scipy>=0.18.1',
'python-magic>=0.4.12',
'pydub>=0.16.6',
'python_speech_features>=0.4',
'matplotlib>=1.5.3'
],
dependency_links=[
],
# Testing
test_suite='tests',
tests_require=[
'pytest',
'tensorflow'
],
setup_requires=['pytest-runner'],
entry_points={
'console_scripts' : ['kur=kur.__main__:main']
},
zip_safe=False
)
#### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| 25.575 | 80 | 0.585533 | true | true | |
f71bdb90d7af4d9b66887e0eed93bbd899a25113 | 1,264 | py | Python | tests/test_blog.py | Geerocktricks/Blogs4Keeps | 5a91a1c9ec38ecc50b5831d526dd376463a56210 | [
"MIT"
] | null | null | null | tests/test_blog.py | Geerocktricks/Blogs4Keeps | 5a91a1c9ec38ecc50b5831d526dd376463a56210 | [
"MIT"
] | null | null | null | tests/test_blog.py | Geerocktricks/Blogs4Keeps | 5a91a1c9ec38ecc50b5831d526dd376463a56210 | [
"MIT"
] | null | null | null | from app.models import Blog , User
from app import db
def setUp(self):
    # NOTE(review): these look like unittest.TestCase methods, but the module
    # defines them at top level with no TestCase subclass, so no test runner
    # will collect them as-is. Wrapping them in a class would change the
    # module's public names, so only the in-body defects are fixed here.
    self.user_Gerald = User(username='Gerald', password='potato',
                            email='geerockface4@gmail.com')
    # BUG FIX: `date = 1-12-2019` evaluated to the integer -2030 and
    # `time = 16:04` was a syntax error; both are passed as strings now.
    # TODO(review): confirm the Blog model's expected date/time types.
    self.new_blog = Blog(id=12345, title='Full stack development',
                         content='Is it safe to go the full-stack way or better the Android path',
                         category="technology", date='1-12-2019',
                         time='16:04', user=self.new_user_or(self) if False else self.user_Gerald)
def tearDown(self):
    # Remove every Blog and User row so each test starts from clean tables.
    Blog.query.delete()
    User.query.delete()
def test_check_instance_variables(self):
    self.assertEquals(self.new_blog.id, 12345)
    self.assertEquals(self.new_blog.title, 'Full stack development')
    self.assertEquals(self.new_blog.content, "Is it safe to go the full-stack way or better the Android path")
    self.assertEquals(self.new_blog.category, 'technology')
    # Matches the string date/time values passed in setUp above.
    self.assertEquals(self.new_blog.date, '1-12-2019')
    self.assertEquals(self.new_blog.time, '16:04')
    self.assertEquals(self.new_blog.user, self.user_Gerald)
def test_save_blog(self):
    self.new_blog.save_blog()
    # BUG FIX: the original asserted on `Review`, a name this module never
    # imports (NameError at runtime); the saved rows live in Blog.
    self.assertTrue(len(Blog.query.all()) > 0)
def test_get_blog_by_id(self):
    # BUG FIX: save_review() is not used anywhere else on Blog;
    # test_save_blog uses save_blog(), so use the same method here.
    self.new_blog.save_blog()
    got_blogs = Blog.get_blogs(12345)
self.assertTrue(len(got_blogs) == 1) | 40.774194 | 224 | 0.699367 | from app.models import Blog , User
from app import db
def setUp(self):
self.user_Gerald = User(username = 'Gerald',password = 'potato', email = 'geerockface4@gmail.com')
self.new_blog = Blog(id=12345,title='Full stack development',content='Is it safe to go the full-stack way or better the Android path',category= "technology" , date = 1-12-2019, time = 16:04 ,user = self.user_Gerald )
def tearDown(self):
Blog.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_blog.id,12345)
self.assertEquals(self.new_blog.title,'Full stack development')
self.assertEquals(self.new_blog.content,"Is it safe to go the full-stack way or better the Android path")
self.assertEquals(self.new_blog.category,'technology')
self.assertEquals(self.new_blog.date, 1-12-2019)
self.assertEquals(self.new_blog. time,16:04)
self.assertEquals(self.new_blog.user,self.user_Gerald)
def test_save_blog(self):
self.new_blog.save_blog()
self.assertTrue(len(Review.query.all())>0)
def test_get_blog_by_id(self):
self.new_blog.save_review()
got_blogs = Blog.get_blogs(12345)
self.assertTrue(len(got_blogs) == 1) | false | true |
f71bdbabe6cef239bb72385c05db3589ac1298ec | 1,226 | py | Python | redash/cli/queries.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 20,680 | 2015-11-16T15:38:37.000Z | 2022-03-31T21:43:43.000Z | redash/cli/queries.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 3,934 | 2015-11-16T14:46:49.000Z | 2022-03-31T13:22:31.000Z | redash/cli/queries.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 4,147 | 2015-11-17T15:57:23.000Z | 2022-03-31T11:57:43.000Z | from click import argument
from flask.cli import AppGroup
from sqlalchemy.orm.exc import NoResultFound
manager = AppGroup(help="Queries management commands.")
@manager.command()
@argument("query_id")
@argument("tag")
def add_tag(query_id, tag):
    """CLI command: attach `tag` to the query identified by `query_id`."""
    from redash import models

    try:
        query = models.Query.get_by_id(int(query_id))
    except NoResultFound:
        print("Query not found.")
        exit(1)

    # Treat a missing tag list as empty, then append and de-duplicate.
    tag_list = query.tags if query.tags is not None else []
    tag_list.append(tag)
    query.tags = list(set(tag_list))

    models.db.session.add(query)
    models.db.session.commit()
    print("Tag added.")
@manager.command()
@argument("query_id")
@argument("tag")
def remove_tag(query_id, tag):
    """CLI command: detach `tag` from the query identified by `query_id`."""
    from redash import models

    try:
        query = models.Query.get_by_id(int(query_id))
    except NoResultFound:
        print("Query not found.")
        exit(1)

    tag_list = query.tags
    if tag_list is None:
        print("Tag is empty.")
        exit(1)

    try:
        tag_list.remove(tag)
    except ValueError:
        print("Tag not found.")
        exit(1)

    query.tags = list(set(tag_list))
    models.db.session.add(query)
    models.db.session.commit()
    print("Tag removed.")
| 18.861538 | 55 | 0.620718 | from click import argument
from flask.cli import AppGroup
from sqlalchemy.orm.exc import NoResultFound
manager = AppGroup(help="Queries management commands.")
@manager.command()
@argument("query_id")
@argument("tag")
def add_tag(query_id, tag):
from redash import models
query_id = int(query_id)
try:
q = models.Query.get_by_id(query_id)
except NoResultFound:
print("Query not found.")
exit(1)
tags = q.tags
if tags is None:
tags = []
tags.append(tag)
q.tags = list(set(tags))
models.db.session.add(q)
models.db.session.commit()
print("Tag added.")
@manager.command()
@argument("query_id")
@argument("tag")
def remove_tag(query_id, tag):
from redash import models
query_id = int(query_id)
try:
q = models.Query.get_by_id(query_id)
except NoResultFound:
print("Query not found.")
exit(1)
tags = q.tags
if tags is None:
print("Tag is empty.")
exit(1)
try:
tags.remove(tag)
except ValueError:
print("Tag not found.")
exit(1)
q.tags = list(set(tags))
models.db.session.add(q)
models.db.session.commit()
print("Tag removed.")
| true | true |
f71bdc17b0e03a9554e767de798e80d4152e68b0 | 4,093 | py | Python | scripts/mk_copy_coords.py | diogomart/Meeko | 8af7466fd45a5d5de00a2d188ee3d4531b8372a5 | [
"Apache-2.0"
] | 4 | 2021-08-04T07:25:48.000Z | 2021-09-22T09:57:35.000Z | scripts/mk_copy_coords.py | diogomart/Meeko | 8af7466fd45a5d5de00a2d188ee3d4531b8372a5 | [
"Apache-2.0"
] | 4 | 2021-08-16T19:05:19.000Z | 2021-10-04T22:21:35.000Z | scripts/mk_copy_coords.py | diogomart/Meeko | 8af7466fd45a5d5de00a2d188ee3d4531b8372a5 | [
"Apache-2.0"
] | 2 | 2021-09-18T12:10:12.000Z | 2021-09-22T06:13:02.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
import argparse
import os
import sys
from rdkit import Chem
from rdkit.six import StringIO
from meeko import PDBQTMolecule
def cmd_lineparser():
    """Define and parse the command-line arguments.

    Returns an argparse.Namespace with: docking_results_filename,
    template_filename, output_filename, suffix_name and redirect_stdout.
    """
    parser = argparse.ArgumentParser(description='Copy atom coordinates from PDBQT (or DLG) file \
                                    to original molecule file format (SDF or MOL2)')
    # Required positional argument: the docking result to read poses from.
    parser.add_argument(dest='docking_results_filename',
                        action='store', help='Docking output file to get coordinates. Either a PDBQT \
                        file from Vina or a DLG file from AD-GPU.')
    parser.add_argument('-i', '--original_input', dest='template_filename',
                        action='store', help='Template molecule file, i.e. the original file that was \
                        used to prepare the PDBQT filename (hopefully SDF). If no template is provided, \
                        the SMILES string in the PDBQT remarks will be used to generate an SDF file.')
    parser.add_argument('-o', '--output_filename', dest='output_filename',
                        action='store', help='Output molecule filename. If not specified, suffix _docked is \
                        added to the filename based on the input molecule file, and using the same \
                        molecule file format')
    parser.add_argument('-s', '--suffix', dest='suffix_name', default='_docked',
                        action='store', help='Add suffix to output filename if -o/--output_filename \
                        not specified. WARNING: If specified as empty string (\'\'), this will overwrite \
                        the original molecule input file (default: _docked).')
    # Bare '-'/'--' flag: emit the converted poses on STDOUT instead of a file.
    parser.add_argument('-', '--', dest='redirect_stdout', action='store_true',
                        help='do not write file, redirect output to STDOUT. Arguments -o/--output_filename \
                        is ignored.')
    return parser.parse_args()
if __name__ == '__main__':
    args = cmd_lineparser()
    docking_results_filename = args.docking_results_filename
    template_filename = args.template_filename
    output_filename = args.output_filename
    suffix_name = args.suffix_name
    redirect_stdout = args.redirect_stdout
    output_string = ''
    # AutoDock-GPU results end in .dlg; anything else is treated as Vina PDBQT.
    is_dlg = docking_results_filename.endswith('.dlg')
    pdbqt_mol = PDBQTMolecule.from_file(docking_results_filename, is_dlg=is_dlg, skip_typing=True)
    if template_filename is not None: # OBMol from template_filename
        # Output format: taken from the explicit output name if given,
        # otherwise from the template's extension.
        if output_filename is not None:
            output_format = os.path.splitext(output_filename)[1][1:]
        else:
            output_format = os.path.splitext(template_filename)[1][1:]
        # NOTE(review): `ob` and `obutils` are never imported in this script
        # (only argparse/os/sys, rdkit and meeko are), so this branch raises
        # NameError as written. Presumably OpenBabel's python bindings and an
        # obutils helper module are required -- confirm against the project.
        conv = ob.OBConversion()
        success = conv.SetOutFormat(output_format)
        if not success:
            raise RuntimeError('Input molecule file format %s not recognized by OpenBabel.' % output_format)
        ori_obmol = obutils.load_molecule_from_file(template_filename)
        for pose in pdbqt_mol:
            copy_obmol = ob.OBMol(ori_obmol) # connectivity may be corrupted by removing and adding Hs multiple times
            pose.copy_coordinates_to_obmol(copy_obmol)
            output_string += conv.WriteString(copy_obmol)
    else: # RDKit mol from SMILES in docking output PDBQT remarks
        if pdbqt_mol._pose_data['smiles'] is None:
            msg = "\n\n \"REMARK SMILES\" not found in %s.\n" % docking_results_filename
            msg += " Consider using -i/--original_input\n"
            raise RuntimeError(msg)
        # Accumulate every pose into one multi-record SDF string in memory.
        sio = StringIO()
        f = Chem.SDWriter(sio)
        for pose in pdbqt_mol:
            rdmol = pose.export_rdkit_mol()
            f.write(rdmol)
        f.close()
        output_string += sio.getvalue()
        output_format = 'sdf'
    if not redirect_stdout:
        # Default output name: <docking input stem><suffix>.<format>
        if output_filename is None:
            output_filename = '%s%s.%s' % (os.path.splitext(docking_results_filename)[0], suffix_name, output_format)
        print(output_string, file=open(output_filename, 'w'))
    else:
        print(output_string)
| 46.511364 | 117 | 0.636697 |
import argparse
import os
import sys
from rdkit import Chem
from rdkit.six import StringIO
from meeko import PDBQTMolecule
def cmd_lineparser():
parser = argparse.ArgumentParser(description='Copy atom coordinates from PDBQT (or DLG) file \
to original molecule file format (SDF or MOL2)')
parser.add_argument(dest='docking_results_filename',
action='store', help='Docking output file to get coordinates. Either a PDBQT \
file from Vina or a DLG file from AD-GPU.')
parser.add_argument('-i', '--original_input', dest='template_filename',
action='store', help='Template molecule file, i.e. the original file that was \
used to prepare the PDBQT filename (hopefully SDF). If no template is provided, \
the SMILES string in the PDBQT remarks will be used to generate an SDF file.')
parser.add_argument('-o', '--output_filename', dest='output_filename',
action='store', help='Output molecule filename. If not specified, suffix _docked is \
added to the filename based on the input molecule file, and using the same \
molecule file format')
parser.add_argument('-s', '--suffix', dest='suffix_name', default='_docked',
action='store', help='Add suffix to output filename if -o/--output_filename \
not specified. WARNING: If specified as empty string (\'\'), this will overwrite \
the original molecule input file (default: _docked).')
parser.add_argument('-', '--', dest='redirect_stdout', action='store_true',
help='do not write file, redirect output to STDOUT. Arguments -o/--output_filename \
is ignored.')
return parser.parse_args()
if __name__ == '__main__':
args = cmd_lineparser()
docking_results_filename = args.docking_results_filename
template_filename = args.template_filename
output_filename = args.output_filename
suffix_name = args.suffix_name
redirect_stdout = args.redirect_stdout
output_string = ''
is_dlg = docking_results_filename.endswith('.dlg')
pdbqt_mol = PDBQTMolecule.from_file(docking_results_filename, is_dlg=is_dlg, skip_typing=True)
if template_filename is not None:
if output_filename is not None:
output_format = os.path.splitext(output_filename)[1][1:]
else:
output_format = os.path.splitext(template_filename)[1][1:]
conv = ob.OBConversion()
success = conv.SetOutFormat(output_format)
if not success:
raise RuntimeError('Input molecule file format %s not recognized by OpenBabel.' % output_format)
ori_obmol = obutils.load_molecule_from_file(template_filename)
for pose in pdbqt_mol:
copy_obmol = ob.OBMol(ori_obmol)
pose.copy_coordinates_to_obmol(copy_obmol)
output_string += conv.WriteString(copy_obmol)
else:
if pdbqt_mol._pose_data['smiles'] is None:
msg = "\n\n \"REMARK SMILES\" not found in %s.\n" % docking_results_filename
msg += " Consider using -i/--original_input\n"
raise RuntimeError(msg)
sio = StringIO()
f = Chem.SDWriter(sio)
for pose in pdbqt_mol:
rdmol = pose.export_rdkit_mol()
f.write(rdmol)
f.close()
output_string += sio.getvalue()
output_format = 'sdf'
if not redirect_stdout:
if output_filename is None:
output_filename = '%s%s.%s' % (os.path.splitext(docking_results_filename)[0], suffix_name, output_format)
print(output_string, file=open(output_filename, 'w'))
else:
print(output_string)
| true | true |
f71bdcb3835402e5205a96a5e1e0df7c469df44e | 160 | py | Python | bidfx/__init__.py | bidfx/bidfx-api-py | 6b5e2c5efaa547b2d97a5556ef8d21d1de807f68 | [
"Apache-2.0"
] | 3 | 2020-04-29T09:19:56.000Z | 2021-03-08T11:12:05.000Z | bidfx/__init__.py | bidfx/bidfx-api-py | 6b5e2c5efaa547b2d97a5556ef8d21d1de807f68 | [
"Apache-2.0"
] | 3 | 2020-03-08T21:54:02.000Z | 2021-02-02T22:33:51.000Z | bidfx/__init__.py | bidfx/bidfx-api-py | 6b5e2c5efaa547b2d97a5556ef8d21d1de807f68 | [
"Apache-2.0"
] | 2 | 2020-06-13T10:52:18.000Z | 2022-03-02T17:29:45.000Z | from .exceptions import *
from .pricing import *
from .session import *
__all__ = session.__all__ + pricing.__all__ + exceptions.__all__
__version__ = "1.0.2"
| 22.857143 | 64 | 0.75 | from .exceptions import *
from .pricing import *
from .session import *
__all__ = session.__all__ + pricing.__all__ + exceptions.__all__
__version__ = "1.0.2"
| true | true |
f71bdcb6632ab0a396bd37cc7b250ad677eaebe8 | 12,992 | py | Python | examples/PPO_super_mario_bros/env.py | hybug/RL_Lab | 0748e143a0fb60b9912ca28fbebc25e8f97a2fe4 | [
"Unlicense"
] | 3 | 2020-12-31T02:20:15.000Z | 2021-11-16T02:26:57.000Z | examples/PPO_super_mario_bros/env.py | hybug/RL_Lab | 0748e143a0fb60b9912ca28fbebc25e8f97a2fe4 | [
"Unlicense"
] | null | null | null | examples/PPO_super_mario_bros/env.py | hybug/RL_Lab | 0748e143a0fb60b9912ca28fbebc25e8f97a2fe4 | [
"Unlicense"
] | null | null | null | '''
Author: hanyu
Date: 2020-11-06 13:04:12
LastEditTime: 2021-01-09 09:07:08
LastEditors: hanyu
Description: environment
FilePath: /test_ppo/examples/PPO_super_mario_bros/env.py
'''
import logging
import numpy as np
from collections import namedtuple
# todo, to common
def padding(input, seqlen, dtype):
    """Zero-pad `input` along its first axis up to `seqlen` entries.

    The value is first materialized as a fresh ndarray of `dtype`.
    Inputs that already have >= seqlen entries are returned unchanged
    (no truncation). Padding entries are zeros shaped like input[0].
    """
    arr = np.array(input, dtype=dtype)
    if len(arr) >= seqlen:
        return arr
    reps = [seqlen - arr.shape[0]] + [1] * (len(arr.shape) - 1)
    filler = np.tile(np.zeros_like(arr[:1], dtype=dtype), reps)
    return np.concatenate((arr, filler), axis=0)
Seg = namedtuple("Seg", ["s", "a", "a_logits",
"r", "gaes", "v_cur", "state_in"])
def _warp_env():
import random
from utils.get_gaes import get_gaes
import gym_super_mario_bros
from PIL import Image
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
from nes_py.wrappers import JoypadSpace
    class Env(object):
        """
        Raw single environment of one game.

        Wraps one gym_super_mario_bros instance, accumulates a rollout
        (states, actions, action logits, rewards, value estimates, RNN
        states) and, when the episode ends, cuts the rollout into
        fixed-length training segments via `postprocess`.
        """
        def __init__(self, act_space, act_repeats, frames, state_size, burn_in, seqlen, game):
            '''
            description: init basic params settings.
            param {
                act_space: size of the agent's action space (7 or 12 supported).
                act_repeats: candidate action-repeat counts; one is re-sampled per cycle.
                frames: number of stacked frames per state.
                state_size: recurrent state shape, calculated in build_policy_evaluator().
                burn_in: sequence length of each burn-in (dropped) segment prefix.
                seqlen: sequence length of each training segment.
                game: gym environment id string of the game.
            }
            return {None}
            '''
            self.act_space = act_space
            self.act_repeats = act_repeats
            self.act_repeat = random.choice(self.act_repeats)
            self.frames = frames
            self.state_size = state_size
            self.game = game
            self.burn_in = burn_in
            self.seqlen = seqlen
            # best x position reached this episode (sentinel: far left)
            self.max_pos = -10000
            # steps since the action was last re-chosen (see step())
            self.count = 0
            # make gym env from gym_super_mario_bros
            env = gym_super_mario_bros.make(game)
            # wrap the raw env with JoypadSpace according to act_space
            if self.act_space == 7:
                self.env = JoypadSpace(env, SIMPLE_MOVEMENT)
            elif self.act_space == 12:
                self.env = JoypadSpace(env, COMPLEX_MOVEMENT)
            # resize the output image to 84*84 & normalize the pixels
            # input: (240, 256, 3)
            # output: (84, 84, 1)
            s_t = self.resize_image(self.env.reset())
            # stack the initial frame `frames` times
            # output: (84, 84, frames)
            self.s_t = np.tile(s_t, [1, 1, frames])
            # per-step rollout histories (one list entry per step)
            self.s = [self.s_t]
            # current action, sampled uniformly before the first model call
            self.a_t = random.randint(0, act_space - 1)
            self.a = [self.a_t]
            # action logits; one entry per step
            self.a_logits = []
            self.r = [0]
            self.pos = []
            self.v_cur = []
            # recurrent state; shape decided by build_policy_evaluator()
            state_in = np.zeros(self.state_size, dtype=np.float32)
            self.state_in = [state_in]
            self.done = False
        def step(self, a, a_logits, v_cur, state_in, force=False):
            '''
            description: advance the env by (up to) two frames and record the step.
            param {
                a: action proposed by the policy for this step
                a_logits: action logits for this step
                v_cur: current value estimate
                state_in: recurrent state fed to the policy
                force: force segment emission / reset even if not done
            }
            return {
                segs: list of dicts with keys
                      ["s","a","prev_a","a_logits","r","prev_r","adv","v_cur","state_in","slots"],
                      or None when no segment is ready
            }
            '''
            # keep repeating the previous action until act_repeat steps have
            # elapsed, then adopt the new action and re-sample act_repeat
            self.count += 1
            if self.count % self.act_repeat == 0:
                self.a_t = a
                self.count = 0
                self.act_repeat = random.choice(self.act_repeats)
            # frame-skip: step twice and average the two rewards,
            # unless the first step already terminated the episode
            gs_t1, gr_t, gdone, ginfo = self.env.step(self.a_t)
            if not gdone:
                s_t1, r_t, done, info = self.env.step(self.a_t)
                r_t += gr_t
                r_t /= 2.
            else:
                s_t1 = gs_t1
                r_t = gr_t
                done = gdone
                info = ginfo
            # reward scaling
            r_t /= 15.
            s_t1 = self.resize_image(s_t1)
            channels = s_t1.shape[-1]
            # push s_t1 onto the frame stack, dropping the oldest frame
            self.s_t = np.concatenate(
                [s_t1, self.s_t[:, :, :-channels]], axis=-1)
            self.s.append(self.s_t)
            self.a.append(self.a_t)
            self.a_logits.append(a_logits)
            self.r.append(r_t)
            self.max_pos = max(self.max_pos, info["x_pos"])
            self.pos.append(info["x_pos"])
            # terminate episodes stuck within +-5 pixels over the last 100 steps
            if (len(self.pos) > 100) and (
                    info["x_pos"] - self.pos[-100] < 5) and (
                    self.pos[-100] - info["x_pos"] < 5):
                done = True
            self.done = done
            self.v_cur.append(v_cur)
            self.state_in.append(state_in)
            """
            get segs
            """
            segs = self.get_history(force)
            """
            reset env
            """
            self.reset(force)
            return segs
        def reset(self, force=False):
            '''Reset all rollout buffers and the gym env (only when done or forced).'''
            if self.done or force:
                max_pos = self.max_pos
                self.max_pos = -10000
                print(" Max Position %s : %d" % (self.game, max_pos))
                self.count = 0
                self.act_repeat = random.choice(self.act_repeats)
                s_t = self.resize_image(self.env.reset())
                self.s_t = np.tile(s_t, [1, 1, self.frames])
                self.s = [self.s_t]
                self.a_t = random.randint(0, self.act_space - 1)
                self.a = [self.a_t]
                self.a_logits = []
                self.r = [0]
                self.pos = []
                self.v_cur = []
                state_in = np.zeros(self.state_size, dtype=np.float32)
                self.state_in = [state_in]
                self.done = False
        def get_state(self):
            '''Return the current stacked-frame state (84, 84, frames).'''
            return self.s_t
        def get_act(self):
            '''Return the action currently being (repeatedly) executed.'''
            return self.a_t
        def get_max_pos(self):
            '''Return the best x position reached this episode.'''
            return self.max_pos
        def reset_max_pos(self):
            '''Reset the best-position tracker to its sentinel value.'''
            self.max_pos = -10000
        def get_state_in(self):
            '''Return the most recent recurrent state.'''
            return self.state_in[-1]
        def get_history(self, force=False):
            '''Build a Seg from the rollout buffers and postprocess it.

            On `done`, the full rollout is used (terminal bootstrap value 0);
            on `force`, the last step is dropped and its value estimate is
            used to bootstrap. Returns None when no segment is ready.
            '''
            if self.done or force:
                if self.done:
                    # using Generalized Advantage Estimator to estimate advantages
                    gaes, _ = get_gaes(None, self.r, self.v_cur,
                                       self.v_cur[1:] + [0], 0.99, 0.95)
                    seg = Seg(self.s, self.a, self.a_logits, self.r,
                              gaes, self.v_cur, self.state_in)
                    return self.postprocess(seg)
                if force and len(self.r) > 1:
                    gaes, _ = get_gaes(
                        None, self.r[:-1], self.v_cur[:-1], self.v_cur[1:], 0.99, 0.95)
                    seg = Seg(self.s[:-1], self.a[:-1], self.a_logits[:-1], self.r[:-1], gaes,
                              self.v_cur[:-1], self.state_in[:-1])
                    return self.postprocess(seg)
            return None
        @staticmethod
        def resize_image(image, size=84):
            '''
            description: resize, grayscale and normalize one frame.
            param {
                image: np.array frame as returned by the gym env
                size: target width/height after resizing
            }
            return {np.float32 array of shape (size, size, 1), values in [0, 1]}
            '''
            image = Image.fromarray(image)
            image = image.convert("L")
            image = image.resize((size, size))
            image = np.array(image)
            image = image / 255.
            image = np.array(image, np.float32)
            return image[:, :, None]
        def postprocess(self, seg):
            """
            Postprocess a raw Seg for training: repeatedly slice off
            (burn_in + seqlen)-long windows (zero-padded at the tail),
            advancing by burn_in each iteration, and return them as a
            list of dicts (or None when nothing usable remains).
            """
            burn_in = self.burn_in
            seqlen = self.seqlen + burn_in
            seg_results = []
            if seg is not None:
                while len(seg[0]) > burn_in:
                    next_seg = dict()
                    # states: (<=rollout_len, 84, 84, frames) -> (seqlen, 84, 84, frames)
                    next_seg["s"] = padding(seg.s[:seqlen], seqlen, np.float32)
                    # actions/rewards are shifted by one step relative to prev_a/prev_r
                    next_seg["a"] = padding(
                        seg.a[1:seqlen + 1], seqlen, np.int32)
                    next_seg["prev_a"] = padding(
                        seg.a[:seqlen], seqlen, np.int32)
                    next_seg["a_logits"] = padding(
                        seg.a_logits[:seqlen], seqlen, np.float32)
                    next_seg["r"] = padding(
                        seg.r[1:seqlen + 1], seqlen, np.float32)
                    next_seg["prev_r"] = padding(
                        seg.r[:seqlen], seqlen, np.float32)
                    next_seg["adv"] = padding(
                        seg.gaes[:seqlen], seqlen, np.float32)
                    next_seg["v_cur"] = padding(
                        seg.v_cur[:seqlen], seqlen, np.float32)
                    # only the window's initial recurrent state is kept
                    next_seg["state_in"] = np.array(
                        seg.state_in[0], np.float32)
                    # 1 marks real steps, 0 marks tail padding
                    next_seg["slots"] = padding(
                        len(seg.s[:seqlen]) * [1], seqlen, np.int32)
                    seg_results.append(next_seg)
                    seg = Seg(*[t[burn_in:] for t in seg])
            if any(seg_results):
                # print("full use one segs done!")
                return seg_results
            else:
                return None
class Envs(object):
def __init__(self, act_space, act_repeats, frames,
state_size, burn_in, seqlen, games):
'''
description: init the environment list
param {params}
return {*}
'''
self.envs = []
for game in games:
env = Env(act_space, act_repeats, frames,
state_size, burn_in, seqlen, game)
self.envs.append(env)
def step(self, sess, model):
'''
description: step action according to neural network model
param {
sess: tensorflow session
model: the neural network model
}
return {the list of Seg}
'''
feed_dict = self.get_feed_dict(model)
# get predicted action from model
a, a_logits, v_cur, state_in = sess.run(
[model.current_act, model.current_act_logits,
model.current_value, model.state_out],
feed_dict=feed_dict
)
# step the predicted action in turn
segs = [env.step(
a[i][0],
a_logits[i][0],
v_cur[i][0],
state_in[i]
) for (i, env) in enumerate(self.envs)]
segs = [t2 for t1 in segs if t1 is not None for t2 in t1]
return segs
def get_feed_dict(self, model):
'''
description: get the feed_dict of model
param {*}
return {*}
'''
feed_dict = dict()
feed_dict[model.s_t] = [[env.get_state()] for env in self.envs]
feed_dict[model.previous_actions] = [[env.get_act()]
for env in self.envs]
feed_dict[model.prev_r] = [[env.r[-1]] for env in self.envs]
feed_dict[model.state_in] = [env.get_state_in()
for env in self.envs]
return feed_dict
return Envs
def build_env(kwargs):
    """Construct the batched Super Mario environments from a config dict.

    param {
        kwargs: dict with keys 'state_size', 'action_repeats', 'frames',
                'parallel', 'act_space', 'burn_in', 'seqlen'
    }
    return {an `Envs` instance driving exactly kwargs['parallel'] envs}

    Raises ValueError when 'parallel' is not positive.
    """
    Envs = _warp_env()
    state_size = kwargs['state_size']
    action_repeats = kwargs['action_repeats']
    frames = kwargs["frames"]
    parallel = kwargs['parallel']
    act_space = kwargs['act_space']
    burn_in = kwargs['burn_in']
    seqlen = kwargs['seqlen']

    # 8 worlds x 4 stages = 32 distinct stage ids
    games = ["SuperMarioBros-%d-%d-v0" %
             (i, j) for i in range(1, 9) for j in range(1, 5)]
    if parallel <= 0:
        raise ValueError("'parallel' must be positive, got %s" % parallel)
    # Repeat the stage list so that exactly `parallel` environments are
    # created. The previous floor division (parallel // len(games))
    # silently produced ZERO environments whenever parallel < 32 and
    # rounded down otherwise; ceil-then-truncate keeps the old behavior
    # for exact multiples of 32 and fixes the rest.
    repeats = -(-parallel // len(games))  # ceiling division
    games = (games * repeats)[:parallel]

    envs = Envs(act_space, action_repeats, frames,
                state_size, burn_in, seqlen, games)
    return envs
| 34.831099 | 94 | 0.491379 | import logging
import numpy as np
from collections import namedtuple
def padding(input, seqlen, dtype):
    """Zero-pad *input* along axis 0 up to *seqlen* entries.

    The sequence is first converted to an ndarray of *dtype*. Sequences
    that already hold at least *seqlen* entries are returned unchanged
    (no truncation is performed).
    """
    arr = np.array(input, dtype=dtype)
    deficit = seqlen - len(arr)
    if deficit <= 0:
        return arr
    # replicate a zeroed copy of the first row `deficit` times so the
    # filler matches the trailing dimensions of the data
    filler = np.tile(
        np.zeros_like(arr[0:1], dtype=dtype),
        [deficit] + [1] * (arr.ndim - 1))
    return np.concatenate([arr, filler], axis=0)
# One rollout segment: per-step states, actions, action logits, rewards,
# GAE advantages (computed via utils.get_gaes), value estimates, and the
# recurrent state fed to the policy at each step.
Seg = namedtuple("Seg", ["s", "a", "a_logits",
                         "r", "gaes", "v_cur", "state_in"])
def _warp_env():
import random
from utils.get_gaes import get_gaes
import gym_super_mario_bros
from PIL import Image
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
from nes_py.wrappers import JoypadSpace
class Env(object):
def __init__(self, act_space, act_repeats, frames, state_size, burn_in, seqlen, game):
self.act_space = act_space
self.act_repeats = act_repeats
self.act_repeat = random.choice(self.act_repeats)
self.frames = frames
self.state_size = state_size
self.game = game
self.burn_in = burn_in
self.seqlen = seqlen
self.max_pos = -10000
self.count = 0
env = gym_super_mario_bros.make(game)
if self.act_space == 7:
self.env = JoypadSpace(env, SIMPLE_MOVEMENT)
elif self.act_space == 12:
self.env = JoypadSpace(env, COMPLEX_MOVEMENT)
s_t = self.resize_image(self.env.reset())
self.s_t = np.tile(s_t, [1, 1, frames])
self.s = [self.s_t]
self.a_t = random.randint(0, act_space - 1)
self.a = [self.a_t]
self.a_logits = []
self.r = [0]
self.pos = []
self.v_cur = []
state_in = np.zeros(self.state_size, dtype=np.float32)
self.state_in = [state_in]
self.done = False
def step(self, a, a_logits, v_cur, state_in, force=False):
self.count += 1
if self.count % self.act_repeat == 0:
self.a_t = a
self.count = 0
self.act_repeat = random.choice(self.act_repeats)
gs_t1, gr_t, gdone, ginfo = self.env.step(self.a_t)
if not gdone:
s_t1, r_t, done, info = self.env.step(self.a_t)
r_t += gr_t
r_t /= 2.
else:
s_t1 = gs_t1
r_t = gr_t
done = gdone
info = ginfo
r_t /= 15.
s_t1 = self.resize_image(s_t1)
channels = s_t1.shape[-1]
self.s_t = np.concatenate(
[s_t1, self.s_t[:, :, :-channels]], axis=-1)
self.s.append(self.s_t)
self.a.append(self.a_t)
self.a_logits.append(a_logits)
self.r.append(r_t)
self.max_pos = max(self.max_pos, info["x_pos"])
self.pos.append(info["x_pos"])
if (len(self.pos) > 100) and (
info["x_pos"] - self.pos[-100] < 5) and (
self.pos[-100] - info["x_pos"] < 5):
done = True
self.done = done
self.v_cur.append(v_cur)
self.state_in.append(state_in)
segs = self.get_history(force)
self.reset(force)
return segs
def reset(self, force=False):
if self.done or force:
max_pos = self.max_pos
self.max_pos = -10000
print(" Max Position %s : %d" % (self.game, max_pos))
self.count = 0
self.act_repeat = random.choice(self.act_repeats)
s_t = self.resize_image(self.env.reset())
self.s_t = np.tile(s_t, [1, 1, self.frames])
self.s = [self.s_t]
self.a_t = random.randint(0, self.act_space - 1)
self.a = [self.a_t]
self.a_logits = []
self.r = [0]
self.pos = []
self.v_cur = []
state_in = np.zeros(self.state_size, dtype=np.float32)
self.state_in = [state_in]
self.done = False
def get_state(self):
return self.s_t
def get_act(self):
return self.a_t
def get_max_pos(self):
return self.max_pos
def reset_max_pos(self):
self.max_pos = -10000
def get_state_in(self):
return self.state_in[-1]
def get_history(self, force=False):
if self.done or force:
if self.done:
gaes, _ = get_gaes(None, self.r, self.v_cur,
self.v_cur[1:] + [0], 0.99, 0.95)
seg = Seg(self.s, self.a, self.a_logits, self.r,
gaes, self.v_cur, self.state_in)
return self.postprocess(seg)
if force and len(self.r) > 1:
gaes, _ = get_gaes(
None, self.r[:-1], self.v_cur[:-1], self.v_cur[1:], 0.99, 0.95)
seg = Seg(self.s[:-1], self.a[:-1], self.a_logits[:-1], self.r[:-1], gaes,
self.v_cur[:-1], self.state_in[:-1])
return self.postprocess(seg)
return None
@staticmethod
def resize_image(image, size=84):
image = Image.fromarray(image)
image = image.convert("L")
image = image.resize((size, size))
image = np.array(image)
image = image / 255.
image = np.array(image, np.float32)
return image[:, :, None]
def postprocess(self, seg):
burn_in = self.burn_in
seqlen = self.seqlen + burn_in
seg_results = []
if seg is not None:
while len(seg[0]) > burn_in:
next_seg = dict()
next_seg["s"] = padding(seg.s[:seqlen], seqlen, np.float32)
next_seg["a"] = padding(
seg.a[1:seqlen + 1], seqlen, np.int32)
next_seg["prev_a"] = padding(
seg.a[:seqlen], seqlen, np.int32)
next_seg["a_logits"] = padding(
seg.a_logits[:seqlen], seqlen, np.float32)
next_seg["r"] = padding(
seg.r[1:seqlen + 1], seqlen, np.float32)
next_seg["prev_r"] = padding(
seg.r[:seqlen], seqlen, np.float32)
next_seg["adv"] = padding(
seg.gaes[:seqlen], seqlen, np.float32)
next_seg["v_cur"] = padding(
seg.v_cur[:seqlen], seqlen, np.float32)
next_seg["state_in"] = np.array(
seg.state_in[0], np.float32)
next_seg["slots"] = padding(
len(seg.s[:seqlen]) * [1], seqlen, np.int32)
seg_results.append(next_seg)
seg = Seg(*[t[burn_in:] for t in seg])
if any(seg_results):
return seg_results
else:
return None
class Envs(object):
def __init__(self, act_space, act_repeats, frames,
state_size, burn_in, seqlen, games):
self.envs = []
for game in games:
env = Env(act_space, act_repeats, frames,
state_size, burn_in, seqlen, game)
self.envs.append(env)
def step(self, sess, model):
feed_dict = self.get_feed_dict(model)
a, a_logits, v_cur, state_in = sess.run(
[model.current_act, model.current_act_logits,
model.current_value, model.state_out],
feed_dict=feed_dict
)
segs = [env.step(
a[i][0],
a_logits[i][0],
v_cur[i][0],
state_in[i]
) for (i, env) in enumerate(self.envs)]
segs = [t2 for t1 in segs if t1 is not None for t2 in t1]
return segs
def get_feed_dict(self, model):
feed_dict = dict()
feed_dict[model.s_t] = [[env.get_state()] for env in self.envs]
feed_dict[model.previous_actions] = [[env.get_act()]
for env in self.envs]
feed_dict[model.prev_r] = [[env.r[-1]] for env in self.envs]
feed_dict[model.state_in] = [env.get_state_in()
for env in self.envs]
return feed_dict
return Envs
def build_env(kwargs):
Envs = _warp_env()
state_size = kwargs['state_size']
action_repeats = kwargs['action_repeats']
frames = kwargs["frames"]
parallel = kwargs['parallel']
act_space = kwargs['act_space']
burn_in = kwargs['burn_in']
seqlen = kwargs['seqlen']
games = ["SuperMarioBros-%d-%d-v0" %
(i, j) for i in range(1, 9) for j in range(1, 5)]
games = games * (parallel // len(games))
envs = Envs(act_space, action_repeats, frames,
state_size, burn_in, seqlen, games)
return envs
| true | true |
f71bdd846d5f411d42153fd29bd618659a976fad | 17,083 | py | Python | orio/module/loop/submodule/composite/composite.py | phrb/orio_experiments | 934ba192301cb4e23d98b9f79e91799152bf76b1 | [
"MIT"
] | null | null | null | orio/module/loop/submodule/composite/composite.py | phrb/orio_experiments | 934ba192301cb4e23d98b9f79e91799152bf76b1 | [
"MIT"
] | null | null | null | orio/module/loop/submodule/composite/composite.py | phrb/orio_experiments | 934ba192301cb4e23d98b9f79e91799152bf76b1 | [
"MIT"
] | null | null | null | #
# Loop transformation submodule.that implements a combination of various loop transformations.
#
import sys
import orio.module.loop.submodule.submodule, transformation
import orio.module.loop.submodule.tile.tile
import orio.module.loop.submodule.permut.permut
import orio.module.loop.submodule.regtile.regtile
import orio.module.loop.submodule.unrolljam.unrolljam
import orio.module.loop.submodule.scalarreplace.scalarreplace
import orio.module.loop.submodule.boundreplace.boundreplace
import orio.module.loop.submodule.pragma.pragma
import orio.module.loop.submodule.arrcopy.arrcopy
import orio.module.loop.submodule.cuda.cuda
from orio.main.util.globals import *
#---------------------------------------------------------------------
class Composite(orio.module.loop.submodule.submodule.SubModule):
    '''The composite loop transformation submodule.

    Aggregates the individual loop-transformation submodules (tiling,
    permutation, register tiling, unroll/jam, scalar replacement, bound
    replacement, pragma insertion, array copying, CUDA) and applies them
    to one statement as a single combined sequence.
    '''

    def __init__(self, perf_params=None, transf_args=None, stmt=None, language='C', tinfo=None):
        '''To instantiate a composite loop transformation submodule.'''

        orio.module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt, language)

        # one submodule instance per supported transformation; each is used
        # below to validate its own slice of the transformation arguments
        self.tile_smod = orio.module.loop.submodule.tile.tile.Tile()
        self.perm_smod = orio.module.loop.submodule.permut.permut.Permut()
        self.regt_smod = orio.module.loop.submodule.regtile.regtile.RegTile()
        self.ujam_smod = orio.module.loop.submodule.unrolljam.unrolljam.UnrollJam()
        self.srep_smod = orio.module.loop.submodule.scalarreplace.scalarreplace.ScalarReplace()
        self.brep_smod = orio.module.loop.submodule.boundreplace.boundreplace.BoundReplace()
        self.prag_smod = orio.module.loop.submodule.pragma.pragma.Pragma()
        self.acop_smod = orio.module.loop.submodule.arrcopy.arrcopy.ArrCopy()
        self.cuda_smod = orio.module.loop.submodule.cuda.cuda.CUDA()

    #-----------------------------------------------------------------

    def __readTransfArgs(self, perf_params, transf_args):
        '''Evaluate and validate the given transformation arguments.

        Returns the tuple (tiles, permuts, regtiles, ujams, scalarrep,
        boundrep, pragma, openmp, vector, arrcopy, cuda) after semantic
        checking via checkTransfArgs().
        '''

        # all expected argument names
        TILE = 'tile'
        PERMUT = 'permut'
        REGTILE = 'regtile'
        UJAM = 'unrolljam'
        SCALARREP = 'scalarreplace'
        BOUNDREP = 'boundreplace'
        PRAGMA = 'pragma'
        OPENMP = 'openmp'
        VECTOR = 'vector'
        ARRCOPY = 'arrcopy'
        CUDA = 'cuda'

        # defaults for all expected transformation arguments,
        # each paired with the line number it was parsed from
        tiles = ([], None)
        permuts = ([], None)
        regtiles = (([], []), None)
        ujams = (([], []), None)
        scalarrep = (False, None)
        boundrep = (False, None)
        pragma = ([], None)
        openmp = ((False, ''), None)
        vector = ((False, ''), None)
        arrcopy = ([], None)
        cuda = ((None, False, False, None), None)

        # iterate over all transformation arguments
        for aname, rhs, line_no in transf_args:

            # evaluate the RHS expression with the performance parameters
            # in scope. NOTE: eval() on tuning-spec input is inherited
            # behavior; the spec is trusted, but do not feed it untrusted data.
            try:
                rhs = eval(rhs, perf_params)
            except Exception as e:  # was Py2-only "except Exception, e"
                err('orio.module.loop.submodule.composite.composite: %s: failed to evaluate the argument expression: %s\n --> %s: %s' %
                    (line_no, rhs, e.__class__.__name__, e))

            # update the matching transformation argument
            if aname == TILE:
                tiles = (rhs, line_no)
            elif aname == PERMUT:
                permuts = (rhs, line_no)
            elif aname == REGTILE:
                regtiles = (rhs, line_no)
            elif aname == UJAM:
                ujams = (rhs, line_no)
            elif aname == SCALARREP:
                scalarrep = (rhs, line_no)
            elif aname == BOUNDREP:
                boundrep = (rhs, line_no)
            elif aname == PRAGMA:
                pragma = (rhs, line_no)
            elif aname == OPENMP:
                openmp = (rhs, line_no)
            elif aname == VECTOR:
                vector = (rhs, line_no)
            elif aname == ARRCOPY:
                arrcopy = (rhs, line_no)
            elif aname == CUDA:
                cuda = (rhs, line_no)
            # unknown argument name
            else:
                err('orio.module.loop.submodule.composite.composite: %s: unrecognized transformation argument: "%s"' % (line_no, aname))

        # check semantics of the transformation arguments
        (tiles, permuts, regtiles, ujams, scalarrep, boundrep,
         pragma, openmp, vector, arrcopy, cuda) = self.checkTransfArgs(tiles, permuts, regtiles, ujams,
                                                                       scalarrep, boundrep, pragma,
                                                                       openmp, vector, arrcopy, cuda)

        # return information about the transformation arguments
        return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)

    #-----------------------------------------------------------------

    def checkTransfArgs(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma,
                        openmp, vector, arrcopy, cuda):
        '''Check the semantics of the given transformation arguments.

        Each argument arrives as a pair (value, line_no); validation of the
        inner values is delegated to the corresponding submodule. Returns
        the normalized argument tuple.
        '''

        # evaluate arguments for loop tiling: a list of
        # (<loop-id>, <tile-size>, <tile-index>) triples
        rhs, line_no = tiles
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: tile argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 3:
                err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +
                     '(<loop-id>,<tsize>,<tindex>): %s') % (line_no, e))
            loop_id, tsize, tindex = e
            loop_id = self.__convertLoopId(loop_id, line_no)
            tsize, tindex = self.tile_smod.checkTransfArgs((tsize, line_no), (tindex, line_no))
            targs.append((loop_id, tsize, tindex))
        tiles = targs

        # evaluate arguments for loop permutation/interchange
        rhs, line_no = permuts
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: permutation argument must be a list/tuple: %s' % (line_no, rhs))
        for e in rhs:
            seq, = self.perm_smod.checkTransfArgs((e, line_no))
        permuts = rhs

        # evaluate arguments for register tiling: (<loop-ids>, <ufactors>)
        rhs, line_no = regtiles
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: register-tiling argument must be a list/tuple: %s' % (line_no, rhs))
        if len(rhs) != 2:
            err(('orio.module.loop.submodule.composite.composite:%s: register-tiling argument must be in the form of ' +
                 '(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
        loops, ufactors = rhs
        loops, ufactors = self.regt_smod.checkTransfArgs((loops, line_no), (ufactors, line_no))
        regtiles = (loops, ufactors)

        # evaluate arguments for unroll/jamming: (<loop-ids>, <ufactors>)
        rhs, line_no = ujams
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: unroll/jam argument must be a list/tuple: %s' % (line_no, rhs))
        if len(rhs) != 2:
            err(('orio.module.loop.submodule.composite.composite:%s: unroll/jam argument must be in the form of ' +
                 '(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
        loops, ufactors = rhs
        for lp, uf in zip(loops, ufactors):
            self.ujam_smod.checkTransfArgs((uf, line_no), (False, line_no))
        ujams = (loops, ufactors)

        # evaluate arguments for scalar replacement: either a bare boolean
        # or ((True|False), <dtype>, <prefix>) with trailing parts optional
        rhs, line_no = scalarrep
        if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
            scalarrep = (rhs, None, None)
        else:
            if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
                    len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
                err(('orio.module.loop.submodule.composite.composite:%s: scalar replacement argument must be in the form of ' +
                     '((True|False),<dtype>,<prefix>): %s') % (line_no, rhs))
            do_scalarrep = rhs[0]
            dtype = None
            prefix = None
            if len(rhs) >= 2:
                dtype = rhs[1]
            if len(rhs) >= 3:
                prefix = rhs[2]
            dtype, prefix = self.srep_smod.checkTransfArgs((dtype, line_no), (prefix, line_no))
            scalarrep = (do_scalarrep, dtype, prefix)

        # evaluate arguments for bound replacement: either a bare boolean
        # or ((True|False), <lprefix>, <uprefix>) with trailing parts optional
        rhs, line_no = boundrep
        if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
            boundrep = (rhs, None, None)
        else:
            if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
                    len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
                err(('orio.module.loop.submodule.composite.composite:%s: bound replacement argument must be in the form of ' +
                     '((True|False),<lprefix>,<uprefix>): %s') % (line_no, rhs))
            do_boundrep = rhs[0]
            lprefix = None
            uprefix = None
            if len(rhs) >= 2:
                lprefix = rhs[1]
            if len(rhs) >= 3:
                uprefix = rhs[2]
            lprefix, uprefix = self.brep_smod.checkTransfArgs((lprefix, line_no), (uprefix, line_no))
            boundrep = (do_boundrep, lprefix, uprefix)

        # evaluate arguments for pragma directives: a list of
        # (<loop-id>, <pragma-strings>) pairs
        rhs, line_no = pragma
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: pragma argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 2:
                err(('orio.module.loop.submodule.composite.composite:%s: element of pragma directive argument must be in the form of ' +
                     '(<loop-id>,<pragma-strings>): %s') % (line_no, e))
            loop_id, pragmas = e
            loop_id = self.__convertLoopId(loop_id, line_no)
            pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
            targs.append((loop_id, pragmas))
        pragma = targs

        # evaluate arguments for the OpenMP pragma directive
        rhs, line_no = openmp
        if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
                not isinstance(rhs[0], bool)):
            err(('orio.module.loop.submodule.composite.composite:%s: element of openmp pragma directive argument must be in the form of ' +
                 '((True|False),<pragma-strings>): %s') % (line_no, rhs))
        do_openmp, pragmas = rhs
        pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
        openmp = do_openmp, pragmas

        # evaluate arguments for the vectorization pragma directive
        rhs, line_no = vector
        if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
                not isinstance(rhs[0], bool)):
            err(('orio.module.loop.submodule.composite.composite:%s: element of vectorization pragma directive argument must be in ' +
                 'the form of ((True|False),<pragma-strings>): %s') % (line_no, rhs))
        do_vector, pragmas = rhs
        pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
        vector = do_vector, pragmas

        # evaluate arguments for array-copy optimization: a list of
        # ((True|False), <array-ref-str>, <dim-sizes>[, <suffix>[, <dtype>]])
        rhs, line_no = arrcopy
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: array-copy argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if ((not isinstance(e, list) and not isinstance(e, tuple)) or len(e) > 5 or
                    len(e) < 3 or not isinstance(e[0], bool)):
                # fixed copy-pasted message: this is the array-copy argument,
                # not the tile argument
                err(('orio.module.loop.submodule.composite.composite:%s: element of array-copy argument must be in the form of ' +
                     '((True|False),<array-ref-str>,<dim-sizes>,<suffix>,<dtype>): %s') %
                    (line_no, e))
            dtype = None
            suffix = None
            if len(e) == 3:
                do_acopy, aref, dimsizes = e
            elif len(e) == 4:
                do_acopy, aref, dimsizes, suffix = e
            else:
                do_acopy, aref, dimsizes, suffix, dtype = e
            (aref, suffix,
             dtype, dimsizes) = self.acop_smod.checkTransfArgs((aref, line_no), (suffix, line_no),
                                                               (dtype, line_no), (dimsizes, line_no))
            targs.append((do_acopy, aref, suffix, dtype, dimsizes))
        arrcopy = targs

        # evaluate arguments for CUDA:
        # (<threadCount>, <cacheBlocks>, <pinHostMem>, <streamCount>)
        rhs, line_no = cuda
        if not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.cuda.cuda: %s: cuda argument must be a tuple: %s' % (line_no, rhs))
        if len(rhs) != 4:
            err(('orio.module.loop.submodule.cuda.cuda:%s: cuda argument must be in the form of ' +
                 '(<threadCount>,<cacheBlocks>,<pinHostMem>,<streamCount>): %s') % (line_no, rhs))
        cuda = rhs

        # return information about the transformation arguments
        return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)

    #-----------------------------------------------------------------

    def applyTransf(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep,
                    pragma, openmp, vector, arrcopy, cuda, stmt):
        '''Apply the given sequence of transformations to *stmt* and return
        the transformed statement.'''

        # perform the composite transformations on the statement that was
        # passed in (previously self.stmt was used and the stmt parameter
        # was silently ignored; the in-file caller passes self.stmt, so
        # behavior is unchanged for it)
        t = transformation.Transformation(tiles, permuts, regtiles, ujams, scalarrep,
                                          boundrep, pragma, openmp, vector, arrcopy, cuda, stmt)
        transformed_stmt = t.transform()

        # return the transformed statement
        return transformed_stmt

    #-----------------------------------------------------------------

    def __convertLoopId(self, lid, line_no):
        '''
        Convert the loop ID to a list: [True/False, id1, id2, id3, ...].
        The 'True' boolean value indicates that at least one of the loop IDs must exist in the
        statement body. A 'False' value means that it is OK if no loop IDs exist in the statement
        body.
        The sequence of IDs implies: "apply optimizations on id1 (if it exists); if not, apply
        optimizations on id2 (if it exists), and so on and so forth".
        '''

        # check if the loop ID is well-formed: a string, or a non-empty
        # list/tuple of strings
        if isinstance(lid, str):
            pass
        elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
            for i in lid:
                if not isinstance(i, str):
                    err('orio.module.loop.submodule.composite.composite: %s: loop ID must be a string: %s' % (line_no, i))
        else:
            err('orio.module.loop.submodule.composite.composite: %s: invalid loop ID representation: %s' % (line_no, lid))

        # create the loop ID abstraction; a tuple means "required" (True),
        # a list means "optional" (False)
        lids = []
        if isinstance(lid, str):
            lids.append(True)
            lids.append(lid)
        elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
            lids.append(isinstance(lid, tuple))
            lids.extend(lid)
        else:
            err('orio.module.loop.submodule.composite.composite internal error: ' +
                'incorrect representation of the loop IDs', doexit=True)

        return lids

    #-----------------------------------------------------------------

    def transform(self):
        '''Read the transformation arguments, apply all transformations to
        self.stmt, and return the transformed statement.'''

        # read all transformation arguments
        args_info = self.__readTransfArgs(self.perf_params, self.transf_args)
        (tiles, permuts, regtiles, ujams, scalarrep,
         boundrep, pragma, openmp, vector, arrcopy, cuda) = args_info

        # perform all transformations
        try:
            transformed_stmt = self.applyTransf(tiles, permuts, regtiles, ujams, scalarrep, boundrep,
                                                pragma, openmp, vector, arrcopy, cuda, self.stmt)
        except Exception as e:  # was Py2-only "except Exception, e"
            err('orio.module.loop.submodule.composite.composite : error transforming "%s"\n --> %s:%s' %
                (self.stmt, e.__class__, e))

        # return the transformed statement
        return transformed_stmt
| 48.121127 | 139 | 0.572967 |
import sys
import orio.module.loop.submodule.submodule, transformation
import orio.module.loop.submodule.tile.tile
import orio.module.loop.submodule.permut.permut
import orio.module.loop.submodule.regtile.regtile
import orio.module.loop.submodule.unrolljam.unrolljam
import orio.module.loop.submodule.scalarreplace.scalarreplace
import orio.module.loop.submodule.boundreplace.boundreplace
import orio.module.loop.submodule.pragma.pragma
import orio.module.loop.submodule.arrcopy.arrcopy
import orio.module.loop.submodule.cuda.cuda
from orio.main.util.globals import *
class Composite(orio.module.loop.submodule.submodule.SubModule):
'''The composite loop transformation submodule.'''
def __init__(self, perf_params = None, transf_args = None, stmt = None, language='C', tinfo = None):
'''To instantiate a composite loop transformation submodule.'''
orio.module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt, language)
self.tile_smod = orio.module.loop.submodule.tile.tile.Tile()
self.perm_smod = orio.module.loop.submodule.permut.permut.Permut()
self.regt_smod = orio.module.loop.submodule.regtile.regtile.RegTile()
self.ujam_smod = orio.module.loop.submodule.unrolljam.unrolljam.UnrollJam()
self.srep_smod = orio.module.loop.submodule.scalarreplace.scalarreplace.ScalarReplace()
self.brep_smod = orio.module.loop.submodule.boundreplace.boundreplace.BoundReplace()
self.prag_smod = orio.module.loop.submodule.pragma.pragma.Pragma()
self.acop_smod = orio.module.loop.submodule.arrcopy.arrcopy.ArrCopy()
self.cuda_smod = orio.module.loop.submodule.cuda.cuda.CUDA()
def __readTransfArgs(self, perf_params, transf_args):
'''Process the given transformation arguments'''
TILE = 'tile'
PERMUT = 'permut'
REGTILE = 'regtile'
UJAM = 'unrolljam'
SCALARREP = 'scalarreplace'
BOUNDREP = 'boundreplace'
PRAGMA = 'pragma'
OPENMP = 'openmp'
VECTOR = 'vector'
ARRCOPY = 'arrcopy'
CUDA = 'cuda'
tiles = ([], None)
permuts = ([], None)
regtiles = (([],[]), None)
ujams = (([],[]), None)
scalarrep = (False, None)
boundrep = (False, None)
pragma = ([], None)
openmp = ((False, ''), None)
vector = ((False, ''), None)
arrcopy = ([], None)
cuda = ((None, False, False, None), None)
for aname, rhs, line_no in transf_args:
try:
rhs = eval(rhs, perf_params)
except Exception, e:
err('orio.module.loop.submodule.composite.composite: %s: failed to evaluate the argument expression: %s\n --> %s: %s' %
(line_no, rhs,e.__class__.__name__, e))
if aname == TILE:
tiles = (rhs, line_no)
elif aname == PERMUT:
permuts = (rhs, line_no)
elif aname == REGTILE:
regtiles = (rhs, line_no)
elif aname == UJAM:
ujams = (rhs, line_no)
elif aname == SCALARREP:
scalarrep = (rhs, line_no)
elif aname == BOUNDREP:
boundrep = (rhs, line_no)
elif aname == PRAGMA:
pragma = (rhs, line_no)
elif aname == OPENMP:
openmp = (rhs, line_no)
elif aname == VECTOR:
vector = (rhs, line_no)
elif aname == ARRCOPY:
arrcopy = (rhs, line_no)
elif aname == CUDA:
cuda = (rhs, line_no)
else:
err('orio.module.loop.submodule.composite.composite: %s: unrecognized transformation argument: "%s"' % (line_no, aname))
(tiles, permuts, regtiles, ujams, scalarrep, boundrep,
pragma, openmp, vector, arrcopy, cuda) = self.checkTransfArgs(tiles, permuts, regtiles, ujams,
scalarrep, boundrep, pragma,
openmp, vector, arrcopy, cuda)
return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)
    def checkTransfArgs(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma,
                        openmp, vector, arrcopy, cuda):
        '''Check the semantics of the given transformation arguments.

        Each argument arrives as a (value, line_number) pair.  Every section
        below validates one argument (delegating per-element checks to the
        corresponding submodule) and rebinds the local to its canonical,
        fully normalized shape.  err() is used to report violations.
        '''
        # tile: list of (<loop-id>, <tile-size>, <tile-index>) triples
        rhs, line_no = tiles
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: tile argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 3:
                err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +
                     '(<loop-id>,<tsize>,<tindex>): %s') % (line_no, e))
            loop_id, tsize, tindex = e
            # normalize the loop ID into [must_exist, id1, id2, ...]
            loop_id = self.__convertLoopId(loop_id, line_no)
            tsize, tindex = self.tile_smod.checkTransfArgs((tsize, line_no), (tindex, line_no))
            targs.append((loop_id, tsize, tindex))
        tiles = targs
        # permut: list of permutation sequences (validated, kept as-is)
        rhs, line_no = permuts
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: permutation argument must be a list/tuple: %s' % (line_no, rhs))
        for e in rhs:
            seq, = self.perm_smod.checkTransfArgs((e, line_no))
        permuts = rhs
        # regtile: a (<loop-ids>, <unroll-factors>) pair
        rhs, line_no = regtiles
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: register-tiling argument must be a list/tuple: %s' % (line_no, rhs))
        if len(rhs) != 2:
            err(('orio.module.loop.submodule.composite.composite:%s: register-tiling argument must be in the form of ' +
                 '(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
        loops, ufactors = rhs
        loops, ufactors = self.regt_smod.checkTransfArgs((loops, line_no), (ufactors, line_no))
        regtiles = (loops, ufactors)
        # unrolljam: a (<loop-ids>, <unroll-factors>) pair; each factor is
        # checked individually against the unroll/jam submodule
        rhs, line_no = ujams
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: unroll/jam argument must be a list/tuple: %s' % (line_no, rhs))
        if len(rhs) != 2:
            err(('orio.module.loop.submodule.composite.composite:%s: unroll/jam argument must be in the form of ' +
                 '(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
        loops, ufactors = rhs
        for lp,uf in zip(loops, ufactors):
            self.ujam_smod.checkTransfArgs((uf, line_no), (False, line_no))
        ujams = (loops, ufactors)
        # scalarreplace: either a bare bool/0/1, or a 1-to-3 element
        # ((True|False), <dtype>, <prefix>) sequence with optional tail
        rhs, line_no = scalarrep
        if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
            scalarrep = (rhs, None, None)
        else:
            if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
                len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
                err(('orio.module.loop.submodule.composite.composite:%s: scalar replacement argument must be in the form of ' +
                     '((True|False),<dtype>,<prefix>): %s') % (line_no, rhs))
            do_scalarrep = rhs[0]
            dtype = None
            prefix = None
            if len(rhs) >= 2:
                dtype = rhs[1]
            if len(rhs) >= 3:
                prefix = rhs[2]
            dtype, prefix = self.srep_smod.checkTransfArgs((dtype, line_no), (prefix, line_no))
            scalarrep = (do_scalarrep, dtype, prefix)
        # boundreplace: same optional-tail scheme as scalarreplace, but the
        # tail holds the lower/upper bound variable prefixes
        rhs, line_no = boundrep
        if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
            boundrep = (rhs, None, None)
        else:
            if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
                len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
                err(('orio.module.loop.submodule.composite.composite:%s: bound replacement argument must be in the form of ' +
                     '((True|False),<lprefix>,<uprefix>): %s') % (line_no, rhs))
            do_boundrep = rhs[0]
            lprefix = None
            uprefix = None
            if len(rhs) >= 2:
                lprefix = rhs[1]
            if len(rhs) >= 3:
                uprefix = rhs[2]
            lprefix, uprefix = self.brep_smod.checkTransfArgs((lprefix, line_no), (uprefix, line_no))
            boundrep = (do_boundrep, lprefix, uprefix)
        # pragma: list of (<loop-id>, <pragma-strings>) pairs
        rhs, line_no = pragma
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: pragma argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 2:
                err(('orio.module.loop.submodule.composite.composite:%s: element of pragma directive argument must be in the form of ' +
                     '(<loop-id>,<pragma-strings>): %s') % (line_no, e))
            loop_id, pragmas = e
            loop_id = self.__convertLoopId(loop_id, line_no)
            pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
            targs.append((loop_id, pragmas))
        pragma = targs
        # openmp: a ((True|False), <pragma-strings>) pair
        rhs, line_no = openmp
        if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
            not isinstance(rhs[0], bool)):
            err(('orio.module.loop.submodule.composite.composite:%s: element of openmp pragma directive argument must be in the form of ' +
                 '((True|False),<pragma-strings>): %s') % (line_no, rhs))
        do_openmp, pragmas = rhs
        pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
        openmp = do_openmp, pragmas
        # vector: a ((True|False), <pragma-strings>) pair
        rhs, line_no = vector
        if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
            not isinstance(rhs[0], bool)):
            err(('orio.module.loop.submodule.composite.composite:%s: element of vectorization pragma directive argument must be in ' +
                 'the form of ((True|False),<pragma-strings>): %s') % (line_no, rhs))
        do_vector, pragmas = rhs
        pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
        vector = do_vector, pragmas
        # arrcopy: list of ((True|False), <array-ref>, <dim-sizes>
        # [, <suffix> [, <dtype>]]) tuples; the last two items are optional
        rhs, line_no = arrcopy
        if not isinstance(rhs, list) and not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.composite.composite: %s: array-copy argument must be a list/tuple: %s' % (line_no, rhs))
        targs = []
        for e in rhs:
            if ((not isinstance(e, list) and not isinstance(e, tuple)) or len(e) > 5 or
                len(e) < 3 or not isinstance(e[0], bool)):
                err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +
                     '((True|False),<array-ref-str>,<dim-sizes>,<suffix>,<dtype>): %s') %
                    (line_no, e))
            dtype = None
            suffix = None
            if len(e) == 3:
                do_acopy, aref, dimsizes = e
            elif len(e) == 4:
                do_acopy, aref, dimsizes, suffix = e
            else:
                do_acopy, aref, dimsizes, suffix, dtype = e
            (aref, suffix,
             dtype, dimsizes)= self.acop_smod.checkTransfArgs((aref, line_no), (suffix, line_no),
                                                              (dtype, line_no), (dimsizes, line_no))
            targs.append((do_acopy, aref, suffix, dtype, dimsizes))
        arrcopy = targs
        # cuda: a (<threadCount>, <cacheBlocks>, <pinHostMem>, <streamCount>)
        # tuple; validated structurally here, interpreted by the CUDA submodule
        rhs, line_no = cuda
        if not isinstance(rhs, tuple):
            err('orio.module.loop.submodule.cuda.cuda: %s: cuda argument must be a tuple: %s' % (line_no, rhs))
        if len(rhs) != 4:
            err(('orio.module.loop.submodule.cuda.cuda:%s: cuda argument must be in the form of ' +
                 '(<threadCount>,<cacheBlocks>,<pinHostMem>,<streamCount>): %s') % (line_no, rhs))
        cuda = rhs
        return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)
def applyTransf(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep,
pragma, openmp, vector, arrcopy, cuda, stmt):
'''To apply a sequence of transformations'''
t = transformation.Transformation(tiles, permuts, regtiles, ujams, scalarrep,
boundrep, pragma, openmp, vector, arrcopy, cuda, self.stmt)
transformed_stmt = t.transform()
return transformed_stmt
def __convertLoopId(self, lid, line_no):
'''
Convert the loop ID to a list: [True/False, id1, id2, id3, ...].
The 'True' boolean value indicates that at least one of the loop ID must exist in the
statement body. A 'False' value means that it is OK if no loop IDs exist in the statement
body.
The sequence of IDs imply that "apply optimizations on id1 (if exist), if not, apply
optimizations on id2 (if exist), and so on and so forth".
'''
if isinstance(lid, str):
pass
elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
for i in lid:
if not isinstance(i, str):
err('orio.module.loop.submodule.composite.composite: %s: loop ID must be a string: %s' % (line_no, i))
else:
err('orio.module.loop.submodule.composite.composite: %s: invalid loop ID representation: %s' % (line_no, lid))
lids = []
if isinstance(lid, str):
lids.append(True)
lids.append(lid)
elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
lids.append(isinstance(lid, tuple))
lids.extend(lid)
else:
err('orio.module.loop.submodule.composite.composite internal error: '+
'incorrect representation of the loop IDs', doexit=True)
return lids
    def transform(self):
        '''To apply various loop transformations.

        Reads and validates the transformation arguments, then delegates the
        actual rewriting of self.stmt to applyTransf(); returns the
        transformed statement.
        '''
        # Evaluate and semantically check the transformation arguments.
        args_info = self.__readTransfArgs(self.perf_params, self.transf_args)
        (tiles, permuts, regtiles, ujams, scalarrep,
         boundrep, pragma, openmp, vector, arrcopy, cuda) = args_info
        try:
            transformed_stmt = self.applyTransf(tiles, permuts, regtiles, ujams, scalarrep, boundrep,
                                                pragma, openmp, vector, arrcopy, cuda, self.stmt)
        except Exception, e:
            # NOTE(review): this assumes err() terminates the process;
            # otherwise the return below would raise NameError because
            # transformed_stmt was never bound.
            err('orio.module.loop.submodule.composite.composite : error transforming "%s"\n --> %s:%s' %
                (self.stmt, e.__class__, e))
        return transformed_stmt
| false | true |
f71bdd99c5f2350a22133188e60716a5121147ae | 403 | py | Python | SentyectorAPI/SentyectorAPI/wsgi.py | vaibhavarora102/Sentyector | c9023fe38e3517bd39b932a3282f5aebe5e84fbf | [
"MIT"
] | null | null | null | SentyectorAPI/SentyectorAPI/wsgi.py | vaibhavarora102/Sentyector | c9023fe38e3517bd39b932a3282f5aebe5e84fbf | [
"MIT"
] | null | null | null | SentyectorAPI/SentyectorAPI/wsgi.py | vaibhavarora102/Sentyector | c9023fe38e3517bd39b932a3282f5aebe5e84fbf | [
"MIT"
] | 3 | 2021-04-10T06:33:07.000Z | 2021-04-10T12:04:19.000Z | """
WSGI config for SentyectorAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the application,
# unless the environment already names a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SentyectorAPI.settings')

# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SentyectorAPI.settings')
application = get_wsgi_application()
| true | true |
f71bddb9bef15222e2b3b6e6039b80e283a19bf6 | 11,278 | py | Python | src/model/dataProcessing/coco_utils.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 2 | 2021-07-12T02:37:46.000Z | 2021-12-28T23:03:20.000Z | src/model/dataProcessing/coco_utils.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 28 | 2021-12-29T00:51:24.000Z | 2022-03-24T08:03:59.000Z | src/model/dataProcessing/coco_utils.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 4 | 2021-09-24T16:13:43.000Z | 2022-03-09T17:52:35.000Z | import copy
import os
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
import dataProcessing.transforms as T
import logging
class FilterAndRemapCocoCategories(object):
    """Target transform that drops annotations outside a category whitelist.

    When `remap` is true, surviving annotations are deep-copied and their
    `category_id` is rewritten to the index of the category within
    `categories` (a contiguous relabeling); otherwise the filtered list is
    stored as-is.
    """

    def __init__(self, categories, remap=True):
        self.categories = categories
        self.remap = remap

    def __call__(self, image, target):
        kept = [ann for ann in target["annotations"]
                if ann["category_id"] in self.categories]
        if not self.remap:
            target["annotations"] = kept
            return image, target
        # Deep-copy before mutating so the caller's annotations are untouched.
        kept = copy.deepcopy(kept)
        for ann in kept:
            ann["category_id"] = self.categories.index(ann["category_id"])
        target["annotations"] = kept
        return image, target
def convert_polygons(polygons, height, width):
    """Scale polygon coordinates by the image size.

    `polygons` is a list of flat coordinate lists [x1, y1, x2, y2, ...];
    x-values are multiplied by `width` and y-values by `height` (the
    coordinates are expected to be normalized -- TODO confirm against the
    dataset conversion script).  Returns a new list of scaled polygons.
    """
    max_width = 1080
    if width > max_width:
        # Fix: logging.warn() is deprecated; warning() is the supported name.
        logging.warning('invalid width needs normalizing')
    polyout = []
    for p in polygons:
        # Alternate width/height multipliers matching the x, y, x, y layout.
        mult = [width, height] * (len(p) // 2)
        assert len(mult) == len(p)  # polygons must have an even length
        polyout.append([x * y for x, y in zip(p, mult)])
    return polyout
def transform_coco_polygon(segmentations, height, width):
    """Scale every per-instance polygon list by the image size.

    Applies convert_polygons() to each entry of `segmentations` and returns
    the list of scaled polygon lists, preserving order.
    """
    return [convert_polygons(polys, height, width) for polys in segmentations]
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into a uint8 mask tensor.

    Each entry of `segmentations` (the polygons of one instance) is scaled
    by the image size, encoded to RLE via pycocotools, decoded to pixels,
    and collapsed across polygon parts with a logical OR.  Returns a tensor
    of shape (num_instances, height, width); an empty zero tensor of shape
    (0, height, width) when there are no instances.
    """
    per_instance = []
    for polys in segmentations:
        scaled = convert_polygons(polys, height, width)
        rles = coco_mask.frPyObjects(scaled, height, width)
        decoded = coco_mask.decode(rles)
        # decode() drops the last axis for a single polygon; restore it so
        # the OR-reduction below always sees (H, W, num_parts).
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        instance_mask = torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2)
        per_instance.append(instance_mask)
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
def transform_coco_annotation(anno, height, width):
    """Scale one COCO annotation in place by the image size.

    The segmentation polygons are scaled via convert_polygons(); the bbox
    is scaled and then converted from (x, y, w, h) to (x1, y1, x2, y2) by
    adding each origin coordinate to the matching extent.
    """
    anno['segmentation'] = convert_polygons(anno['segmentation'], height, width)
    scale = [width, height, width, height]
    anno['bbox'] = [coord * factor for coord, factor in zip(anno['bbox'], scale)]
    for pos in range(2, len(anno['bbox'])):
        anno['bbox'][pos] += anno['bbox'][pos - 2]
class ConvertCocoPolysToMask(object):
    """Target transform: turn raw COCO annotations into tensor targets.

    Produces absolute-pixel xyxy boxes, int64 labels, per-instance binary
    masks, optional keypoints, and area/iscrowd bookkeeping.  Degenerate
    boxes (zero width or height) are filtered out.  Box and polygon
    coordinates are rescaled by the image size here, so they are assumed
    to be normalized to [0, 1] -- TODO confirm against the dataset
    conversion script.
    """
    def __call__(self, image, target):
        # PIL image size is (width, height)
        w, h = image.size
        # print(w,h)
        image_id = target["image_id"]
        image_id = torch.tensor([image_id])
        anno = target["annotations"]
        # TODO: now fixed in the conversion script.
        # for obj in anno:
        #     obj['iscrowd']=0
        # Crowd regions are excluded from the training targets.
        anno = [obj for obj in anno if obj['iscrowd'] == 0]
        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        # (x, y, w, h) -> (x1, y1, x2, y2), then scale to absolute pixels
        # and clip to the image bounds.
        boxes[:, 2:] += boxes[:, :2]
        boxes *= torch.as_tensor([w, h, w, h])
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)
        segmentations = [obj["segmentation"] for obj in anno]
        masks = convert_coco_poly_to_mask(segmentations, h, w)
        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                # Reshape the flat (x, y, visibility) triples to (N, K, 3).
                keypoints = keypoints.view(num_keypoints, -1, 3)
        # Keep only non-degenerate boxes; apply the same filter to every
        # per-instance field so they stay aligned.
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]
        target = {}
        target["boxes"] = boxes
        target["labels"] = classes
        target["masks"] = masks
        target["image_id"] = image_id
        if keypoints is not None:
            target["keypoints"] = keypoints
        # for conversion to coco api
        # NOTE: area/iscrowd are NOT filtered by `keep`, so they may be
        # longer than boxes when degenerate boxes were dropped -- verify
        # downstream COCO evaluation tolerates this.
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
        # iscrowd = torch.tensor([0 for obj in anno])
        target["area"] = area
        target["iscrowd"] = iscrowd
        return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
    """Return a Subset of `dataset` keeping only images with usable targets.

    An image is kept when it has at least one annotation, not all of its
    boxes are degenerate (width or height <= 1), and -- for keypoint
    annotations -- at least 10 keypoints are visible.  When `cat_list` is
    given, only annotations in those categories are considered.
    """
    min_keypoints_per_image = 10

    def _all_boxes_degenerate(annos):
        # bbox is (x, y, w, h); a side of <= 1 pixel makes the box useless
        return all(any(side <= 1 for side in a["bbox"][2:]) for a in annos)

    def _visible_keypoints(annos):
        # visibility flags live at every third position starting at index 2
        return sum(sum(1 for v in a["keypoints"][2::3] if v > 0) for a in annos)

    def _is_usable(annos):
        if len(annos) == 0:
            return False
        if _all_boxes_degenerate(annos):
            return False
        if "keypoints" not in annos[0]:
            return True
        # keypoint tasks use a slightly different validity criterion
        return _visible_keypoints(annos) >= min_keypoints_per_image

    assert isinstance(dataset, torchvision.datasets.CocoDetection)
    kept_indices = []
    for ds_idx, img_id in enumerate(dataset.ids):
        ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
        annos = dataset.coco.loadAnns(ann_ids)
        if cat_list:
            annos = [a for a in annos if a["category_id"] in cat_list]
        if _is_usable(annos):
            kept_indices.append(ds_idx)
    return torch.utils.data.Subset(dataset, kept_indices)
def convert_to_coco_api(ds):
    """Build an in-memory pycocotools COCO object from a torch-style dataset.

    Iterates the whole dataset once, collecting image records and per-object
    annotations (bbox, label, area, iscrowd, optional RLE masks and
    keypoints).  WARNING: mutates each target's "boxes" tensor in place
    (xyxy -> xywh), so targets should not be reused afterwards.
    NOTE(review): annotation ids start at 0 here; other implementations
    start at 1 -- verify pycocotools evaluation is unaffected.
    """
    coco_ds = COCO()
    ann_id = 0
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        # image tensor layout has height/width as the last two axes
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        img_dict['image'] = img
        dataset['images'].append(img_dict)
        # convert (x1, y1, x2, y2) back to COCO's (x, y, w, h) -- in place!
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            # flatten (N, K, 3) back to COCO's flat per-object list
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                # COCO counts labeled keypoints (visibility flag != 0)
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
def get_coco_api_from_dataset(dataset):
    """Return the pycocotools COCO object backing `dataset`.

    Unwraps up to 10 levels of torch.utils.data.Subset nesting.  If the
    unwrapped dataset is a torchvision CocoDetection, its cached COCO
    object is returned directly; otherwise one is built by iterating the
    dataset with convert_to_coco_api().
    """
    hops = 0
    while hops < 10 and not isinstance(dataset, torchvision.datasets.CocoDetection):
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        hops += 1
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco
    return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
    """torchvision CocoDetection wrapper that applies joint transforms.

    Each sample's raw annotation list is wrapped into a
    {"image_id": ..., "annotations": ...} dict before the (image, target)
    transform pipeline runs.
    """
    def __init__(self, img_folder, ann_file, transforms):
        # `transforms` is a joint (image, target) callable, unlike the
        # image-only `transform` of the parent class.
        super(CocoDetection, self).__init__(img_folder, ann_file)
        self._transforms = transforms
    def __getitem__(self, idx):
        """Return (image, target) with the joint transforms applied."""
        img, target = super(CocoDetection, self).__getitem__(idx)
        image_id = self.ids[idx]
        # print(image_id)
        target = dict(image_id=image_id, annotations=target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
    @staticmethod
    def get_coco_api(dataset, transform=False):
        """Return the COCO object behind `dataset` (unwrapping Subsets).

        NOTE(review): duplicates module-level get_coco_api_from_dataset(),
        except that this variant can return the transformed COCO object and
        raises instead of falling back to convert_to_coco_api().
        """
        # Unwrap at most 10 levels of Subset nesting.
        for i in range(10):
            if isinstance(dataset, torchvision.datasets.CocoDetection):
                break
            if isinstance(dataset, torch.utils.data.Subset):
                dataset = dataset.dataset
        if isinstance(dataset, torchvision.datasets.CocoDetection):
            if not transform:
                return dataset.coco
            else:
                return dataset.transform_coco_api()
        raise Exception("No instance of CocoDetection found")
    def transform_coco_api(self):
        """Return a deep copy of self.coco rescaled to absolute pixels.

        WARNING: iterates the ENTIRE dataset once (loading every image) to
        learn the image sizes, so this is expensive on large datasets.
        """
        coco = copy.deepcopy(self.coco)
        image_sizes = {}
        for img, target in self:
            image_sizes[target['image_id'].item()] = img.size()[1:]  # TODO: width vs height. Always len 3?
        for img in coco.dataset['images']:
            (h, w) = image_sizes[img['id']]
            img['width'] = w
            img['height'] = h
        for ann in coco.dataset['annotations']:
            id = ann['image_id']
            (h, w) = image_sizes[id]
            # scale bbox/segmentation in place; also converts bbox to xyxy
            transform_coco_annotation(ann, h, w)
        coco.createIndex()
        return coco
def get_coco(root, image_set, transforms, mode='instances'):
    """Create a COCO-2017 detection dataset for the given split.

    root: dataset root directory; image_set: "train" or "val";
    transforms: optional joint transform composed after
    ConvertCocoPolysToMask; mode: annotation flavor such as "instances"
    or "person_keypoints".  The training split is filtered down to images
    that have usable annotations.
    """
    anno_template = "{}_{}2017.json"
    split_paths = {
        "train": ("train2017", os.path.join("annotations", anno_template.format(mode, "train"))),
        "val": ("val2017", os.path.join("annotations", anno_template.format(mode, "val"))),
    }
    img_dir, ann_path = split_paths[image_set]

    # Polygon-to-mask conversion always runs first; the caller-supplied
    # transform (if any) is applied afterwards.
    pipeline = [ConvertCocoPolysToMask()]
    if transforms is not None:
        pipeline.append(transforms)

    dataset = CocoDetection(
        os.path.join(root, img_dir),
        os.path.join(root, ann_path),
        transforms=T.Compose(pipeline),
    )
    if image_set == "train":
        dataset = _coco_remove_images_without_annotations(dataset)
    return dataset
def get_coco_kp(root, image_set, transforms):
    """Keypoint variant of get_coco(): loads person_keypoints annotations."""
    return get_coco(root, image_set, transforms, mode="person_keypoints")
| 34.916409 | 107 | 0.605604 | import copy
import os
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
import dataProcessing.transforms as T
import logging
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_polygons(polygons, height, width):
max_width = 1080
if width > max_width:
logging.warn('invalid width needs normalizing')
polyout = []
for p in polygons:
mult = [width, height] * (len(p) // 2)
assert(len(mult) == len(p))
polyout.append([x * y for x, y in zip(p, mult)])
return polyout
def transform_coco_polygon(segmentations, height, width):
result = []
for polygons in segmentations:
polyout = convert_polygons(polygons, height, width)
result.append(polyout)
return result
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
polygons = convert_polygons(polygons, height, width)
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
def transform_coco_annotation(anno, height, width):
anno['segmentation'] = convert_polygons(anno['segmentation'], height, width)
anno['bbox'] = [x * y for (x, y) in zip(anno['bbox'], [width, height, width, height])]
for i in range(2, len(anno['bbox'])):
anno['bbox'][i] += anno['bbox'][i - 2]
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes *= torch.as_tensor([w, h, w, h])
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
# keypoints task have a slight different critera for considering
# if an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
ann_id = 0
dataset = {'images': [], 'categories': [], 'annotations': []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
image_id = targets["image_id"].item()
img_dict = {}
img_dict['id'] = image_id
img_dict['height'] = img.shape[-2]
img_dict['width'] = img.shape[-1]
img_dict['image'] = img
dataset['images'].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets['labels'].tolist()
areas = targets['area'].tolist()
iscrowd = targets['iscrowd'].tolist()
if 'masks' in targets:
masks = targets['masks']
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if 'keypoints' in targets:
keypoints = targets['keypoints']
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann['image_id'] = image_id
ann['bbox'] = bboxes[i]
ann['category_id'] = labels[i]
categories.add(labels[i])
ann['area'] = areas[i]
ann['iscrowd'] = iscrowd[i]
ann['id'] = ann_id
if 'masks' in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if 'keypoints' in targets:
ann['keypoints'] = keypoints[i]
ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
dataset['annotations'].append(ann)
ann_id += 1
dataset['categories'] = [{'id': i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for i in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
# print(image_id)
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
@staticmethod
def get_coco_api(dataset, transform=False):
for i in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
if not transform:
return dataset.coco
else:
return dataset.transform_coco_api()
raise Exception("No instance of CocoDetection found")
def transform_coco_api(self):
coco = copy.deepcopy(self.coco)
image_sizes = {}
for img, target in self:
image_sizes[target['image_id'].item()] = img.size()[1:] # TODO: width vs height. Always len 3?
for img in coco.dataset['images']:
(h, w) = image_sizes[img['id']]
img['width'] = w
img['height'] = h
for ann in coco.dataset['annotations']:
id = ann['image_id']
(h, w) = image_sizes[id]
transform_coco_annotation(ann, h, w)
coco.createIndex()
return coco
def get_coco(root, image_set, transforms, mode='instances'):
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": ("train2017", os.path.join("annotations", anno_file_template.format(mode, "train"))),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val")))
}
t = [ConvertCocoPolysToMask()]
if transforms is not None:
t.append(transforms)
transforms = T.Compose(t)
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root, ann_file)
dataset = CocoDetection(img_folder, ann_file, transforms=transforms)
if image_set == "train":
dataset = _coco_remove_images_without_annotations(dataset)
# dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])
return dataset
def get_coco_kp(root, image_set, transforms):
return get_coco(root, image_set, transforms, mode="person_keypoints")
| true | true |
f71bdfb4494b0ea7e8661dce26911937bbed2de0 | 1,952 | py | Python | facility_management/facility_management/doctype/fm_dashboard/fm_dashboard.py | odoochain/facility_management | 545146db4e58e90311934a9d39c77def2d2a3e70 | [
"MIT"
] | 13 | 2020-06-23T23:44:16.000Z | 2022-03-19T14:40:49.000Z | facility_management/facility_management/doctype/fm_dashboard/fm_dashboard.py | pazari/fmt | 649618d47cd5cdefce93b5dc7efe5c25c299ad9c | [
"MIT"
] | 2 | 2021-06-04T06:18:09.000Z | 2021-06-06T08:41:36.000Z | facility_management/facility_management/doctype/fm_dashboard/fm_dashboard.py | pazari/fmt | 649618d47cd5cdefce93b5dc7efe5c25c299ad9c | [
"MIT"
] | 18 | 2020-02-18T10:57:13.000Z | 2022-01-26T09:01:21.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FMDashboard(Document):
    """Dashboard doctype aggregating outstanding tenant invoice balances."""

    def make_outstanding_balances(self):
        """Rebuild the `outstanding_balances` child table for display.

        Clears the table, then appends one row per outstanding sales
        invoice belonging to a property at this dashboard's location.
        """
        self.outstanding_balances = None
        balances = _get_outstanding_balances(_get_properties(self.real_estate_property))
        for balance in balances:
            self.append('outstanding_balances', {
                'property_name': balance.get('property_name'),
                'sales_invoice': balance.get('sales_invoice'),
                'outstanding_amount': balance.get('outstanding_amount'),
            })
def _get_properties(real_estate_property):
    """Return the names of all Property docs at the given location."""
    found = frappe.get_all('Property', {'property_location': real_estate_property})
    return [doc['name'] for doc in found]
def _get_outstanding_balances(filter_properties):
    """Return outstanding sales-invoice balances for the given properties.

    Queries all submitted invoices with an outstanding amount and a tenant
    renting link, then keeps only those whose rented property is in
    `filter_properties`.  Each result dict carries property_name,
    sales_invoice and outstanding_amount.
    """
    invoices = frappe.db.sql("""
    SELECT
        si.name,
        si.pm_tenant_renting,
        si.outstanding_amount,
        tr.property
    FROM `tabSales Invoice` si
    LEFT JOIN `tabTenant Renting` tr ON si.pm_tenant_renting = tr.name
    WHERE si.docstatus = 1
    AND si.outstanding_amount > 0
    AND si.pm_tenant_renting != ''
    """, as_dict=True)
    balances = []
    for invoice in invoices:
        if invoice['property'] not in filter_properties:
            continue
        balances.append({
            'property_name': _get_property_name(invoice.get('pm_tenant_renting')),
            'sales_invoice': invoice.get('name'),
            'outstanding_amount': invoice.get('outstanding_amount'),
        })
    return balances
def _get_property_name(tenant_renting):
    """Return the Property title linked to `tenant_renting`, or None."""
    rows = frappe.db.sql("""
    SELECT p.title
    FROM `tabTenant Renting` tr
    JOIN `tabProperty` p
        ON tr.property = p.name
    WHERE tr.name = %s
    """, tenant_renting, as_dict=True)
    if not rows:
        return None
    return rows[0]['title']
| 29.134328 | 111 | 0.749488 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FMDashboard(Document):
def make_outstanding_balances(self):
self.outstanding_balances = None
outstanding_balances = _get_outstanding_balances(_get_properties(self.real_estate_property))
for outstanding_balance in outstanding_balances:
self.append('outstanding_balances', {
'property_name': outstanding_balance.get('property_name'),
'sales_invoice': outstanding_balance.get('sales_invoice'),
'outstanding_amount': outstanding_balance.get('outstanding_amount')
})
def _get_properties(real_estate_property):
return list(map(lambda x: x['name'], frappe.get_all('Property', {'property_location': real_estate_property})))
def _get_outstanding_balances(filter_properties):
    """Return outstanding submitted Sales Invoices for the given properties.

    Each row carries the property title, the invoice name and the amount
    still owing. ``filter_properties`` is an iterable of Property names.
    """
    def make_data(balance):
        # Resolve the human-readable property title via the Tenant Renting link.
        property_name = _get_property_name(balance.get('pm_tenant_renting'))
        return {
            'property_name': property_name,
            'sales_invoice': balance.get('name'),
            'outstanding_amount': balance.get('outstanding_amount')
        }
    outstanding = frappe.db.sql("""
        SELECT
            si.name,
            si.pm_tenant_renting,
            si.outstanding_amount,
            tr.property
        FROM `tabSales Invoice` si
        LEFT JOIN `tabTenant Renting` tr ON si.pm_tenant_renting = tr.name
        WHERE si.docstatus = 1
        AND si.outstanding_amount > 0
        AND si.pm_tenant_renting != ''
    """, as_dict=True)
    # Keep only invoices whose linked property is in the requested set.
    outstanding = filter(lambda x: x['property'] in filter_properties, outstanding)
    return list(map(make_data, outstanding))
def _get_property_name(tenant_renting):
    """Return the Property title linked to a Tenant Renting record, or None."""
    data = frappe.db.sql("""
        SELECT p.title
        FROM `tabTenant Renting` tr
        JOIN `tabProperty` p
        ON tr.property = p.name
        WHERE tr.name = %s
    """, tenant_renting, as_dict=True)
    return data[0]['title'] if data else None
| true | true |
f71be05c5a13840628f874af73f7d2dd87d4c2db | 765 | py | Python | ImagePoster/urls.py | AllyxMiko/ImagePoster | 70b95a74b3ddb639e658f6b780dae5351947d6d6 | [
"MIT"
] | null | null | null | ImagePoster/urls.py | AllyxMiko/ImagePoster | 70b95a74b3ddb639e658f6b780dae5351947d6d6 | [
"MIT"
] | null | null | null | ImagePoster/urls.py | AllyxMiko/ImagePoster | 70b95a74b3ddb639e658f6b780dae5351947d6d6 | [
"MIT"
] | null | null | null | """ImagePoster URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.urls.conf import include

# Delegate all URLs under /images/ to the `images` app's URLconf.
urlpatterns = [
    path('images/', include('images.urls')),
]
| 34.772727 | 77 | 0.709804 | from django.urls import path
from django.urls.conf import include
urlpatterns = [
path('images/', include('images.urls')),
]
| true | true |
f71be08bb68624aa77a36c61980a89daf8d0c77e | 1,386 | py | Python | setup.py | audiosamsung/brentvollebregt | 077bbb8601b6c179d7b750a7e31ee141bd5b0644 | [
"MIT"
] | 1 | 2021-11-09T10:24:40.000Z | 2021-11-09T10:24:40.000Z | setup.py | audiosamsung/brentvollebregt | 077bbb8601b6c179d7b750a7e31ee141bd5b0644 | [
"MIT"
] | null | null | null | setup.py | audiosamsung/brentvollebregt | 077bbb8601b6c179d7b750a7e31ee141bd5b0644 | [
"MIT"
] | null | null | null | from io import open
from setuptools import setup
from auto_py_to_exe import __version__ as version
setup(
name='auto-py-to-exe',
version=version,
url='https://github.com/brentvollebregt/auto-py-to-exe',
license='MIT',
author='Brent Vollebregt',
author_email='brent@nitratine.net',
description='Converts .py to .exe using a simple graphical interface.',
long_description=''.join(open('README.md', encoding='utf-8').readlines()),
long_description_content_type='text/markdown',
keywords=['gui', 'executable'],
packages=['auto_py_to_exe'],
include_package_data=True,
install_requires=['Eel==0.12.4', 'pyinstaller>=4.1'],
python_requires='>=3.5',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
],
entry_points={
'console_scripts': [
'autopytoexe=auto_py_to_exe.__main__:run',
'auto-py-to-exe=auto_py_to_exe.__main__:run'
],
},
)
| 35.538462 | 78 | 0.629149 | from io import open
from setuptools import setup
from auto_py_to_exe import __version__ as version
setup(
name='auto-py-to-exe',
version=version,
url='https://github.com/brentvollebregt/auto-py-to-exe',
license='MIT',
author='Brent Vollebregt',
author_email='brent@nitratine.net',
description='Converts .py to .exe using a simple graphical interface.',
long_description=''.join(open('README.md', encoding='utf-8').readlines()),
long_description_content_type='text/markdown',
keywords=['gui', 'executable'],
packages=['auto_py_to_exe'],
include_package_data=True,
install_requires=['Eel==0.12.4', 'pyinstaller>=4.1'],
python_requires='>=3.5',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
],
entry_points={
'console_scripts': [
'autopytoexe=auto_py_to_exe.__main__:run',
'auto-py-to-exe=auto_py_to_exe.__main__:run'
],
},
)
| true | true |
f71be0c7310b1e5a59e2faa1aa48d7c69c065244 | 41,236 | py | Python | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 1 | 2020-04-07T21:12:08.000Z | 2020-04-07T21:12:08.000Z | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 2 | 2019-09-16T17:58:31.000Z | 2019-09-22T17:26:01.000Z | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Selected CI
Simple usage::
>>> from pyscf import gto, scf, ao2mo, fci
>>> mol = gto.M(atom='C 0 0 0; C 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
>>> h2 = ao2mo.kernel(mol, mf.mo_coeff)
>>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
    '''Contract the two-electron Hamiltonian with a selected-CI vector.

    ``eri`` is assumed to already carry the absorbed one-electron part
    (see :func:`direct_spin1.absorb_h1e`).  ``link_index`` is the 4-tuple of
    linking tables (cre-des alpha, des-des alpha, cre-des beta, des-des beta)
    returned by ``_all_linkstr_index``; it is rebuilt when not supplied.
    Returns an ``_SCIvector`` over the same determinant strings.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
    cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]
    eri = ao2mo.restore(1, eri, norb)
    # Antisymmetrized same-spin integrals, compressed to the lower-triangular
    # orbital-pair index expected by the C same-spin kernel.
    eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
    idx,idy = numpy.tril_indices(norb, -1)
    idx = idx * norb + idy
    eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
    fcivec = ci_coeff.reshape(na,nb)
    # (bb|bb): run the same-spin kernel on the transposed vector so the
    # beta strings become the leading dimension.
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        ci1T = numpy.zeros((nb,na))
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivecT.ctypes.data_as(ctypes.c_void_p),
                                   ci1T.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(nb), ctypes.c_int(na),
                                   ctypes.c_int(mb), ctypes.c_int(mlinkb),
                                   dd_indexb.ctypes.data_as(ctypes.c_void_p))
        ci1 = lib.transpose(ci1T, out=fcivecT)
    else:
        ci1 = numpy.zeros_like(fcivec)
    # (aa|aa)
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivec.ctypes.data_as(ctypes.c_void_p),
                                   ci1.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(na), ctypes.c_int(nb),
                                   ctypes.c_int(ma), ctypes.c_int(mlinka),
                                   dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # Fold the absorbed one-electron contribution h_ps evenly into the
    # opposite-spin integrals before the mixed-spin contraction.
    h_ps = numpy.einsum('pqqs->ps', eri)
    eri1 = eri * 2
    for k in range(norb):
        eri1[:,:,k,k] += h_ps/nelec[0]
        eri1[k,k,:,:] += h_ps/nelec[1]
    eri1 = ao2mo.restore(4, eri1, norb)
    # (bb|aa)
    libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
                               fcivec.ctypes.data_as(ctypes.c_void_p),
                               ci1.ctypes.data_as(ctypes.c_void_p),
                               ctypes.c_int(norb),
                               ctypes.c_int(na), ctypes.c_int(nb),
                               ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                               cd_indexa.ctypes.data_as(ctypes.c_void_p),
                               cd_indexb.ctypes.data_as(ctypes.c_void_p))
    return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
    '''Propose new determinant strings connected to ``strs`` whose estimated
    perturbative weight exceeds ``myci.select_cutoff``.

    ``eri_pq_max`` holds per-(p,q) maxima of the integrals and ``civec_max``
    the per-string maxima of the CI coefficients; both feed the screening in
    the C kernel.  Returns a sorted array of strings not already in ``strs``.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nstrs = len(strs)
    nvir = norb - nelec
    # Upper bound on the number of doubly-excited strings the C kernel may emit.
    strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
    libfci.SCIselect_strs.restype = ctypes.c_int
    nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
                                 strs.ctypes.data_as(ctypes.c_void_p),
                                 eri.ctypes.data_as(ctypes.c_void_p),
                                 eri_pq_max.ctypes.data_as(ctypes.c_void_p),
                                 civec_max.ctypes.data_as(ctypes.c_void_p),
                                 ctypes.c_double(myci.select_cutoff),
                                 ctypes.c_int(norb), ctypes.c_int(nelec),
                                 ctypes.c_int(nstrs))
    # Deduplicate and drop strings already present in the current space.
    strs_add = sorted(set(strs_add[:nadd]) - set(strs))
    return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
    '''Grow the selected-CI determinant space.

    Strings whose coefficient norm falls below ``myci.ci_coeff_cutoff`` are
    pruned; new strings proposed by :func:`select_strs` are merged in.  The
    old CI coefficients are re-embedded into the enlarged (sorted) space.
    Accepts a single vector or a list of state vectors and returns the same
    shape of result.
    '''
    if isinstance(civec_strs, (tuple, list)):
        nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
        ci_coeff = lib.asarray(civec_strs)
    else:
        ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
    na = len(strsa)
    nb = len(strsb)
    ci0 = ci_coeff.reshape(-1,na,nb)
    # Per-string maxima over all states, used both for pruning and for the
    # selection screening below.
    civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
    civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
    ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
    ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
    civec_a_max = civec_a_max[ci_aidx]
    civec_b_max = civec_b_max[ci_bidx]
    strsa = strsa[ci_aidx]
    strsb = strsb[ci_bidx]
    eri = ao2mo.restore(1, eri, norb)
    eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
    strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
    strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
    strsa = numpy.append(strsa, strsa_add)
    strsb = numpy.append(strsb, strsb_add)
    aidx = numpy.argsort(strsa)
    bidx = numpy.argsort(strsb)
    ci_strs = (strsa[aidx], strsb[bidx])
    # Positions of the retained old strings inside the new sorted order
    # (old strings were appended first, so their original index is < len(ci_?idx)).
    aidx = numpy.where(aidx < len(ci_aidx))[0]
    bidx = numpy.where(bidx < len(ci_bidx))[0]
    ma = len(strsa)
    mb = len(strsb)
    cs = []
    for i in range(ci0.shape[0]):
        ci1 = numpy.zeros((ma,mb))
        tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
        lib.takebak_2d(ci1, tmp, aidx, bidx)
        cs.append(_as_SCIvector(ci1, ci_strs))
    if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
        cs = cs[0]
    return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
    '''Build the creation-annihilation (E_pq) linking table for ``strs``.

    Each of the ``nstrs`` rows lists up to ``nelec + nelec*nvir`` entries of
    the form (p, q, target-string address, sign).  With ``tril`` only the
    lower-triangular p>=q pairs are generated.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
    libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nstrs),
                              ctypes.c_int(nelec),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular variant of :func:`cre_des_linkstr`.

    Equivalent to calling :func:`cre_des_linkstr` with the tril flag set,
    so only p>=q orbital pairs appear in the table.
    '''
    return cre_des_linkstr(strs, norb, nelec, tril=True)
def des_des_linkstr(strs, norb, nelec, tril=False):
    '''Build the double-annihilation linking table for ``strs``.

    First enumerates the unique (nelec-2)-electron intermediate strings
    reached by removing two electrons, then tabulates, for each intermediate,
    the (p, q, source address, sign) entries that connect back to ``strs``.
    Returns None when fewer than two electrons are present.
    '''
    if nelec < 2:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    # First annihilation: unique (nelec-1)-electron intermediates.
    inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
    ninter = len(inter1)
    # Second annihilation: unique (nelec-2)-electron intermediates.
    inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     inter1.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec-1),
                                     ctypes.c_int(ninter))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    # Two orbitals were freed by the annihilations.
    nvir += 2
    link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
    libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nelec),
                              ctypes.c_int(nstrs), ctypes.c_int(ninter),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              inter.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def des_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular variant of :func:`des_des_linkstr`.

    Equivalent to calling :func:`des_des_linkstr` with the tril flag set.
    Returns None when fewer than two electrons are present.
    '''
    return des_des_linkstr(strs, norb, nelec, tril=True)
def gen_des_linkstr(strs, norb, nelec):
    '''Build the single-annihilation linking table for ``strs``.

    Enumerates the unique (nelec-1)-electron intermediate strings, then
    tabulates for each intermediate the entries connecting back to ``strs``.
    Returns None when no electron can be annihilated.
    '''
    if nelec < 1:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    # One orbital was freed by the annihilation.
    nvir += 1
    link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
    libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def gen_cre_linkstr(strs, norb, nelec):
    '''Build the single-creation linking table for ``strs``.

    Enumerates the unique (nelec+1)-electron intermediate strings, then
    tabulates for each intermediate the entries connecting back to ``strs``.
    Returns None when every orbital is already occupied.
    '''
    if nelec == norb:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
    libfci.SCIcre_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
    libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
    '''Diagonal of the Hamiltonian over the selected determinant space.

    Returns a flat array of length ``len(strsa)*len(strsb)`` used as the
    Davidson preconditioner.
    '''
    ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    hdiag = numpy.empty(na*nb)
    h1e = numpy.asarray(h1e, order='C')
    eri = ao2mo.restore(1, eri, norb)
    # Coulomb and exchange diagonals J_ij = (ii|jj), K_ij = (ij|ji).
    jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
    kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
    c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
    c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
    c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
    occslsta = cistring._strs2occslst(ci_strs[0], norb)
    occslstb = cistring._strs2occslst(ci_strs[1], norb)
    # Reuse the UHF diagonal builder with identical alpha/beta integrals.
    libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
                             c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
                             occslsta.ctypes.data_as(ctypes.c_void_p),
                             occslstb.ctypes.data_as(ctypes.c_void_p))
    return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Diagonalize the Hamiltonian within a FIXED selected determinant space
    ``ci_strs`` (no selection/enlargement is performed).

    Returns (energy + ecore, civec) for one root, or (energies, [civecs])
    when ``nroots > 1``.
    '''
    log = logger.new_logger(myci, verbose)
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()
    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)
    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    if isinstance(ci0, _SCIvector):
        if ci0.size == na*nb:
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
    def hop(c):
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.reshape(-1)
    precond = lambda x, e, *args: x/(hdiag-e+1e-4)
    #e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)
    if nroots > 1:
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Selected-CI driver with an adaptively growing determinant space.

    Alternates Davidson diagonalization (at a loose, decaying tolerance)
    with :func:`enlarge_space`, until either the energy change or the growth
    of the determinant space becomes negligible, then performs a final tight
    diagonalization.  Returns (energy + ecore, civec); lists for nroots > 1.
    '''
    log = logger.new_logger(myci, verbose)
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()
    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)
    # TODO: initial guess from CISD
    if isinstance(ci0, _SCIvector):
        if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        # Seed the space from the HF determinant and expand by selection.
        ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
                   numpy.asarray([int('1'*nelec[1], 2)]))
        ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        if ci0.size < nroots:
            log.warn('''
  Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
  HOMO->LUMO excitations are included in the initial guess.
  NOTE: This may introduce excited states of different symmetry.\n''')
            corea = '1' * (nelec[0]-1)
            coreb = '1' * (nelec[1]-1)
            ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
                       numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
            ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
            ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
            if ci0.size < nroots:
                raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
    ci_strs = ci0._strs
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
    def hop(c):
        # link_index/ci_strs are rebound each macro-iteration before hop is used.
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.ravel()
    precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)
    namax = cistring.num_strings(norb, nelec[0])
    nbmax = cistring.num_strings(norb, nelec[1])
    e_last = 0
    float_tol = myci.start_tol
    tol_decay_rate = myci.tol_decay_rate
    conv = False
    for icycle in range(norb):
        ci_strs = ci0[0]._strs
        # Tighten the Davidson tolerance as the space stabilizes.
        float_tol = max(float_tol*tol_decay_rate, tol*1e2)
        log.debug('cycle %d  ci.shape %s  float_tol %g',
                  icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
        ci0 = [c.ravel() for c in ci0]
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
        hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
        #e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)
        e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
                          max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                          max_memory=max_memory, verbose=log, **kwargs)
        if nroots > 1:
            ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
            de, e_last = min(e)-e_last, min(e)
            log.info('cycle %d  E = %s  dE = %.8g', icycle, e+ecore, de)
        else:
            ci0 = [_as_SCIvector(ci0, ci_strs)]
            de, e_last = e-e_last, e
            log.info('cycle %d  E = %.15g  dE = %.8g', icycle, e+ecore, de)
        if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
            conv = True
            break
        last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        na = len(ci0[0]._strs[0])
        nb = len(ci0[0]._strs[1])
        # Stop when the space size changed by less than ~1% in each spin sector.
        if ((.99 < na/last_ci0_size[0] < 1.01) and
            (.99 < nb/last_ci0_size[1] < 1.01)):
            conv = True
            break
    ci_strs = ci0[0]._strs
    log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
    ci0 = [c.ravel() for c in ci0]
    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    # Final diagonalization at the requested tolerance in the converged space.
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    if nroots > 1:
        for i, ei in enumerate(e+ecore):
            log.info('Selected CI state %d  E = %.15g', i, ei)
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        log.info('Selected CI  E = %.15g', e+ecore)
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
    '''Convenience entry point: construct a :class:`SelectedCI` solver via
    ``direct_spin1._kfactory`` and run it.  Returns (energy, civec).'''
    return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
                                  level_shift, tol, lindep, max_cycle,
                                  max_space, nroots, davidson_only,
                                  pspace_size, select_cutoff=select_cutoff,
                                  ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
                                  **kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
    r'''Spin separated 1-particle density matrices.
    The return values include two density matrices: (alpha,alpha), (beta,beta)

    dm1[p,q] = <q^\dagger p>

    The convention is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        # Only the cre-des tables of the 4-tuple are needed here.
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def make_rdm1(civec_strs, norb, nelec, link_index=None):
    r'''Spin-traced 1-particle density matrix.

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention follows McWeeney's book, Eq (5.4.20); the contraction
    with a 1-particle Hamiltonian is E = einsum('pq,qp', h1, rdm1).
    '''
    dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
    return dm1a + dm1b
# dm[p,q,r,s] = <|p^+ q r^+ s|>
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
    r'''Spin separated 2-particle density matrices.
    The return values include three density matrices:
    (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)

    2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
        dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]
    fcivec = ci_coeff.reshape(na,nb)
    # (bb|aa) and (aa|bb): only the 2-pdm part of the generic kernel is kept.
    dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
                                 norb, nelec, (cd_indexa,cd_indexb), 0)[1]
    # (aa|aa)
    dm2aa = numpy.zeros([norb]*4)
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2aa.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(ma), ctypes.c_int(mlinka),
                            dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # (bb|bb): same-spin kernel applied to the transposed CI matrix.
    dm2bb = numpy.zeros([norb]*4)
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2bb.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(nb), ctypes.c_int(na),
                            ctypes.c_int(mb), ctypes.c_int(mlinkb),
                            dd_indexb.ctypes.data_as(ctypes.c_void_p))
    return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
    r'''Spin-traced two-particle density matrix.

    2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
                           \langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.
    '''
    dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
    # The (bb|aa) block is the transpose of the (aa|bb) block.
    dm2ba = dm2ab.transpose(2,3,0,1)
    return dm2aa + dm2ab + dm2ba + dm2bb
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    r'''Spin separated transition 1-particle density matrices.
    See also function :func:`make_rdm1s`

    1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
    '''
    cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
    ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
    # Bra and ket must be expanded over the same determinant strings.
    assert(all(ci_strs[0] == ci_strs1[0]) and
           all(ci_strs[1] == ci_strs1[1]))
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    r'''Spin traced transition 1-particle density matrices.
    See also function :func:`make_rdm1`

    1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
                       + \langle q_\beta^\dagger p_\beta \rangle`
    '''
    dm1a, dm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
    return dm1a + dm1b
def spin_square(civec_strs, norb, nelec):
    '''Expectation value of S^2 and the spin multiplicity for an RHF-FCI CI
    wavefunction (spin-degenerate Hamiltonian only).'''
    sci1 = contract_ss(civec_strs, norb, nelec)
    ss = numpy.einsum('ij,ij->', civec_strs.reshape(sci1.shape), sci1)
    # Recover s from s(s+1) = <S^2>, then the multiplicity 2s+1.
    spin = numpy.sqrt(ss + .25) - .5
    return ss, 2*spin + 1
def contract_ss(civec_strs, norb, nelec):
    r'''Apply the total-spin operator: returns S^2 |\Psi\rangle.

    Implemented as .5*(S+S- + S-S+) + Sz^2 over the selected determinant
    space; excitations leaving the space are dropped.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    strsa, strsb = ci_strs
    neleca, nelecb = nelec
    ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))
    def gen_map(fstr_index, strs, nelec, des=True):
        # Condense a linking table into a dense (nstr, norb, 2) map of
        # (target address, sign) for single annihilation (des) or creation.
        a_index = fstr_index(strs, norb, nelec)
        amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
        if des:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,1]] = tab[:,2:]
        else:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,0]] = tab[:,2:]
        return amap
    if neleca > 0:
        ades = gen_map(gen_des_linkstr, strsa, neleca)
    else:
        ades = None
    if nelecb > 0:
        bdes = gen_map(gen_des_linkstr, strsb, nelecb)
    else:
        bdes = None
    if neleca < norb:
        acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
    else:
        acre = None
    if nelecb < norb:
        bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
    else:
        bcre = None
    def trans(ci1, aindex, bindex):
        # Accumulate one ladder product (e.g. S+S-) into ci1 via the
        # intermediate space t1; skipped when a sector is empty/full.
        if aindex is None or bindex is None:
            return None
        ma = len(aindex)
        mb = len(bindex)
        t1 = numpy.zeros((ma,mb))
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(ci_coeff, addra, addrb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: t1[addra.reshape(-1,1),addrb] += citmp
            lib.takebak_2d(t1, citmp, maska, maskb)
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(t1, maska, maskb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: ci1[maska.reshape(-1,1), maskb] += citmp
            lib.takebak_2d(ci1, citmp, addra, addrb)
    ci1 = numpy.zeros_like(ci_coeff)
    trans(ci1, ades, bcre)  # S+*S-
    trans(ci1, acre, bdes)  # S-*S+
    ci1 *= .5
    # Sz^2 contribution: ((Na-Nb)/2)^2 on every determinant.
    ci1 += (neleca-nelecb)**2*.25*ci_coeff
    return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
    '''Expand a selected-CI vector into the full FCI coefficient matrix,
    zero-filling the determinants outside the selected space.'''
    coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
    neleca, nelecb = nelec
    rows = [cistring.str2addr(norb, neleca, s) for s in strsa]
    cols = [cistring.str2addr(norb, nelecb, s) for s in strsb]
    full = numpy.zeros((cistring.num_strings(norb, neleca),
                        cistring.num_strings(norb, nelecb)))
    lib.takebak_2d(full, coeff, rows, cols)
    return full
def from_fci(fcivec, ci_strs, norb, nelec):
    '''Project a full FCI coefficient matrix onto the selected determinant
    space ``ci_strs``, returning an ``_SCIvector``.'''
    fcivec, nelec, (strsa, strsb) = _unpack(fcivec, nelec, ci_strs)
    neleca, nelecb = nelec
    rows = [cistring.str2addr(norb, neleca, s) for s in strsa]
    cols = [cistring.str2addr(norb, nelecb, s) for s in strsb]
    full = fcivec.reshape(cistring.num_strings(norb, neleca),
                          cistring.num_strings(norb, nelecb))
    picked = lib.take_2d(full, rows, cols)
    return _as_SCIvector(picked, (strsa, strsb))
class SelectedCI(direct_spin1.FCISolver):
ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)
def __init__(self, mol=None):
direct_spin1.FCISolver.__init__(self, mol)
##################################################
# don't modify the following attributes, they are not input options
#self.converged = False
#self.ci = None
self._strs = None
keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',
'start_tol', 'tol_decay_rate'))
self._keys = self._keys.union(keys)
def dump_flags(self, verbose=None):
direct_spin1.FCISolver.dump_flags(self, verbose)
logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)
logger.info(self, 'select_cutoff %g', self.select_cutoff)
def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):
# The argument civec_strs is a CI vector in function FCISolver.contract_2e.
# Save and patch self._strs to make this contract_2e function compatible to
# FCISolver.contract_2e.
if getattr(civec_strs, '_strs', None) is not None:
self._strs = civec_strs._strs
else:
assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
civec_strs = _as_SCIvector(civec_strs, self._strs)
return contract_2e(eri, civec_strs, norb, nelec, link_index)
def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
na = len(ci_strs[0])
nb = len(ci_strs[1])
ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)
return [_as_SCIvector(x, ci_strs) for x in ci0]
def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
return make_hdiag(h1e, eri, ci_strs, norb, nelec)
enlarge_space = enlarge_space
kernel = kernel_float_space
kernel_fixed_space = kernel_fixed_space
# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
# tol=None, lindep=None, max_cycle=None,
# max_memory=None, verbose=None, **kwargs):
# ci_strs = getattr(ci0, '_strs', self._strs)
# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
# ci0, link_index, tol, lindep, 6,
# max_memory, verbose, **kwargs)
    @lib.with_doc(spin_square.__doc__)
    def spin_square(self, civec_strs, norb, nelec):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        # Fall back to the cached self._strs when the vector carries no tag.
        return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)
    def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
        '''List the dominant determinants: entries (coefficient, alpha, beta)
        for all |c| > tol, as bit strings (return_strs=True) or occupation
        lists (return_strs=False).
        '''
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
        addra, addrb = numpy.where(abs(ci) > tol)
        if return_strs:
            strsa = [bin(x) for x in strsa[addra]]
            strsb = [bin(x) for x in strsb[addrb]]
            return list(zip(ci[addra,addrb], strsa, strsb))
        else:
            occslsta = cistring._strs2occslst(strsa[addra], norb)
            occslstb = cistring._strs2occslst(strsb[addrb], norb)
            return list(zip(ci[addra,addrb], occslsta, occslstb))
    def contract_ss(self, fcivec, norb, nelec):
        # Apply the S^2 operator within the selected space (module-level impl).
        return contract_ss(fcivec, norb, nelec)
    # Spin-resolved one-particle density matrices (alpha, beta).
    @lib.with_doc(make_rdm1s.__doc__)
    def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm1s(civec_strs, norb, nelec, link_index)
    # Spin-traced one-particle density matrix (alpha + beta).
    @lib.with_doc(make_rdm1.__doc__)
    def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
        return rdm1a + rdm1b
    # Spin-resolved two-particle density matrices (aa, ab, bb).
    @lib.with_doc(make_rdm2s.__doc__)
    def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2s(civec_strs, norb, nelec, link_index)
    # Spin-traced two-particle density matrix.
    @lib.with_doc(make_rdm2.__doc__)
    def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2(civec_strs, norb, nelec, link_index)
    def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        '''Spin-resolved 1- and 2-RDMs: ((dm1a, dm1b), (dm2aa, dm2ab, dm2bb)).

        The 1-RDMs are obtained by partial trace of the 2-RDMs when more than
        one electron of each spin is present (so dm1/dm2 are consistent);
        otherwise they are computed directly.
        '''
        neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
        if neleca > 1 and nelecb > 1:
            dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
            dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
        else:
            dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
        return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
    def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        '''Spin-traced 1- and 2-RDMs (dm1, dm2); dm1 is derived from dm2 by
        partial trace when there is more than one electron in total.
        '''
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        nelec_tot = sum(nelec)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
        if nelec_tot > 1:
            dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
        else:
            dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
        return dm1, dm2
    # Spin-resolved transition 1-RDMs between two CI vectors.
    @lib.with_doc(trans_rdm1s.__doc__)
    def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
    # Spin-traced transition 1-RDM between two CI vectors.
    @lib.with_doc(trans_rdm1.__doc__)
    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)
    def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
        '''Build the four link-index tables (cd_a, dd_a, cd_b, dd_b) for the
        selected strings; tril selects the lower-triangular-packed variant
        used by contract_2e.
        '''
        if spin is None:
            spin = self.spin
        if ci_strs is None:
            ci_strs = self._strs
        neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
        if tril:
            cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
        else:
            cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
        return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
    '''Normalize the (vector, nelec, strings) triple.

    nelec is expanded to an explicit (neleca, nelecb) pair.  The determinant
    strings are taken from the vector's _strs tag when present, otherwise
    from the ci_strs argument, and are coerced to numpy arrays.  The CI
    vector itself is passed through untouched.
    '''
    neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
    strs = getattr(civec_strs, '_strs', ci_strs)
    if strs is not None:
        strs = (numpy.asarray(strs[0]), numpy.asarray(strs[1]))
    return civec_strs, (neleca, nelecb), strs
def _all_linkstr_index(ci_strs, norb, nelec):
    '''Return the four tril-packed link tables (cd_a, dd_a, cd_b, dd_b)
    for the alpha and beta selected strings.'''
    strsa, strsb = ci_strs
    neleca, nelecb = nelec[0], nelec[1]
    return (cre_des_linkstr_tril(strsa, norb, neleca),
            des_des_linkstr_tril(strsa, norb, neleca),
            cre_des_linkstr_tril(strsb, norb, nelecb),
            des_des_linkstr_tril(strsb, norb, nelecb))
# numpy.ndarray does not allow to attach attribtues. Overwrite the
# numpy.ndarray class to tag the ._strs attribute
class _SCIvector(numpy.ndarray):
    '''ndarray subclass carrying the selected determinant strings in _strs.'''
    def __array_finalize__(self, obj):
        # Propagate the tag through views/slices; plain arrays yield None.
        self._strs = getattr(obj, '_strs', None)

    # Whenever the contents of the array was modified (through ufunc), the tag
    # should be expired.  Overwrite the output of ufunc to restore ndarray type.
    def __array_wrap__(self, out, context=None):
        return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
    '''Return a view of civec tagged with the determinant strings ci_strs.'''
    tagged = civec.view(_SCIvector)
    tagged._strs = ci_strs
    return tagged
def _as_SCIvector_if_not(civec, ci_strs):
    '''Tag civec with ci_strs unless it already carries a _strs tag.'''
    if getattr(civec, '_strs', None) is not None:
        return civec
    return _as_SCIvector(civec, ci_strs)
if __name__ == '__main__':
    # Self-test: compare the selected-CI energy and density matrices of an
    # H8 chain against the exact FCI (direct_spin1) reference values.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf.fci import spin_op
    from pyscf.fci import addons

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 1., 2. , 3. )],
        ['H', ( 1., 2. , 4. )],
    ]
    mol.basis = 'sto-3g'
    mol.build()
    m = scf.RHF(mol)
    m.kernel()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # MO-basis integrals for the CI solvers.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)
    e1, c1 = kernel(h1e, eri, norb, nelec)
    e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
    print(c1.shape, c2.shape)
    dm1_1 = make_rdm1(c1, norb, nelec)
    dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
    print(abs(dm1_1 - dm1_2).sum())
    dm2_1 = make_rdm2(c1, norb, nelec)
    dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
    print(abs(dm2_1 - dm2_2).sum())

    # Re-solve in the previously selected (fixed) determinant space.
    myci = SelectedCI()
    e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
    print(e - -11.894559902235565)

    print(myci.large_ci(c1, norb, nelec))
    print(myci.spin_square(c1, norb, nelec)[0] -
          spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])

    # Spin-constrained variant.
    myci = SelectedCI()
    myci = addons.fix_spin_(myci)
    e1, c1 = myci.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.89467612053687)
    print(myci.spin_square(c1, norb, nelec))
| 42.599174 | 105 | 0.607964 |
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
    '''Compute H|civec> within the selected determinant space.

    eri is expected to have the one-electron part already absorbed
    (direct_spin1.absorb_h1e) — TODO confirm against the callers.
    Returns an _SCIvector tagged with the same determinant strings.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
    cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]

    eri = ao2mo.restore(1, eri, norb)
    # Antisymmetrized integrals for the same-spin (aaaa/bbbb) contractions,
    # packed over the strict lower triangle of orbital pairs.
    eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
    idx,idy = numpy.tril_indices(norb, -1)
    idx = idx * norb + idy
    eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
    fcivec = ci_coeff.reshape(na,nb)
    # bbbb block: reuse the aaaa C kernel on the transposed CI matrix.
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        ci1T = numpy.zeros((nb,na))
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivecT.ctypes.data_as(ctypes.c_void_p),
                                   ci1T.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(nb), ctypes.c_int(na),
                                   ctypes.c_int(mb), ctypes.c_int(mlinkb),
                                   dd_indexb.ctypes.data_as(ctypes.c_void_p))
        ci1 = lib.transpose(ci1T, out=fcivecT)
    else:
        ci1 = numpy.zeros_like(fcivec)
    # aaaa block.
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivec.ctypes.data_as(ctypes.c_void_p),
                                   ci1.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(na), ctypes.c_int(nb),
                                   ctypes.c_int(ma), ctypes.c_int(mlinka),
                                   dd_indexa.ctypes.data_as(ctypes.c_void_p))

    # Mixed-spin (aabb) block: fold the effective one-electron term h_ps into
    # the diagonal of the two-electron integrals, then call the bbaa kernel.
    h_ps = numpy.einsum('pqqs->ps', eri)
    eri1 = eri * 2
    for k in range(norb):
        eri1[:,:,k,k] += h_ps/nelec[0]
        eri1[k,k,:,:] += h_ps/nelec[1]
    eri1 = ao2mo.restore(4, eri1, norb)
    libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
                               fcivec.ctypes.data_as(ctypes.c_void_p),
                               ci1.ctypes.data_as(ctypes.c_void_p),
                               ctypes.c_int(norb),
                               ctypes.c_int(na), ctypes.c_int(nb),
                               ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                               cd_indexa.ctypes.data_as(ctypes.c_void_p),
                               cd_indexb.ctypes.data_as(ctypes.c_void_p))

    return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
    '''Select new determinant strings connected to strs whose estimated
    coupling (integral magnitude times max CI coefficient) exceeds
    myci.select_cutoff.  Returns only strings not already in strs, sorted.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nstrs = len(strs)
    nvir = norb - nelec
    # Upper bound on the number of candidates generated by the C kernel.
    strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
    libfci.SCIselect_strs.restype = ctypes.c_int
    nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
                                 strs.ctypes.data_as(ctypes.c_void_p),
                                 eri.ctypes.data_as(ctypes.c_void_p),
                                 eri_pq_max.ctypes.data_as(ctypes.c_void_p),
                                 civec_max.ctypes.data_as(ctypes.c_void_p),
                                 ctypes.c_double(myci.select_cutoff),
                                 ctypes.c_int(norb), ctypes.c_int(nelec),
                                 ctypes.c_int(nstrs))
    strs_add = sorted(set(strs_add[:nadd]) - set(strs))
    return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
    '''Grow the selected determinant space.

    Strings whose largest CI amplitude falls below myci.ci_coeff_cutoff are
    dropped; new strings passing the select_strs screening are appended.
    The CI coefficients are re-embedded into the enlarged (sorted) space.
    Accepts a single tagged vector or a list of them (multiple roots).
    '''
    if isinstance(civec_strs, (tuple, list)):
        nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
        ci_coeff = lib.asarray(civec_strs)
    else:
        ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
    na = len(strsa)
    nb = len(strsb)
    ci0 = ci_coeff.reshape(-1,na,nb)
    # Per-string importance: max over roots of the row/column norms.
    civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
    civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
    ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
    ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
    civec_a_max = civec_a_max[ci_aidx]
    civec_b_max = civec_b_max[ci_bidx]
    strsa = strsa[ci_aidx]
    strsb = strsb[ci_bidx]

    eri = ao2mo.restore(1, eri, norb)
    eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
    strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
    strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
    strsa = numpy.append(strsa, strsa_add)
    strsb = numpy.append(strsb, strsb_add)
    aidx = numpy.argsort(strsa)
    bidx = numpy.argsort(strsb)
    ci_strs = (strsa[aidx], strsb[bidx])
    # Positions (after sorting) of the strings kept from the old space.
    aidx = numpy.where(aidx < len(ci_aidx))[0]
    bidx = numpy.where(bidx < len(ci_bidx))[0]
    ma = len(strsa)
    mb = len(strsb)

    cs = []
    for i in range(ci0.shape[0]):
        ci1 = numpy.zeros((ma,mb))
        tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
        lib.takebak_2d(ci1, tmp, aidx, bidx)
        cs.append(_as_SCIvector(ci1, ci_strs))

    if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
        cs = cs[0]
    return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
    '''Link table for single excitations a_p^+ a_q within the selected
    strings; shape (nstrs, nelec + nelec*nvir, 4) with entries filled by
    the C kernel.  tril requests the lower-triangular-packed variant.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
    libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nstrs),
                              ctypes.c_int(nelec),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular-packed variant of cre_des_linkstr.'''
    return cre_des_linkstr(strs, norb, nelec, tril=True)
def des_des_linkstr(strs, norb, nelec, tril=False):
    '''Link table for double annihilation a_p a_q on the selected strings.

    Returns None when fewer than two electrons are present.  The table is
    indexed by the unique (nelec-2)-electron intermediate strings produced
    by two successive SCIdes_uniq_strs passes.
    '''
    if nelec < 2:
        return None

    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    # First pass: unique strings after one annihilation.
    inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
    ninter = len(inter1)

    # Second pass: unique strings after the second annihilation.
    inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     inter1.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec-1),
                                     ctypes.c_int(ninter))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)

    # Two orbitals were freed by the annihilations.
    nvir += 2
    link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
    libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nelec),
                              ctypes.c_int(nstrs), ctypes.c_int(ninter),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              inter.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def des_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular-packed variant of des_des_linkstr.'''
    return des_des_linkstr(strs, norb, nelec, tril=True)
def gen_des_linkstr(strs, norb, nelec):
    '''Link table for a single annihilation a_p on the selected strings,
    indexed by the unique (nelec-1)-electron intermediate strings.
    Returns None when no electron is present.
    '''
    if nelec < 1:
        return None

    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)

    # One orbital freed by the annihilation.
    nvir += 1
    link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
    libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def gen_cre_linkstr(strs, norb, nelec):
    '''Link table for a single creation a_p^+ on the selected strings,
    indexed by the unique (nelec+1)-electron intermediate strings.
    Returns None when all orbitals are occupied.
    '''
    if nelec == norb:
        return None

    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
    libfci.SCIcre_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)

    link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
    libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
    '''Diagonal Hamiltonian elements <D|H|D> over the selected determinant
    space; returns a flat array of length na*nb.'''
    ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    hdiag = numpy.empty(na*nb)
    h1e = numpy.asarray(h1e, order='C')
    eri = ao2mo.restore(1, eri, norb)
    # Coulomb and exchange diagonals J_ij = (ii|jj), K_ij = (ij|ji).
    jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
    kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
    c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
    c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
    c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
    occslsta = cistring._strs2occslst(ci_strs[0], norb)
    occslstb = cistring._strs2occslst(ci_strs[1], norb)
    # Reuse the UHF-style kernel with identical alpha/beta integrals.
    libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
                             c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
                             occslsta.ctypes.data_as(ctypes.c_void_p),
                             occslstb.ctypes.data_as(ctypes.c_void_p))
    return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Diagonalize H in a FIXED selected determinant space ci_strs
    (no space enlargement).  Returns (energy + ecore, civec); for
    nroots > 1 the second item is a list of tagged vectors.
    '''
    log = logger.new_logger(myci, verbose)
    # Fall back to the solver's configured defaults.
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()

    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)

    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)

    if isinstance(ci0, _SCIvector):
        if ci0.size == na*nb:
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)

    def hop(c):
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.reshape(-1)
    # Davidson preconditioner; 1e-4 shift guards against division by zero.
    precond = lambda x, e, *args: x/(hdiag-e+1e-4)
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)
    if nroots > 1:
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Adaptive selected-CI driver: alternately diagonalize H in the current
    determinant space and enlarge the space, until the energy or the space
    size stops changing.  Returns (energy + ecore, civec); for nroots > 1
    the second item is a list of tagged vectors.
    '''
    log = logger.new_logger(myci, verbose)
    # Fall back to the solver's configured defaults.
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()

    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)

    # Initial space: caller-supplied tagged vector(s), or the HF determinant
    # expanded once by enlarge_space.
    if isinstance(ci0, _SCIvector):
        if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
                   numpy.asarray([int('1'*nelec[1], 2)]))
        ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        if ci0.size < nroots:
            log.warn('''
  Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
  HOMO->LUMO excitations are included in the initial guess.
  NOTE: This may introduce excited states of different symmetry.\n''')
            corea = '1' * (nelec[0]-1)
            coreb = '1' * (nelec[1]-1)
            ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
                       numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
            ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
            ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
            if ci0.size < nroots:
                raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
        ci_strs = ci0._strs
        hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
        ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)

    def hop(c):
        # NOTE: ci_strs/link_index are rebound per macro-cycle; the closure
        # picks up the current values at call time.
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.ravel()
    precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)

    namax = cistring.num_strings(norb, nelec[0])
    nbmax = cistring.num_strings(norb, nelec[1])
    e_last = 0
    float_tol = myci.start_tol
    tol_decay_rate = myci.tol_decay_rate
    conv = False
    for icycle in range(norb):
        ci_strs = ci0[0]._strs
        # Tighten the Davidson tolerance each cycle, down to 100*tol.
        float_tol = max(float_tol*tol_decay_rate, tol*1e2)
        log.debug('cycle %d ci.shape %s float_tol %g',
                  icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
        ci0 = [c.ravel() for c in ci0]
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
        hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
        e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
                          max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                          max_memory=max_memory, verbose=log, **kwargs)
        if nroots > 1:
            ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
            de, e_last = min(e)-e_last, min(e)
            log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)
        else:
            ci0 = [_as_SCIvector(ci0, ci_strs)]
            de, e_last = e-e_last, e
            log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)

        # Converged: full FCI space reached or energy change small enough.
        if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
            conv = True
            break

        last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        na = len(ci0[0]._strs[0])
        nb = len(ci0[0]._strs[1])
        # Space essentially unchanged (within 1%) -> stop growing.
        if ((.99 < na/last_ci0_size[0] < 1.01) and
            (.99 < nb/last_ci0_size[1] < 1.01)):
            conv = True
            break

    # Final diagonalization at the target tolerance in the converged space.
    ci_strs = ci0[0]._strs
    log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
    ci0 = [c.ravel() for c in ci0]
    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)

    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    if nroots > 1:
        for i, ei in enumerate(e+ecore):
            log.info('Selected CI state %d E = %.15g', i, ei)
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        log.info('Selected CI E = %.15g', e+ecore)
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
    '''Module-level entry point: build a SelectedCI solver through the
    direct_spin1 factory and run its (floating-space) kernel.'''
    return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
                                  level_shift, tol, lindep, max_cycle,
                                  max_space, nroots, davidson_only,
                                  pspace_size, select_cutoff=select_cutoff,
                                  ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
                                  **kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
    '''Spin-resolved one-particle density matrices (dm1a, dm1b) over the
    selected determinant space.'''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def make_rdm1(civec_strs, norb, nelec, link_index=None):
    '''Spin-traced one-particle density matrix (alpha + beta).'''
    dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
    return dm1a + dm1b
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
    '''Spin-resolved two-particle density matrices (dm2aa, dm2ab, dm2bb)
    over the selected determinant space.'''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
        dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]

    fcivec = ci_coeff.reshape(na,nb)
    # Mixed-spin block via the generic spin1 transition-density kernel.
    dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
                                 norb, nelec, (cd_indexa,cd_indexb), 0)[1]
    # aaaa block (only meaningful with at least two alpha electrons).
    dm2aa = numpy.zeros([norb]*4)
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2aa.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(ma), ctypes.c_int(mlinka),
                            dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # bbbb block: reuse the aaaa kernel on the transposed CI matrix.
    dm2bb = numpy.zeros([norb]*4)
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2bb.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(nb), ctypes.c_int(na),
                            ctypes.c_int(mb), ctypes.c_int(mlinkb),
                            dd_indexb.ctypes.data_as(ctypes.c_void_p))
    return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
    '''Spin-traced two-particle density matrix: sum of the aa, bb, ab and
    ba (= ab transposed) blocks.'''
    dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
    dm2ba = dm2ab.transpose(2,3,0,1)
    return dm2aa + dm2bb + dm2ab + dm2ba
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    '''Spin-resolved transition one-particle density matrices between two
    CI vectors; both vectors must live in the same selected space.'''
    cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
    ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
    # Bra and ket must share the identical determinant lists.
    assert(all(ci_strs[0] == ci_strs1[0]) and
           all(ci_strs[1] == ci_strs1[1]))
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    '''Spin-traced transition one-particle density matrix.'''
    dm1a, dm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
    return dm1a + dm1b
def spin_square(civec_strs, norb, nelec):
    '''Expectation value of S^2 and the spin multiplicity 2S+1 for a CI
    vector in the selected determinant space.'''
    ci1 = contract_ss(civec_strs, norb, nelec)
    ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)
    # Solve s(s+1) = ss for s.
    s = numpy.sqrt(ss+.25) - .5
    multip = s*2+1
    return ss, multip
def contract_ss(civec_strs, norb, nelec):
    '''Apply the S^2 operator to a CI vector within the selected space.

    Uses S^2 = S-S+ + S+S- (symmetrized, each applied via single
    creation/annihilation maps on the alpha and beta strings) plus the
    Sz^2 term (neleca-nelecb)^2/4.  Components that leave the selected
    space are dropped.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    strsa, strsb = ci_strs
    neleca, nelecb = nelec
    ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))

    def gen_map(fstr_index, strs, nelec, des=True):
        # Flatten a link table into a per-(string, orbital) map of
        # (target address, sign); sign 0 marks a forbidden move.
        a_index = fstr_index(strs, norb, nelec)
        amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
        if des:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,1]] = tab[:,2:]
        else:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,0]] = tab[:,2:]
        return amap

    if neleca > 0:
        ades = gen_map(gen_des_linkstr, strsa, neleca)
    else:
        ades = None

    if nelecb > 0:
        bdes = gen_map(gen_des_linkstr, strsb, nelecb)
    else:
        bdes = None

    if neleca < norb:
        acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
    else:
        acre = None

    if nelecb < norb:
        bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
    else:
        bcre = None

    def trans(ci1, aindex, bindex):
        # Accumulate one S-/S+ ladder product into ci1; a None index means
        # the corresponding ladder operator annihilates every determinant.
        if aindex is None or bindex is None:
            return None

        ma = len(aindex)
        mb = len(bindex)
        t1 = numpy.zeros((ma,mb))
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(ci_coeff, addra, addrb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: t1[addra.reshape(-1,1),addrb] += citmp
            lib.takebak_2d(t1, citmp, maska, maskb)
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(t1, maska, maskb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            lib.takebak_2d(ci1, citmp, addra, addrb)

    ci1 = numpy.zeros_like(ci_coeff)
    trans(ci1, ades, bcre)  # S+*S-
    trans(ci1, acre, bdes)  # S-*S+
    ci1 *= .5  # the factor (S+*S- + S-*S+)/2
    ci1 += (neleca-nelecb)**2*.25*ci_coeff
    return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
    '''Embed a selected-CI vector into the full FCI coefficient matrix,
    zero-filling the determinants outside the selected space.'''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
    addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
    na = cistring.num_strings(norb, nelec[0])
    nb = cistring.num_strings(norb, nelec[1])
    ci0 = numpy.zeros((na,nb))
    lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)
    return ci0
def from_fci(fcivec, ci_strs, norb, nelec):
    '''Project a full FCI vector onto the selected determinant space,
    returning a tagged _SCIvector (no renormalization is applied).'''
    fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)
    addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
    addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
    na = cistring.num_strings(norb, nelec[0])
    nb = cistring.num_strings(norb, nelec[1])
    fcivec = fcivec.reshape(na,nb)
    civec = lib.take_2d(fcivec, addrsa, addrsb)
    return _as_SCIvector(civec, ci_strs)
class SelectedCI(direct_spin1.FCISolver):
    '''Selected-CI solver: FCISolver specialized to an adaptively chosen
    subset of determinants.  Screening thresholds are overridable via
    pyscf's __config__ mechanism.'''
    ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
    select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
    conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
    start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
    tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)
    def __init__(self, mol=None):
        direct_spin1.FCISolver.__init__(self, mol)
        # NOTE(review): the source appears truncated/corrupted here — the tail
        # of __init__, dump_flags, contract_2e and most of get_init_guess seem
        # to be missing, and the two statements below reference undefined
        # names (et_init_guess, na, nb, nroots, hdiag, ci_strs, ci0), so
        # instantiating this class would raise NameError.  Restore this
        # region from upstream pyscf before use — TODO confirm.
        et_init_guess(na, nb, nroots, hdiag)
        return [_as_SCIvector(x, ci_strs) for x in ci0]
    def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
        # Diagonal H elements over the selected space (module-level impl).
        return make_hdiag(h1e, eri, ci_strs, norb, nelec)
    # Bind the module-level drivers; the default kernel grows the space.
    enlarge_space = enlarge_space
    kernel = kernel_float_space
    kernel_fixed_space = kernel_fixed_space
#    def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
#                      tol=None, lindep=None, max_cycle=None,
#                      max_memory=None, verbose=None, **kwargs):
#        ci_strs = getattr(ci0, '_strs', self._strs)
#        return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
#                                       ci0, link_index, tol, lindep, 6,
#                                       max_memory, verbose, **kwargs)
    @lib.with_doc(spin_square.__doc__)
    def spin_square(self, civec_strs, norb, nelec):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)
    def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
        # Dominant determinants with |c| > tol, as bit strings or occ lists.
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
        addra, addrb = numpy.where(abs(ci) > tol)
        if return_strs:
            strsa = [bin(x) for x in strsa[addra]]
            strsb = [bin(x) for x in strsb[addrb]]
            return list(zip(ci[addra,addrb], strsa, strsb))
        else:
            occslsta = cistring._strs2occslst(strsa[addra], norb)
            occslstb = cistring._strs2occslst(strsb[addrb], norb)
            return list(zip(ci[addra,addrb], occslsta, occslstb))
    def contract_ss(self, fcivec, norb, nelec):
        return contract_ss(fcivec, norb, nelec)
    @lib.with_doc(make_rdm1s.__doc__)
    def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm1s(civec_strs, norb, nelec, link_index)
    @lib.with_doc(make_rdm1.__doc__)
    def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
        return rdm1a + rdm1b
    @lib.with_doc(make_rdm2s.__doc__)
    def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2s(civec_strs, norb, nelec, link_index)
    @lib.with_doc(make_rdm2.__doc__)
    def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2(civec_strs, norb, nelec, link_index)
    def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        # dm1 derived from dm2 by partial trace for dm1/dm2 consistency.
        neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
        if neleca > 1 and nelecb > 1:
            dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
            dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
        else:
            dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
        return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
    def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        nelec_tot = sum(nelec)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
        if nelec_tot > 1:
            dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
        else:
            dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
        return dm1, dm2
    @lib.with_doc(trans_rdm1s.__doc__)
    def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
    @lib.with_doc(trans_rdm1.__doc__)
    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)
    def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
        # Build the four link tables for the (cached) selected strings.
        if spin is None:
            spin = self.spin
        if ci_strs is None:
            ci_strs = self._strs
        neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
        if tril:
            cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
        else:
            cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
        return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI  # Short convenience alias for the SelectedCI solver class.
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
    """Normalize the (vector, nelec, strings) bookkeeping shared by the
    module-level routines; string tables are returned as ndarray pairs."""
    neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
    # A tagged vector carries its own string table, which takes precedence
    # over the ci_strs argument.
    ci_strs = getattr(civec_strs, '_strs', ci_strs)
    if ci_strs is not None:
        strsa, strsb = ci_strs
        ci_strs = (numpy.asarray(strsa), numpy.asarray(strsb))
    return civec_strs, (neleca, nelecb), ci_strs
def _all_linkstr_index(ci_strs, norb, nelec):
    # Build all four (lower-triangular) link-index tables in string/nelec
    # order: cre-des alpha, des-des alpha, cre-des beta, des-des beta.
    links = []
    for strs, ne in zip(ci_strs, nelec):
        links.append(cre_des_linkstr_tril(strs, norb, ne))
        links.append(des_des_linkstr_tril(strs, norb, ne))
    return tuple(links)
# A plain numpy.ndarray cannot carry extra attributes.  Subclass it so the
# determinant-string table can ride along as the ``._strs`` tag.
class _SCIvector(numpy.ndarray):
    def __array_finalize__(self, obj):
        # Propagate the string table through views/slices/copies; arrays
        # created from untagged sources end up with None.
        self._strs = getattr(obj, '_strs', None)

    # A ufunc produces new data, so any attached string table would be
    # stale.  Drop the result back to a plain ndarray.
    def __array_wrap__(self, out, context=None):
        wrapped = numpy.ndarray.__array_wrap__(self, out, context)
        return wrapped.view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
    """Return *civec* viewed as a _SCIvector tagged with *ci_strs*."""
    tagged = civec.view(_SCIvector)
    tagged._strs = ci_strs
    return tagged
def _as_SCIvector_if_not(civec, ci_strs):
    # Tag the vector only when it does not already carry a string table.
    if getattr(civec, '_strs', None) is not None:
        return civec
    return _as_SCIvector(civec, ci_strs)
if __name__ == '__main__':
    # Smoke test: run selected CI on an 8-hydrogen cluster and compare the
    # energy and density matrices against the determinant FCI solver
    # (direct_spin1).
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf.fci import spin_op
    from pyscf.fci import addons

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 1., 2. , 3. )],
        ['H', ( 1., 2. , 4. )],
    ]
    mol.basis = 'sto-3g'
    mol.build()

    # Mean-field reference and MO-basis integrals.
    m = scf.RHF(mol)
    m.kernel()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)

    # Selected CI vs. conventional FCI energy (reference value hard-coded).
    e1, c1 = kernel(h1e, eri, norb, nelec)
    e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
    print(c1.shape, c2.shape)

    # Density matrices should match the FCI ones.
    dm1_1 = make_rdm1(c1, norb, nelec)
    dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
    print(abs(dm1_1 - dm1_2).sum())
    dm2_1 = make_rdm2(c1, norb, nelec)
    dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
    print(abs(dm2_1 - dm2_2).sum())

    # Re-solve within the fixed determinant space selected for c1.
    myci = SelectedCI()
    e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
    print(e - -11.894559902235565)

    print(myci.large_ci(c1, norb, nelec))
    print(myci.spin_square(c1, norb, nelec)[0] -
          spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])

    # Spin-fixed selected CI (reference value hard-coded).
    myci = SelectedCI()
    myci = addons.fix_spin_(myci)
    e1, c1 = myci.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.89467612053687)
    print(myci.spin_square(c1, norb, nelec))
| true | true |
f71be110a2784a6f4b942989944478aaf8facaaa | 18,030 | py | Python | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | 2 | 2021-12-13T12:41:54.000Z | 2021-12-16T03:10:24.000Z | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import os
import pickle
import re
import shutil
import time
import unittest
import dill
import gym
import ray
from ultra.baselines.agent_spec import BaselineAgentSpec
from ultra.baselines.sac.sac.policy import SACPolicy
from ultra.evaluate import collect_evaluations, evaluate, evaluation_check
from ultra.utils.episode import episodes
seed = 2  # RNG seed forwarded to the ULTRA environment by run_experiment().
AGENT_ID = "001"  # NOTE(review): appears unused in this module — confirm.
class EvaluateTest(unittest.TestCase):
    """End-to-end tests for ULTRA's evaluation pipeline.

    ``setUpClass`` generates the test scenarios and trains small throw-away
    models once; the tests then drive evaluation through the Python API, the
    command-line interface, and the ray remote entry point.
    """

    # Put generated files and folders in this directory.
    OUTPUT_DIRECTORY = "tests/evaluate_test/"

    @classmethod
    def setUpClass(cls):
        path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
        multiagent_path = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
        )
        generate_command = (
            "python ultra/scenarios/interface.py generate "
            "--task 00 --level eval_test --root-dir tests/scenarios "
            " --save-dir tests/task/eval_test/"
        )
        multiagent_generate_command = (
            "python ultra/scenarios/interface.py generate "
            "--task 00-multiagent --level eval_test --root-dir tests/scenarios "
            "--save-dir tests/task/eval_test_multiagent/"
        )
        train_command = (
            "python ultra/train.py "
            "--task 00 --level eval_test --policy sac --headless --episodes 1 "
            f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {path}"
        )
        multiagent_train_command = (
            "python ultra/train.py "
            "--task 00-multiagent --level eval_test --policy sac,dqn,ppo --headless --episodes 1 "
            f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {multiagent_path}"
        )

        # Generate the scenarios.
        os.system(generate_command)
        os.system(multiagent_generate_command)

        # Remove existing models so training starts from a clean slate.
        if os.path.exists(path):
            shutil.rmtree(path)
        if os.path.exists(multiagent_path):
            shutil.rmtree(multiagent_path)

        # Generate models before the evaluation tests run.
        if not os.path.exists(path):
            os.system(train_command)
        if not os.path.exists(multiagent_path):
            os.system(multiagent_train_command)

    def test_a_folders(self):
        # Runs first (alphabetical order) to verify setUpClass produced the
        # models and scenarios the remaining tests rely on.
        path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
        if not os.path.exists(path):
            self.assertTrue(False)

        path = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models")
        )[0]
        if len(os.listdir(path)) == 0:
            self.assertTrue(False)

        path = "tests/task/eval_test"
        if len(os.listdir(path)) <= 2:
            self.assertTrue(False)

        multiagent_path = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
        )
        # Bug fix: this check (and the last one below) previously re-tested
        # the single-agent ``path`` instead of ``multiagent_path``.
        if not os.path.exists(multiagent_path):
            self.assertTrue(False)

        multiagent_path = glob.glob(
            os.path.join(
                EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models"
            )
        )[0]
        if len(os.listdir(multiagent_path)) < 2:
            self.assertTrue(False)

        multiagent_path = "tests/task/eval_test_multiagent"
        if len(os.listdir(multiagent_path)) <= 2:
            self.assertTrue(False)

    def test_evaluation_check(self):
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_logs/")

        ray.shutdown()
        ray.init()
        try:
            run_experiment(
                scenario_info=("00", "eval_test"), num_agents=1, log_dir=log_dir
            )
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            # ``raise "<str>"`` (the previous code) is a TypeError in
            # Python 3; report the real failure through unittest instead.
            self.fail("Evaluation failed to generate new experiment folder")
        shutil.rmtree(log_dir)

    def test_evaluation_check_multiagent(self):
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_multiagent_logs/"
        )

        ray.shutdown()
        ray.init()
        try:
            run_experiment(
                scenario_info=("00-multiagent", "eval_test"),
                num_agents=3,
                log_dir=log_dir,
            )
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            # See test_evaluation_check: a string cannot be raised in Python 3.
            self.fail("Evaluation failed to generate new experiment folder")
        shutil.rmtree(log_dir)

    def test_evaluate_cli(self):
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_logs/")
        experiment_dir = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*")
        )[0]
        evaluate_command = (
            f"python ultra/evaluate.py "
            f"--task 00 --level eval_test --experiment-dir {experiment_dir} "
            f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
        )
        ray.shutdown()
        try:
            os.system(evaluate_command)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            self.fail("Evaluation failed to generate new experiment folder")
        shutil.rmtree(log_dir)

    def test_evaluate_cli_multiagent(self):
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_multiagent_logs/"
        )
        experiment_dir = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*")
        )[0]
        evaluate_command = (
            f"python ultra/evaluate.py "
            f"--task 00-multiagent --level eval_test --agents 000 --experiment-dir {experiment_dir} "
            f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
        )
        ray.shutdown()
        try:
            os.system(evaluate_command)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            self.fail("Evaluation failed to generate new experiment folder")
        shutil.rmtree(log_dir)

    def test_evaluate_agent(self):
        seed = 2
        models_directory = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models/")
        )[0]
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_agent_logs/")

        with open(
            os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
        ) as metadata_file:
            agent_metadata = pickle.load(metadata_file)

        agent_ids = agent_metadata["agent_ids"]
        policy_classes = agent_metadata["agent_classes"]
        # Checkpoints are numbered directories; sort them numerically.
        checkpoint_directories = {
            agent_id: sorted(
                glob.glob(os.path.join(models_directory, agent_id, "*")),
                key=lambda x: int(x.split("/")[-1]),
            )
            for agent_id in agent_ids
        }

        ray.shutdown()
        ray.init(ignore_reinit_error=True)
        try:
            # NOTE(review): the returned ObjectRef is not waited on, so this
            # only verifies the task can be submitted — confirm intent.
            evaluate.remote(
                experiment_dir=None,
                agent_ids=agent_ids,
                policy_classes=policy_classes,
                seed=seed,
                checkpoint_dirs=checkpoint_directories,
                scenario_info=("00", "eval_test"),
                num_episodes=1,
                max_episode_steps=2,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
        except Exception as err:
            print(err)
            self.assertTrue(False)

    # This test performs evaluation on multiple agents, but the test map
    # that is created can only support one agent. Skip this for now until
    # we can specify a map to use that supports multiple agents.
    @unittest.skip("Test map does not yet support multiple agents.")
    def test_evaluate_multiagent(self):
        seed = 2
        models_directory = glob.glob(
            os.path.join(
                EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models/"
            )
        )[0]
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_multiagent_logs/"
        )

        with open(
            os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
        ) as metadata_file:
            agent_metadata = pickle.load(metadata_file)

        agent_ids = agent_metadata["agent_ids"]
        policy_classes = agent_metadata["agent_classes"]
        checkpoint_directories = {
            agent_id: sorted(
                glob.glob(os.path.join(models_directory, agent_id, "*")),
                key=lambda x: int(x.split("/")[-1]),
            )
            for agent_id in agent_ids
        }

        ray.shutdown()
        ray.init(ignore_reinit_error=True)
        try:
            evaluate.remote(
                experiment_dir=None,
                agent_ids=agent_ids,
                policy_classes=policy_classes,
                seed=seed,
                checkpoint_dirs=checkpoint_directories,
                scenario_info=("00-multiagent", "eval_test"),
                num_episodes=1,
                max_episode_steps=2,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
        except Exception as err:
            print(err)
            self.assertTrue(False)

    def test_record_evaluation_at_proper_episode_indices(self):
        """Due to parallelization, there might arise a situation where the episode
        object at the beginning of an evaluation would not match the episode
        object when recording to tensorboard. This test ensures that the evaluation data
        (for both test and train scenarios) is recorded at the proper episode index.
        """
        AGENT_ID = "000"
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_episode_check_log/"
        )

        # Arbitrary values for evaluation rate and number of training episodes.
        eval_rate = 4
        num_episodes = 20

        train_command = (
            "python ultra/train.py "
            f"--task 00 --level eval_test --policy sac --headless --episodes {num_episodes} "
            f"--eval-rate {eval_rate} --eval-episodes 2 --max-episode-steps 2 --log-dir {log_dir}"
        )

        if not os.path.exists(log_dir):
            os.system(train_command)

        with open(
            os.path.join(
                log_dir, os.listdir(log_dir)[0], "pkls/Evaluation/results.pkl"
            ),
            "rb",
        ) as handle:
            evaluation_results = dill.load(handle)

        # Evaluation data is correctly recorded iff every recorded episode
        # index is a multiple of the evaluation rate.
        for index in evaluation_results[AGENT_ID].keys():
            self.assertEqual((index) % eval_rate, 0)

        with open(
            os.path.join(
                log_dir, os.listdir(log_dir)[0], "pkls/Evaluation_Training/results.pkl"
            ),
            "rb",
        ) as handle:
            evaluation_training_results = dill.load(handle)

        # Same invariant for the evaluation-on-training-scenarios data.
        for index in evaluation_training_results[AGENT_ID].keys():
            self.assertEqual((index) % eval_rate, 0)

    def test_extract_policy_from_path(self):
        paths = [
            "from.ultra.baselines.sac:sac-v0",
            "hello.ultra.ppo:ppo-v1",
            "ultra.custom:custom",
            "a.sb.ultra.c.d.e.sac:sac-v99",
            "a.b.c.d.e.ultra.custom_agent.policy:MBPO-v2",
        ]

        def extract(path):
            m = re.search(
                "ultra(.)*([a-zA-Z0-9_]*.)+([a-zA-Z0-9_])+:[a-zA-Z0-9_]+((-)*[a-zA-Z0-9_]*)*",
                path,
            )
            try:
                policy_class = m.group(0)  # pytype: disable=attribute-error
            except AttributeError:
                self.fail(f"Could not extract a policy class from {path!r}")

        for path in paths:
            extract(path)

    @classmethod
    def tearDownClass(cls):
        # Remove everything the tests generated.
        if os.path.exists(EvaluateTest.OUTPUT_DIRECTORY):
            shutil.rmtree(EvaluateTest.OUTPUT_DIRECTORY)
        if os.path.exists("tests/task/eval_test/"):
            shutil.rmtree("tests/task/eval_test/")
        if os.path.exists("tests/task/eval_test_multiagent/"):
            shutil.rmtree("tests/task/eval_test_multiagent/")
def run_experiment(scenario_info, num_agents, log_dir, headless=True):
    """Run one training episode with SAC baseline agents, exercising the
    evaluation machinery (``evaluation_check``/``collect_evaluations``).

    Args:
        scenario_info: (task, level) pair identifying the scenarios to use.
        num_agents: Number of agents to create (ids "000", "001", ...).
        log_dir: Directory that experiment results are written to.
        headless: Whether to run SMARTS without a GUI.
    """
    agent_ids = ["0" * max(0, 3 - len(str(i))) + str(i) for i in range(num_agents)]
    agent_classes = {agent_id: "ultra.baselines.sac:sac-v0" for agent_id in agent_ids}
    agent_specs = {
        agent_id: BaselineAgentSpec(policy_class=SACPolicy, max_episode_steps=2)
        for agent_id in agent_ids
    }

    env = gym.make(
        "ultra.env:ultra-v0",
        agent_specs=agent_specs,
        scenario_info=scenario_info,
        headless=headless,
        timestep_sec=0.1,
        seed=seed,
    )

    agents = {
        agent_id: agent_spec.build_agent()
        for agent_id, agent_spec in agent_specs.items()
    }

    total_step = 0
    # Bug fix: iterate the policy-class strings (values), not the agent-id
    # keys, so the tag reads e.g. "sac-v0:sac-v0" rather than "000:001".
    etag = ":".join(
        [policy_class.split(":")[-1] for policy_class in agent_classes.values()]
    )
    evaluation_task_ids = dict()

    for episode in episodes(1, etag=etag, log_dir=log_dir):
        observations = env.reset()
        dones = {"__all__": False}
        infos = None
        episode.reset()
        experiment_dir = episode.experiment_dir

        # Save agent metadata once per experiment so evaluation can rebuild
        # the agents later.
        if not os.path.exists(f"{experiment_dir}/agent_metadata.pkl"):
            if not os.path.exists(experiment_dir):
                os.makedirs(experiment_dir)
            with open(f"{experiment_dir}/agent_metadata.pkl", "wb") as metadata_file:
                dill.dump(
                    {
                        "agent_ids": agent_ids,
                        "agent_classes": agent_classes,
                        "agent_specs": agent_specs,
                    },
                    metadata_file,
                    pickle.HIGHEST_PROTOCOL,
                )

        while not dones["__all__"]:
            # Possibly launch evaluations for this episode, then reap any
            # finished ones.
            evaluation_check(
                agents=agents,
                agent_ids=agent_ids,
                episode=episode,
                eval_rate=10,
                eval_episodes=1,
                max_episode_steps=2,
                policy_classes=agent_classes,
                scenario_info=scenario_info,
                evaluation_task_ids=evaluation_task_ids,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
            collect_evaluations(evaluation_task_ids=evaluation_task_ids)

            actions = {
                agent_id: agents[agent_id].act(observation, explore=True)
                for agent_id, observation in observations.items()
            }
            next_observations, rewards, dones, infos = env.step(actions)

            # Only agents present both before and after the step can learn
            # from this transition.
            active_agent_ids = observations.keys() & next_observations.keys()

            # pytype: disable=attribute-error
            loss_outputs = {
                agent_id: agents[agent_id].step(
                    state=observations[agent_id],
                    action=actions[agent_id],
                    reward=rewards[agent_id],
                    next_state=next_observations[agent_id],
                    done=dones[agent_id],
                    info=infos[agent_id],
                )
                for agent_id in active_agent_ids
            }
            # pytype: enable=attribute-error

            episode.record_step(
                agent_ids_to_record=active_agent_ids,
                infos=infos,
                rewards=rewards,
                total_step=total_step,
                loss_outputs=loss_outputs,
            )
            total_step += 1
            observations = next_observations

    # Wait on the remaining evaluations to finish.
    while collect_evaluations(evaluation_task_ids):
        time.sleep(0.1)

    env.close()
| 35.916335 | 101 | 0.59279 |
import glob
import os
import pickle
import re
import shutil
import time
import unittest
import dill
import gym
import ray
from ultra.baselines.agent_spec import BaselineAgentSpec
from ultra.baselines.sac.sac.policy import SACPolicy
from ultra.evaluate import collect_evaluations, evaluate, evaluation_check
from ultra.utils.episode import episodes
seed = 2
AGENT_ID = "001"
class EvaluateTest(unittest.TestCase):
OUTPUT_DIRECTORY = "tests/evaluate_test/"
@classmethod
def setUpClass(cls):
path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
multiagent_path = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
)
generate_command = (
"python ultra/scenarios/interface.py generate "
"--task 00 --level eval_test --root-dir tests/scenarios "
" --save-dir tests/task/eval_test/"
)
multiagent_generate_command = (
"python ultra/scenarios/interface.py generate "
"--task 00-multiagent --level eval_test --root-dir tests/scenarios "
"--save-dir tests/task/eval_test_multiagent/"
)
train_command = (
"python ultra/train.py "
"--task 00 --level eval_test --policy sac --headless --episodes 1 "
f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {path}"
)
multiagent_train_command = (
"python ultra/train.py "
"--task 00-multiagent --level eval_test --policy sac,dqn,ppo --headless --episodes 1 "
f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {multiagent_path}"
)
os.system(generate_command)
os.system(multiagent_generate_command)
if os.path.exists(path):
shutil.rmtree(path)
if os.path.exists(multiagent_path):
shutil.rmtree(multiagent_path)
if not os.path.exists(path):
os.system(train_command)
if not os.path.exists(multiagent_path):
os.system(multiagent_train_command)
def test_a_folders(self):
path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
if not os.path.exists(path):
self.assertTrue(False)
path = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models")
)[0]
if len(os.listdir(path)) == 0:
self.assertTrue(False)
path = "tests/task/eval_test"
if len(os.listdir(path)) <= 2:
self.assertTrue(False)
multiagent_path = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
)
if not os.path.exists(path):
self.assertTrue(False)
multiagent_path = glob.glob(
os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models"
)
)[0]
if len(os.listdir(multiagent_path)) < 2:
self.assertTrue(False)
multiagent_path = "tests/task/eval_test_multiagent"
if len(os.listdir(path)) <= 2:
self.assertTrue(False)
def test_evaluation_check(self):
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_logs/")
ray.shutdown()
ray.init()
try:
run_experiment(
scenario_info=("00", "eval_test"), num_agents=1, log_dir=log_dir
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluation_check_multiagent(self):
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_multiagent_logs/"
)
ray.shutdown()
ray.init()
try:
run_experiment(
scenario_info=("00-multiagent", "eval_test"),
num_agents=3,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_cli(self):
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_logs/")
experiment_dir = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*")
)[0]
evaluate_command = (
f"python ultra/evaluate.py "
f"--task 00 --level eval_test --experiment-dir {experiment_dir} "
f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
)
ray.shutdown()
try:
os.system(evaluate_command)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_cli_multiagent(self):
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_multiagent_logs/"
)
experiment_dir = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*")
)[0]
evaluate_command = (
f"python ultra/evaluate.py "
f"--task 00-multiagent --level eval_test --agents 000 --experiment-dir {experiment_dir} "
f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
)
ray.shutdown()
try:
os.system(evaluate_command)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_agent(self):
seed = 2
models_directory = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models/")
)[0]
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_agent_logs/")
with open(
os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
) as metadata_file:
agent_metadata = pickle.load(metadata_file)
agent_ids = agent_metadata["agent_ids"]
policy_classes = agent_metadata["agent_classes"]
checkpoint_directories = {
agent_id: sorted(
glob.glob(os.path.join(models_directory, agent_id, "*")),
key=lambda x: int(x.split("/")[-1]),
)
for agent_id in agent_ids
}
ray.shutdown()
ray.init(ignore_reinit_error=True)
try:
evaluate.remote(
experiment_dir=None,
agent_ids=agent_ids,
policy_classes=policy_classes,
seed=seed,
checkpoint_dirs=checkpoint_directories,
scenario_info=("00", "eval_test"),
num_episodes=1,
max_episode_steps=2,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
@unittest.skip("Test map does not yet support multiple agents.")
def test_evaluate_multiagent(self):
seed = 2
models_directory = glob.glob(
os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models/"
)
)[0]
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_multiagent_logs/"
)
with open(
os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
) as metadata_file:
agent_metadata = pickle.load(metadata_file)
agent_ids = agent_metadata["agent_ids"]
policy_classes = agent_metadata["agent_classes"]
checkpoint_directories = {
agent_id: sorted(
glob.glob(os.path.join(models_directory, agent_id, "*")),
key=lambda x: int(x.split("/")[-1]),
)
for agent_id in agent_ids
}
ray.shutdown()
ray.init(ignore_reinit_error=True)
try:
evaluate.remote(
experiment_dir=None,
agent_ids=agent_ids,
policy_classes=policy_classes,
seed=seed,
checkpoint_dirs=checkpoint_directories,
scenario_info=("00-multiagent", "eval_test"),
num_episodes=1,
max_episode_steps=2,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
def test_record_evaluation_at_proper_episode_indices(self):
AGENT_ID = "000"
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_episode_check_log/"
)
eval_rate = 4
num_episodes = 20
train_command = (
"python ultra/train.py "
f"--task 00 --level eval_test --policy sac --headless --episodes {num_episodes} "
f"--eval-rate {eval_rate} --eval-episodes 2 --max-episode-steps 2 --log-dir {log_dir}"
)
if not os.path.exists(log_dir):
os.system(train_command)
with open(
os.path.join(
log_dir, os.listdir(log_dir)[0], "pkls/Evaluation/results.pkl"
),
"rb",
) as handle:
evaluation_results = dill.load(handle)
for index in evaluation_results[AGENT_ID].keys():
self.assertEqual((index) % eval_rate, 0)
with open(
os.path.join(
log_dir, os.listdir(log_dir)[0], "pkls/Evaluation_Training/results.pkl"
),
"rb",
) as handle:
evaluation_training_results = dill.load(handle)
for index in evaluation_training_results[AGENT_ID].keys():
self.assertEqual((index) % eval_rate, 0)
def test_extract_policy_from_path(self):
paths = [
"from.ultra.baselines.sac:sac-v0",
"hello.ultra.ppo:ppo-v1",
"ultra.custom:custom",
"a.sb.ultra.c.d.e.sac:sac-v99",
"a.b.c.d.e.ultra.custom_agent.policy:MBPO-v2",
]
def extract(path):
m = re.search(
"ultra(.)*([a-zA-Z0-9_]*.)+([a-zA-Z0-9_])+:[a-zA-Z0-9_]+((-)*[a-zA-Z0-9_]*)*",
path,
)
try:
policy_class = m.group(0)
except AttributeError as e:
self.assertTrue(False)
for path in paths:
extract(path)
@classmethod
def tearDownClass(cls):
if os.path.exists(EvaluateTest.OUTPUT_DIRECTORY):
shutil.rmtree(EvaluateTest.OUTPUT_DIRECTORY)
if os.path.exists("tests/task/eval_test/"):
shutil.rmtree("tests/task/eval_test/")
if os.path.exists("tests/task/eval_test_multiagent/"):
shutil.rmtree("tests/task/eval_test_multiagent/")
def run_experiment(scenario_info, num_agents, log_dir, headless=True):
agent_ids = ["0" * max(0, 3 - len(str(i))) + str(i) for i in range(num_agents)]
agent_classes = {agent_id: "ultra.baselines.sac:sac-v0" for agent_id in agent_ids}
agent_specs = {
agent_id: BaselineAgentSpec(policy_class=SACPolicy, max_episode_steps=2)
for agent_id in agent_ids
}
env = gym.make(
"ultra.env:ultra-v0",
agent_specs=agent_specs,
scenario_info=scenario_info,
headless=headless,
timestep_sec=0.1,
seed=seed,
)
agents = {
agent_id: agent_spec.build_agent()
for agent_id, agent_spec in agent_specs.items()
}
total_step = 0
etag = ":".join([policy_class.split(":")[-1] for policy_class in agent_classes])
evaluation_task_ids = dict()
for episode in episodes(1, etag=etag, log_dir=log_dir):
observations = env.reset()
dones = {"__all__": False}
infos = None
episode.reset()
experiment_dir = episode.experiment_dir
if not os.path.exists(f"{experiment_dir}/agent_metadata.pkl"):
if not os.path.exists(experiment_dir):
os.makedirs(experiment_dir)
with open(f"{experiment_dir}/agent_metadata.pkl", "wb") as metadata_file:
dill.dump(
{
"agent_ids": agent_ids,
"agent_classes": agent_classes,
"agent_specs": agent_specs,
},
metadata_file,
pickle.HIGHEST_PROTOCOL,
)
while not dones["__all__"]:
evaluation_check(
agents=agents,
agent_ids=agent_ids,
episode=episode,
eval_rate=10,
eval_episodes=1,
max_episode_steps=2,
policy_classes=agent_classes,
scenario_info=scenario_info,
evaluation_task_ids=evaluation_task_ids,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
collect_evaluations(evaluation_task_ids=evaluation_task_ids)
actions = {
agent_id: agents[agent_id].act(observation, explore=True)
for agent_id, observation in observations.items()
}
next_observations, rewards, dones, infos = env.step(actions)
active_agent_ids = observations.keys() & next_observations.keys()
loss_outputs = {
agent_id: agents[agent_id].step(
state=observations[agent_id],
action=actions[agent_id],
reward=rewards[agent_id],
next_state=next_observations[agent_id],
done=dones[agent_id],
info=infos[agent_id],
)
for agent_id in active_agent_ids
}
episode.record_step(
agent_ids_to_record=active_agent_ids,
infos=infos,
rewards=rewards,
total_step=total_step,
loss_outputs=loss_outputs,
)
total_step += 1
observations = next_observations
while collect_evaluations(evaluation_task_ids):
time.sleep(0.1)
env.close()
| true | true |
f71be12c5aef84eac371279bc150835aa8551c7d | 27 | py | Python | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | 3 | 2022-03-16T09:11:33.000Z | 2022-03-19T19:43:50.000Z | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | null | null | null | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | null | null | null | from .CpyAPI import CpyAPI
| 13.5 | 26 | 0.814815 | from .CpyAPI import CpyAPI
| true | true |
f71be1fb130e1061491a135bc6bfa210726ceb66 | 650 | py | Python | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | # Crie um programa que tenha uma tupla única com nomes de produtos
# e seus respectivos preços, na sequência. No final, mostre uma
# listagem de preços, organizando os dados em forma tabular.
listagem = ('Lápis', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 10,
'Compasso', 7.95,
'Mochila', 150.45,
'Canetas', 22.30,
'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
for i in range(0, len(listagem)):
if i % 2 == 0:
print(f'{listagem[i]:.<30}', end='')
else:
print(f'R${listagem[i]:>7.2f}')
print('-'*40)
| 28.26087 | 66 | 0.543077 |
listagem = ('Lápis', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 10,
'Compasso', 7.95,
'Mochila', 150.45,
'Canetas', 22.30,
'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
for i in range(0, len(listagem)):
if i % 2 == 0:
print(f'{listagem[i]:.<30}', end='')
else:
print(f'R${listagem[i]:>7.2f}')
print('-'*40)
| true | true |
f71be2a3c566dab4f1526507df28e4b0c96bd528 | 4,961 | py | Python | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
# tf.enable_eager_execution()
class Dataset(object):
    """Builds tf.data input pipelines feeding a seq2seq tf.estimator model."""

    def get_dataset(self, params, mode):
        """Return (features, labels) batched tensors for the given mode.

        Args:
            params: dict of pipeline options (file paths, batch_size,
                buffer_size, repeat, src/tgt max lengths, ...).
            mode: one of tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}.

        Returns:
            features: dict with "input" (padded token matrix) and
                "input_length" tensors.
            labels: dict with "output_in"/"output_out"/"output_length",
                or None in PREDICT mode.
        """
        if mode == tf.estimator.ModeKeys.TRAIN:
            features_path = params["train_features_file"]
            labels_path = params["train_labels_file"]
        elif mode == tf.estimator.ModeKeys.EVAL:
            features_path = params["eval_features_file"]
            labels_path = params["eval_labels_file"]
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_path = params["test_features_file"]
            labels_path = params["test_labels_file"]
        else:
            raise ValueError("wrong mode!!!")
        features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)
        if mode == tf.estimator.ModeKeys.PREDICT:
            # Tokenize each line on whitespace.
            dataset = features_dataset.map(lambda x: tf.string_split([x]).values)
            # NOTE(review): shuffling during PREDICT loses the input order —
            # confirm this is intentional.
            dataset = dataset.shuffle(buffer_size=params["buffer_size"],
                                      reshuffle_each_iteration=params["reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            dataset = dataset.map(lambda src: (src, tf.size(src)))
            # Pad variable-length token sequences with "<blank>".
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
                                           padding_values=(tf.constant("<blank>"), 0))
            iterator = dataset.make_one_shot_iterator()
            src, src_len = iterator.get_next()
            features = {
                "input": src,
                "input_length": src_len
            }
            labels = None
        else:
            dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
            # Tokenize both sides on whitespace.
            dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))
            dataset = dataset.repeat(params["repeat"]).shuffle(buffer_size=params["buffer_size"],
                                                               reshuffle_each_iteration=params[
                                                                   "reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Truncate overly long sequences (<=0 disables truncation).
            if params["src_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src[:params["src_max_len"]], tgt))
            if params["tgt_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src, tgt[:params["tgt_max_len"]]))
            # Decoder input is "<s>" + tgt; decoder target is tgt + "</s>".
            dataset = dataset.map(
                lambda src, tgt: (src,
                                  tf.concat((["<s>"], tgt), 0),
                                  tf.concat((tgt, ["</s>"]), 0)),
                num_parallel_calls=params["num_parallel_calls"])
            dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([]),
                                               tf.TensorShape([])),
                                           padding_values=(
                                               tf.constant("<blank>", dtype=tf.string),
                                               tf.constant("<s>", dtype=tf.string),
                                               tf.constant("</s>", dtype=tf.string),
                                               0,
                                               0))
            iterator = dataset.make_one_shot_iterator()
            src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()
            features = {
                "input": src,
                "input_length": input_length
            }
            labels = {
                "output_in": tgt_in,
                "output_out": tgt_out,
                "output_length": output_length
            }
        return features, labels

    @staticmethod
    def _load_dataset(features_path, labels_path, mode):
        '''Read raw text-line datasets from files.

        :param features_path: path of the source-side text file
        :param labels_path: path of the target-side text file
        :param mode: a tf.estimator.ModeKeys value
        :return: (features_dataset, labels_dataset); labels side is None
            for PREDICT
        '''
        if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            labels_dataset = tf.data.TextLineDataset(filenames=labels_path)
            return features_dataset, labels_dataset
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            return features_dataset, None
# Module-level singleton used by callers of this module.
data_util = Dataset()
| 48.637255 | 118 | 0.504939 | import tensorflow as tf
class Dataset(object):
    """Builds tf.data input pipelines feeding a seq2seq tf.estimator model."""

    def get_dataset(self, params, mode):
        """Return (features, labels) batched tensors for the given mode.

        labels is None in PREDICT mode; otherwise it carries the
        decoder-input ("<s>"+tgt) and decoder-target (tgt+"</s>") pair.
        """
        if mode == tf.estimator.ModeKeys.TRAIN:
            features_path = params["train_features_file"]
            labels_path = params["train_labels_file"]
        elif mode == tf.estimator.ModeKeys.EVAL:
            features_path = params["eval_features_file"]
            labels_path = params["eval_labels_file"]
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_path = params["test_features_file"]
            labels_path = params["test_labels_file"]
        else:
            raise ValueError("wrong mode!!!")
        features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)
        if mode == tf.estimator.ModeKeys.PREDICT:
            # Tokenize on whitespace; pad batches with "<blank>".
            dataset = features_dataset.map(lambda x: tf.string_split([x]).values)
            dataset = dataset.shuffle(buffer_size=params["buffer_size"],
                                      reshuffle_each_iteration=params["reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            dataset = dataset.map(lambda src: (src, tf.size(src)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
                                           padding_values=(tf.constant("<blank>"), 0))
            iterator = dataset.make_one_shot_iterator()
            src, src_len = iterator.get_next()
            features = {
                "input": src,
                "input_length": src_len
            }
            labels = None
        else:
            dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
            dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))
            dataset = dataset.repeat(params["repeat"]).shuffle(buffer_size=params["buffer_size"],
                                                               reshuffle_each_iteration=params[
                                                                   "reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Truncate overly long sequences (<=0 disables truncation).
            if params["src_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src[:params["src_max_len"]], tgt))
            if params["tgt_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src, tgt[:params["tgt_max_len"]]))
            dataset = dataset.map(
                lambda src, tgt: (src,
                                  tf.concat((["<s>"], tgt), 0),
                                  tf.concat((tgt, ["</s>"]), 0)),
                num_parallel_calls=params["num_parallel_calls"])
            dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([]),
                                               tf.TensorShape([])),
                                           padding_values=(
                                               tf.constant("<blank>", dtype=tf.string),
                                               tf.constant("<s>", dtype=tf.string),
                                               tf.constant("</s>", dtype=tf.string),
                                               0,
                                               0))
            iterator = dataset.make_one_shot_iterator()
            src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()
            features = {
                "input": src,
                "input_length": input_length
            }
            labels = {
                "output_in": tgt_in,
                "output_out": tgt_out,
                "output_length": output_length
            }
        return features, labels

    @staticmethod
    def _load_dataset(features_path, labels_path, mode):
        # Read raw text-line datasets; the labels side is None for PREDICT.
        if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            labels_dataset = tf.data.TextLineDataset(filenames=labels_path)
            return features_dataset, labels_dataset
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            return features_dataset, None
# Module-level singleton used by callers of this module.
data_util = Dataset()
| true | true |
f71be2a65e952c2d97ca924e09c67df982278f0e | 66,367 | py | Python | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | 1 | 2017-09-07T00:48:20.000Z | 2017-09-07T00:48:20.000Z | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | null | null | null | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
# Make the gcp utils (e.g. big_query_utils) importable.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
# Run everything relative to the repository root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
# Environment forced onto tests that run through wrapper scripts.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}
# Polling engines exercised per platform; platforms not listed use 'all'.
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}
# One row of historical per-test data fetched from BigQuery.
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
    """Fetch per-test flakiness and cpu usage history from BigQuery.

    Aggregates the last week of Jenkins results for the current platform
    (excluding portability jobs) into BigQueryTestData rows.

    Args:
      limit: optional cap on the number of rows returned.
    """
    import big_query_utils

    bq = big_query_utils.create_big_query()
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    # Each BigQuery row is {'f': [{'v': value}, ...]} in column order.
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data
def platform_string():
    # Thin alias so the rest of this file doesn't reach into jobset directly.
    return jobset.platform_string()
# Default per-test timeout; may be scaled per config/polling strategy.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
    """Run *cmd* through the shell; log the details and re-raise on failure."""
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as error:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            error.cmd, error.returncode, error.output)
        raise
def max_parallel_tests_for_current_platform():
    """Return the test-parallelism cap for the host platform.

    Too much test parallelization has only been seen to be a problem
    so far on windows, which therefore gets a much lower cap.
    """
    return 64 if jobset.platform_string() == 'windows' else 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (dbg, opt, asan, ...) plus test-run settings.

    Instances know how to wrap a test command line into a jobset.JobSpec,
    applying the config's tool prefix, timeout multiplier and environment.
    """

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        # None sentinels instead of mutable default arguments ({} / []) so
        # the defaults can never be shared or mutated across instances.
        if environ is None:
            environ = {}
        if tool_prefix is None:
            tool_prefix = []
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline: a list of strings specifying the command line the test
            would like to run.
          timeout_seconds: base timeout, scaled by this config's
            timeout_multiplier; None disables the timeout.
          shortname: human-readable test name; also the key used to look
            up historical flakiness/cpu-cost overrides.
          environ: extra environment variables layered over the config's.
          cpu_cost: relative cpu reservation for the job scheduler.
          flaky: mark the test as flaky, enabling retries.
        """
        actual_environ = self.environ.copy()
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        # flaky_tests / shortname_to_cpu / args are module-level globals
        # defined elsewhere in this file (presumably filled from BigQuery
        # history and argparse).
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Load runnable test targets for *test_lang* from generated/tests.json.

    Args:
      travis: if True, filter by 'ci_platforms' and drop flaky targets.
      test_lang: language tag matched against each target's 'language'.
    Returns:
      The list of target dicts valid for the current platform.
    """
    # (removed unused local 'out')
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception(
'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
# Bundle of per-platform settings consumed by _python_config_generator and
# _pypy_config_generator when assembling PythonConfig build/run commands.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for CPython major.minor on this platform.

    The build command creates a virtualenv named *name* (suffixed with the
    iomgr platform); the run command executes the test runner inside it.
    """
    name += '_' + config_vars.iomgr_platform
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _python_pattern_function(major=major, minor=minor, bits=bits)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner + [
            os.path.join(name, config_vars.venv_relative_python[0]),
            config_vars.test_name
        ])
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for PyPy of the given major version.

    Unlike _python_config_generator, no iomgr suffix is appended and the
    run command does not pass a test-suite name.
    """
    return PythonConfig(
        name,
        config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _pypy_pattern_function(major=major)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner +
        [os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
    """Language driver for the core C/C++ test targets.

    Knows how to configure the toolchain (make vs cmake, docker distro,
    compiler overrides), enumerate test binaries from generated/tests.json
    and expand them into per-test JobSpecs (splitting gtest/benchmark
    binaries into one job per test case).
    """

    def __init__(self, make_target, test_lang):
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Record config/args and derive build-system and iomgr settings."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            # Compile against libuv; fall back to '-luv' when pkg-config
            # cannot locate it.
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand every runnable test binary into a sorted list of JobSpecs."""
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                    polling_strategy,
                    'GRPC_VERBOSITY':
                    'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue
                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3
                    elif polling_strategy == 'poll-cv':
                        # scale test timeout if running with poll-cv
                        # sanitizer and poll-cv scaling is not cumulative to ensure
                        # reasonable timeout values.
                        # TODO(jtattermusch): based on historical data and 5min default
                        # test timeout poll-cv scaling is currently not useful.
                        # Leaving here so it can be reintroduced if the default test timeout
                        # is decreased in the future.
                        timeout_scaling *= 1
                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    list_test_command = None
                    filter_test_command = None
                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                       ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                           ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' % (' '.join(cmdline),
                                                             shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        # Under ubsan, link with clang++ so the C++ runtime is available.
        if self.args.config == 'ubsan':
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]
        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            _check_compiler(compiler, ['default'])
        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            # installed.
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Language driver that delegates Node testing to the grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        if self.args.compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        else:
            if self.args.compiler.startswith('electron'):
                self.runtime = 'electron'
                self.node_version = self.args.compiler[8:]
            else:
                self.runtime = 'node'
                # Take off the word "node"
                self.node_version = self.args.compiler[4:]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        else:
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
class PhpLanguage(object):
    """Language driver for the PHP 5 extension tests."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # The extension links against the static/shared core C libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php'
class Php7Language(object):
    """Language driver for the PHP 7 extension tests (mirrors PhpLanguage)."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # The extension links against the static/shared core C libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named s.t. 'what it says on the tin' applies).

    name: label for the python runtime (e.g. 'py27_native');
    build: command line that prepares the virtualenv;
    run: command line that executes the tests inside it.
    """
class PythonLanguage(object):
    """Language driver for the grpcio Python tests.

    Builds one virtualenv per selected interpreter (CPython 2.7-3.7,
    PyPy 2/3) and runs each test suite from tests.json as its own job.
    """

    def configure(self, config, args):
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        # One job per (interpreter, suite) pair; the suite name is passed
        # to the runner via GRPC_PYTHON_TESTRUNNER_FILTER.
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [config.build for config in self.pythons]

    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))

    def _python_manager_name(self):
        """Choose the docker image to use based on python version."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        elif self.args.compiler == 'python3.4':
            return 'jessie'
        else:
            return 'stretch_3.7'

    def _get_pythons(self, args):
        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'
        if os.name == 'nt':
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']
        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]
        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        python37_config = _python_config_generator(
            name='py37',
            major='3',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)
        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python37_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
                python37_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Language driver for the Ruby gem tests (unit + end2end)."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        tests = [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        tests.append(
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return tests

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Build/test adapter for C#, covering mono/.NET45 and CoreCLR runtimes."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Validates compiler/arch choices and records platform-specific knobs."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'

    def test_specs(self):
        """Builds job specs from src/csharp/tests.json, one per test (or one
        per assembly when collecting coverage on Windows)."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)
        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'
        # CoreCLR produces .dll assemblies executed via `dotnet exec`;
        # classic builds produce .exe run directly (Windows) or under mono.
        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']
        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]
                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        """Pre-build script; Windows also passes the cmake arch."""
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        """Native extension built via make."""
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            # no need to set x86 specific flags as run_tests.py
            # currently forbids x86 C# builds on both Linux and MacOS.
            return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Build/test adapter for Objective-C: unit/plugin tests, a set of
    example-app build checks, and the CFStream iomgr tests."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One spec per test suite / example build; cpu_cost=1e6 forces each
        xcodebuild invocation to run exclusively."""
        return [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-helloworld',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'HelloWorld',
                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-routeguide',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'RouteGuideClient',
                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-authsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'AuthSample',
                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }),
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=10 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # interop_server is needed by the ObjC interop tests.
        return ['interop_server']

    def make_options(self):
        return []

    def build_steps(self):
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests run natively on Mac, never under docker.
        return None

    def __str__(self):
        return 'objc'
class Sanity(object):
    """Pseudo-language that runs the repository sanity checks (linters,
    formatting checks, dependency checks) listed in sanity_tests.yaml."""

    def configure(self, config, args):
        """Stores the run configuration; only the default compiler is valid."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Loads the yaml manifest and emits one job spec per sanity script."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # Already running inside docker: the clang tools must not try
                # to spawn their own docker containers.
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            specs = []
            for cmd in yaml.load(f):
                specs.append(
                    self.config.job_spec(
                        cmd['script'].split(),
                        timeout_seconds=30 * 60,
                        environ=environ,
                        cpu_cost=cmd.get('cpu_cost', 1)))
            return specs

    def pre_build_steps(self):
        return []

    def make_targets(self):
        """Dependency checks are driven through make."""
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
# different configurations we can run under
# configs.json contains Python literals (not strict JSON), hence literal_eval.
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Maps the --language command-line value to its language adapter object.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps our config names to the MSBuild configuration used on Windows.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
    """Checks that the architecture option is valid on the current platform.

    Prints an error message and exits the process when the combination is
    unsupported; returns None on success.
    """
    if platform_string() == 'windows':
        # Reuses the msbuild arch mapping purely for validation (it exits on
        # unknown values).
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # BUG FIX: this branch previously read the module-level ``args.arch``
        # instead of the ``arch`` parameter, which would misbehave if the
        # function were ever called with a different value.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' %
                  arch)
            sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
    """Auxilary function to parse the "runs_per_test" flag.

    Returns:
      A positive integer or 0, the latter indicating an infinite number of
      runs.

    Raises:
      argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # Narrowed from a bare ``except:``: only parse/range failures are
        # "invalid input"; anything else (e.g. KeyboardInterrupt) propagates.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse type: parses a percentage in the inclusive [0, 100] range."""
    value = float(arg_str)
    if value > 100 or value < 0:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % value)
    return value
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b agree within the relative/absolute tolerances."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
# Test selection: regex include/exclude, parallelism and timing knobs.
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=sorted(_LANGUAGES.keys()),
    nargs='+',
    required=True)
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# Build environment selection: architecture, compiler, iomgr implementation.
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
        'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
        'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# Reporting options: JUnit XML and BigQuery upload.
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()

# Optionally seed flaky-test names and per-test cpu costs from BigQuery
# history; failures here are non-fatal (best effort).
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except:
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # spec format: SUBMODULE_NAME[:BRANCH]; BRANCH defaults to master.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

# Custom make options cannot be combined across languages except for the
# C/C++ pair under gcov (see comment below).
language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))

# Under --use_docker this process re-executes itself inside a docker
# container (the inner invocation is marked via RUN_TESTS_COMMAND) and exits.
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Returns job specs that build ``targets`` for configuration ``cfg``.

    On Windows each target becomes a separate `cmake --build` job; elsewhere
    either a bare `make` in cmake/build (cmake-configured builds) or a single
    classic `make -f <makefile>` invocation covering all targets.
    """
    if platform_string() == 'windows':
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    else:
        if targets and makefile.startswith('cmake/build/'):
            # With cmake, we've passed all the build configuration in the pre-build step already
            return [
                jobset.JobSpec(
                    [os.getenv('MAKE', 'make'), '-j',
                     '%d' % args.jobs] + targets,
                    cwd='cmake/build',
                    timeout_seconds=None)
            ]
        if targets:
            return [
                jobset.JobSpec(
                    [
                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                        '%d' % args.jobs,
                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                        args.slowdown,
                        'CONFIG=%s' % cfg, 'Q='
                    ] + language_make_options +
                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
                    timeout_seconds=None)
            ]
        else:
            # Nothing to build for this makefile.
            return []
# Aggregate the make targets requested by each selected language, keyed by
# the makefile that builds them (different languages may share a makefile).
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
def build_step_environ(cfg):
    """Returns the environment dict passed to build steps for config ``cfg``.

    Always sets CONFIG; additionally sets MSBUILD_CONFIG for configs that
    have a Windows MSBuild equivalent.
    """
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
# Assemble the ordered build pipeline: per-language pre-build steps, then the
# make/cmake commands, then per-language build scripts.
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Cleanup/reporting steps that run after all tests complete.
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Best-effort shutdown of a legacy port server on ``legacy_server_port``.

    If a server answers on /version_number it is asked to quit via
    /quitquitquit; if nothing answers (or the response is not an int) there
    is nothing to shut down and the error is ignored.
    """
    try:
        version = int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # Narrowed from a bare ``except:``: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Failure categories returned by _build_and_run().

    Each value is a unique sentinel object compared by identity.
    """
    BUILD = object()
    TEST = object()
    POST_TEST = object()
def _has_epollexclusive():
    """Returns True when the check_epollexclusive helper binary exists and
    exits successfully, i.e. EPOLLEXCLUSIVE is usable on this machine.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError:
        # Binary ran but reported that EPOLLEXCLUSIVE is unavailable.
        # (unused ``as e`` bindings removed)
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: zero-arg callable polled by the job runner; returning
        True cancels the run.
      newline_on_success: forwarded to jobset.run output formatting.
      xml_report: optional path for a JUnit XML report.
      build_only: when True, stop after the build phase.
    Returns:
      A list of BuildAndRunError values (empty on success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the epollex strategy when the helper binary says it is unusable.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Collect the specs matching --regex and not matching --regex_exclude.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                      ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        # Summarize flaky vs hard-failing tests for this pass.
        if resultset:
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
# Main driver: either watch the tree and rerun forever (-f), or do a single
# pass and exit with a bitmask describing what failed.
if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        # BUG FIX: previously this computed ``_build_and_run(...) == 0`` —
        # comparing the returned *list* of errors to the int 0, which is
        # always False — and never updated ``success``, so the transition
        # message below could never fire. Track success from the error list.
        success = not errors
        if not previous_success and success:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Sleep until a watched file changes, then loop and rebuild.
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code is a bitmask: 1=build failed, 2=tests failed, 4=post steps failed.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
| 35.096245 | 135 | 0.566773 |
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass
# Make the shared gcp/utils helpers importable (big_query_utils etc.).
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# Run everything relative to the repository root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Extra environment forced onto wrapper-script test runs.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Polling engines exercised per platform (platforms absent here use 'all').
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}

# One row of historical per-test data fetched from BigQuery.
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
    """Queries BigQuery for last week's per-test flakiness and cpu usage.

    Args:
      limit: optional int; caps the number of rows returned.
    Returns:
      A list of BigQueryTestData(name, flaky, cpu) tuples.
    """
    import big_query_utils

    bq = big_query_utils.create_big_query()
    # Aggregates last week's results for this platform, excluding portability
    # jobs; test names are normalized by stripping trailing /<shard> suffixes.
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data
def platform_string():
    """Returns the jobset platform name ('linux', 'mac', 'windows', ...)."""
    return jobset.platform_string()


# Default per-test timeout before the config's timeout multiplier is applied.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
    """Runs ``cmd`` through the shell; logs details and re-raises on failure."""
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as exc:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            exc.cmd, exc.returncode, exc.output)
        raise
def max_parallel_tests_for_current_platform():
    """Upper bound on concurrently running test jobs for this platform."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build/run configuration (e.g. 'dbg', 'opt', 'gcov')."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Args:
          config: config name; also exported as CONFIG in the environment.
          environ: optional dict of extra environment variables.
          timeout_multiplier: scales every job's timeout.
          tool_prefix: optional command prefix (e.g. a wrapper tool).
          iomgr_platform: iomgr implementation this config targets.
        """
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        # BUG FIX: tool_prefix previously defaulted to a shared mutable list
        # ([]); default to None and substitute a fresh list here.
        self.tool_prefix = tool_prefix if tool_prefix is not None else []
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline:      a list of strings specifying the command line the test
                        would be run with
          shortname:    use in reports and for filtering by --regex
          environ:      optional dict of extra environment overrides
          cpu_cost:     relative cpu cost of the job (may be overridden by
                        BigQuery-derived data in shortname_to_cpu)
          flaky:        forces retries; also enabled for tests known flaky
        """
        actual_environ = self.environ.copy()
        # BUG FIX: environ previously defaulted to a shared mutable dict ({}).
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Loads C/C++ test targets runnable on this platform from tests.json.

    Args:
      travis: when True, filter on the 'ci_platforms' list and drop targets
        marked flaky.
      test_lang: the 'language' field to match (e.g. 'c' or 'c++').
    Returns:
      The list of matching test-target dicts.
    """
    # (dead local ``out = []`` removed)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception(
'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
return True if os.getenv('RUN_TESTS_COMMAND') else False
# Bundle of per-platform knobs consumed by the Python config generators below
# (shell wrapper, venv builder command and arguments, runner script, etc.).
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Builds a PythonConfig for one CPython interpreter.

    The first command builds a virtualenv named after the config; the second
    runs the test suite with that venv's python. ``name`` is suffixed with
    the iomgr platform so e.g. py27 native/gevent configs stay distinct.
    """
    name += '_' + config_vars.iomgr_platform
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _python_pattern_function(major=major, minor=minor, bits=bits)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner + [
            os.path.join(name, config_vars.venv_relative_python[0]),
            config_vars.test_name
        ])
def _pypy_config_generator(name, major, config_vars):
    """Builds a PythonConfig for a PyPy interpreter (major '2' or '3').

    Mirrors _python_config_generator but selects the pypy binary and does not
    append a test name to the run command.
    """
    return PythonConfig(
        name,
        config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _pypy_pattern_function(major=major)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner +
        [os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
    """Language driver for the C/C++ test suites (make- or cmake-based builds)."""

    def __init__(self, make_target, test_lang):
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch choices and derive build settings."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            # libuv iomgr build: add cflags/ldflags discovered via pkg-config,
            # falling back to -luv when pkg-config is unavailable.
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand generated test targets into one JobSpec per test/poll engine."""
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake builds do not support boringssl-only targets
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                    polling_strategy,
                    'GRPC_VERBOSITY':
                    'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue
                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various
                        # sanitizers; value based on historical data analysis.
                        timeout_scaling *= 3
                    elif polling_strategy == 'poll-cv':
                        # TODO(jtattermusch): based on historical data and the
                        # 5min default test timeout, poll-cv scaling is
                        # currently not useful. Leaving here so it can be
                        # reintroduced if the default timeout is decreased.
                        timeout_scaling *= 1
                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    list_test_command = None
                    filter_test_command = None
                    # these are the flags defined by the gtest and benchmark
                    # frameworks to list and filter test runs. We use them to
                    # split each individual test into its own JobSpec, and
                    # thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                       ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to
                        # build up a complete list of the tests contained in a
                        # binary; for each test we then add a job to run it,
                        # filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # BUGFIX: strip trailing gtest comments (text
                            # after '#', e.g. for typed tests). The original
                            # line had lost the '#' literal and was a syntax
                            # error: i = line.find('
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                # unindented line: a new test-suite name
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                           ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' %
                                        (' '.join(cmdline), shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        """Make variables selecting a (possibly version-suffixed) clang."""
        if self.args.config == 'ubsan':
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]
        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        """Make variables selecting a version-suffixed gcc toolchain."""
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            _check_compiler(compiler, ['default'])
        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
class RemoteNodeLanguage(object):
    """Delegates grpc-node testing to helper scripts (tests live in the
    separate grpc/grpc-node repository); no local build steps."""
    def __init__(self):
        self.platform = platform_string()
    def configure(self, config, args):
        """Record run configuration; node/electron runtime+version come from --compiler."""
        self.config = config
        self.args = args
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        if self.args.compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        else:
            if self.args.compiler.startswith('electron'):
                self.runtime = 'electron'
                # version is whatever follows the word "electron"
                self.node_version = self.args.compiler[8:]
            else:
                self.runtime = 'node'
                # Take off the word "node"
                self.node_version = self.args.compiler[4:]
    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """Single job that hands off to the per-platform grpc-node runner script."""
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        else:
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return []
    def make_options(self):
        return []
    def build_steps(self):
        return []
    def post_tests_steps(self):
        return []
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)
    def __str__(self):
        return 'grpc-node'
class PhpLanguage(object):
    """Language driver running the PHP 5 test suite via helper scripts."""

    def configure(self, config, args):
        """Record run configuration; only the default compiler is accepted."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """A single job covering the whole PHP suite."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return ('tools/dockerfile/test/php_jessie_%s' %
                _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'php'
class Php7Language(object):
    """Language driver running the PHP 7 test suite via helper scripts.

    NOTE(review): intentionally mirrors PhpLanguage method-for-method;
    only the docker image and the language name differ.
    """

    def configure(self, config, args):
        """Record run configuration; only the default compiler is accepted."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """A single job covering the whole PHP suite."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return ('tools/dockerfile/test/php7_jessie_%s' %
                _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'php7'
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of a config name plus its 'build' and 'run' command lines.

    BUGFIX: the class statement had no body at all, which is a
    SyntaxError in Python; a docstring is a sufficient body.
    """
class PythonLanguage(object):
    """Language driver running the Python test suites across a matrix of
    interpreters (CPython 2.7-3.7, PyPy) built into per-config virtualenvs."""
    def configure(self, config, args):
        """Record run configuration and resolve the interpreter matrix."""
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)
    def test_specs(self):
        """One JobSpec per (interpreter config, test suite) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return []
    def make_options(self):
        return []
    def build_steps(self):
        # each PythonConfig carries its own virtualenv build command
        return [config.build for config in self.pythons]
    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
    def _python_manager_name(self):
        """Choose the docker image flavor matching the requested compiler."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        elif self.args.compiler == 'python3.4':
            return 'jessie'
        else:
            return 'stretch_3.7'
    def _get_pythons(self, args):
        """Return the tuple of PythonConfigs selected by args.compiler."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'
        if os.name == 'nt':
            # Windows builds run through msys2 bash and the MINGW toolchain.
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']
        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]
        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        python37_config = _python_config_generator(
            name='py37',
            major='3',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)
        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python37_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
                python37_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)
    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Language driver running the Ruby unit and end2end suites."""

    def configure(self, config, args):
        """Record run configuration; only the default compiler is accepted."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Unit-test job plus the (longer-running) end2end job."""
        return [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return ('tools/dockerfile/test/ruby_jessie_%s' %
                _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Language driver for the C# test assemblies (mono or coreclr runtime)."""
    def __init__(self):
        self.platform = platform_string()
    def configure(self, config, args):
        """Validate compiler/arch and pick per-platform build settings."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'
    def test_specs(self):
        """One JobSpec per test, or per assembly under Windows gcov coverage."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)
        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'
        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']
        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]
                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs
    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
    def make_targets(self):
        return ['grpc_csharp_ext']
    def make_options(self):
        return []
    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]
    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            # no need to set x86 specific flags as run_tests.py
            # currently forbids x86 C# builds on both Linux and MacOS.
            return 'cmake/build/Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))
    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Language driver for Objective-C: runs the unit/plugin/CFStream tests
    and builds each sample app as a smoke test."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
    def test_specs(self):
        """Fixed list of jobs: test-runner scripts plus one build job per
        example app (cpu_cost=1e6 so each xcodebuild runs exclusively)."""
        return [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-helloworld',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'HelloWorld',
                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-routeguide',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'RouteGuideClient',
                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-authsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'AuthSample',
                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }),
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=10 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        # interop_server is needed by the Objective-C interop tests
        return ['interop_server']
    def make_options(self):
        return []
    def build_steps(self):
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]
    def post_tests_steps(self):
        return []
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        # ObjC tests must run on a Mac host, never under docker
        return None
    def __str__(self):
        return 'objc'
class Sanity(object):
    """Pseudo-language running the sanity/lint checks listed in
    tools/run_tests/sanity/sanity_tests.yaml."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One JobSpec per entry in sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            # safe_load instead of load: the file is plain data, and
            # yaml.load without an explicit Loader is deprecated and can
            # execute arbitrary constructors.
            return [
                self.config.job_spec(
                    cmd['script'].split(),
                    timeout_seconds=30 * 60,
                    environ=environ,
                    cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    # NOTE(review): parsed with ast.literal_eval rather than json.load — the
    # file is expected to contain a Python-literal-compatible list of dicts.
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
# maps each --language value to its language-driver object
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}
# build-config name -> MSBuild configuration name (used by Windows builds)
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
def _windows_arch_option(arch):
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
    """Exit with an error if the requested --arch cannot be honored on the
    current platform/runtime."""
    if platform_string() == 'windows':
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # BUGFIX: previously read the module-level 'args' global instead of
        # the 'arch' parameter, which only worked because the sole caller
        # happens to pass args.arch after argument parsing.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
def _docker_arch_suffix(arch):
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
    """argparse type function: 'inf' maps to 0 (run forever), otherwise a
    positive integer.

    Raises:
        argparse.ArgumentTypeError: for anything that is not 'inf' or a
            positive integer.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0: raise ValueError
        return n
    except ValueError:
        # Narrowed from a bare 'except:' so that unrelated errors (e.g.
        # KeyboardInterrupt) are not swallowed and misreported as usage errors.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse type function: a float constrained to the [0, 100] range."""
    pct = float(arg_str)
    if pct < 0 or pct > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b agree within a relative or absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
# (flag semantics: store_const/const=True flags default to False; type=
# callables above validate and convert values, raising ArgumentTypeError)
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=sorted(_LANGUAGES.keys()),
    nargs='+',
    required=True)
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
        'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
        'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()

# Best-effort: fetch historical flakiness/cpu data from BigQuery.
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any other failure here is non-fatal by design.
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))

if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # re-exec this script inside docker; the child detects this via
    # RUN_TESTS_COMMAND (see _is_use_docker_child above)
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Build the list of JobSpecs that compile `targets` for config `cfg`.

    On Windows the build is driven through `cmake --build`; elsewhere it is
    driven through make, either from a cmake-generated build tree or from the
    top-level Makefile with the repository's standard make variables.
    """
    if platform_string() == 'windows':
        # One cmake --build invocation per target, using the MSBuild
        # configuration that corresponds to this run config.
        msbuild_cfg = _MSBUILD_CONFIG[cfg]
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', msbuild_cfg
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    if targets and makefile.startswith('cmake/build/'):
        # cmake-generated Makefile: run make from inside the build tree.
        return [
            jobset.JobSpec(
                [os.getenv('MAKE', 'make'), '-j',
                 '%d' % args.jobs] + targets,
                cwd='cmake/build',
                timeout_seconds=None)
        ]
    if not targets:
        return []
    # Top-level Makefile invocation with slowdown factor, config and any
    # language-specific make options collected earlier.
    cmdline = [
        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
        '%d' % args.jobs,
        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
        'CONFIG=%s' % cfg, 'Q='
    ] + language_make_options
    if args.travis:
        cmdline = cmdline + ['JENKINS_BUILD=1']
    return [jobset.JobSpec(cmdline + targets, timeout_seconds=None)]
# Group the make targets of all selected languages by the Makefile that
# builds them, so one make invocation can cover several languages.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
def build_step_environ(cfg):
    """Return the environment for a build step.

    Always contains CONFIG; adds MSBUILD_CONFIG when `cfg` maps to an
    MSBuild configuration (i.e. on Windows builds).
    """
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
# Pre-build steps for every language (deduplicated); flaky steps retried twice.
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
# Make/cmake build commands derived from the per-Makefile target groups.
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
# Language-specific build steps run after the make-based builds.
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))
# Steps to run once after all tests finished (e.g. coverage reports).
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Best-effort shutdown of a legacy port server on `legacy_server_port`.

    Probes the server's /version_number endpoint; if anything responds with a
    parseable version, asks it to quit via /quitquitquit.  If nothing is
    listening (or the response is garbage) there is nothing to shut down and
    the function returns silently.
    """
    try:
        # Probe; int() also validates that a real port server answered.
        int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Catch only real errors from the probe.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Failure categories reported by _build_and_run.

    Each attribute is a unique sentinel; compare with `in`/identity only.
    """
    BUILD = object()  # a build step failed
    TEST = object()  # at least one test failed
    POST_TEST = object()  # a post-test step failed
def _has_epollexclusive():
    """Return True iff the check_epollexclusive helper binary exists and
    exits successfully, i.e. the kernel supports EPOLLEXCLUSIVE."""
    probe = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(probe):
        return False
    try:
        subprocess.check_call(probe)
    except (subprocess.CalledProcessError, OSError):
        # Non-zero exit, or no such binary (languages other than C, Windows).
        return False
    return True
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Runs the build steps first (serially, stopping on the first failure),
    then the selected test jobs, then the post-test steps.  Returns a list
    of BuildAndRunError markers; an empty list means complete success.
    """
    # Build serially (maxjobs=1) and abort on the first build failure.
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the epollex polling strategy when the kernel lacks EPOLLEXCLUSIVE.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # Start CPU-burning antagonist processes and the port server before tests.
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # One "run" is every test spec matching --regex and not matching
        # --regex_exclude.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # whereas otherwise, we want to shuffle things up to give all tests a
        # chance to run.
        if args.travis and args.max_time <= 0:
            # on travis, sort the tests by cpu cost to get better parallelism
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            massaged_one_run = list(
                one_run)
            num_jobs = len(massaged_one_run)
            # Optionally run only a random sample of the tests (-p).
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)
        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        # Summarize flaky vs hard-failing tests.
        if resultset:
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        # Always reap the antagonists, even when tests raise.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
# Top-level driver: in --forever mode, rebuild and rerun whenever files
# change; otherwise run once and exit with a bitmask of failure categories.
if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: the old code computed `_build_and_run(...) == 0`.
        # _build_and_run returns a *list* of error markers, so comparing it
        # to 0 was always False: `errors` was always falsy and `success`
        # was never updated, so the "tests are now passing" message never
        # fired correctly.  Keep the list and derive success from it.
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        success = not errors
        if not previous_success and not errors:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code is a bitmask: 1=build, 2=test, 4=post-test failures.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
| true | true |
f71be3b444baa4607d29128c718bb04ae6b2c311 | 32,709 | py | Python | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4)
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
    """Convert one action description dict into a parser action object.

    Delegates to ofctl_utils.to_action with this datapath's protocol and
    parser modules; returns None for unknown action types.
    """
    return ofctl_utils.to_action(
        dic, dp.ofproto, dp.ofproto_parser, dic.get('type'), UTIL)
def _get_actions(dp, dics):
    """Convert a list of action dicts, skipping (and logging) unknown ones."""
    converted = []
    for entry in dics:
        act = to_action(dp, entry)
        if act is None:
            LOG.error('Unknown action type: %s', entry)
        else:
            converted.append(act)
    return converted
def to_instructions(dp, insts):
    """Convert a list of instruction dicts into OFPInstruction* objects.

    Unknown instruction types are logged and skipped.  For APPLY_ACTIONS /
    WRITE_ACTIONS, an instruction is emitted only when at least one action
    converted successfully.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    result = []
    for item in insts:
        kind = item.get('type')
        if kind in ('APPLY_ACTIONS', 'WRITE_ACTIONS'):
            acts = _get_actions(dp, item.get('actions', []))
            if acts:
                inst_id = (ofp.OFPIT_APPLY_ACTIONS
                           if kind == 'APPLY_ACTIONS'
                           else ofp.OFPIT_WRITE_ACTIONS)
                result.append(parser.OFPInstructionActions(inst_id, acts))
        elif kind == 'CLEAR_ACTIONS':
            result.append(
                parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif kind == 'GOTO_TABLE':
            result.append(
                parser.OFPInstructionGotoTable(
                    str_to_int(item.get('table_id'))))
        elif kind == 'WRITE_METADATA':
            # An absent mask means "write all bits".
            mask = (str_to_int(item['metadata_mask'])
                    if 'metadata_mask' in item
                    else parser.UINT64_MAX)
            result.append(
                parser.OFPInstructionWriteMetadata(
                    str_to_int(item.get('metadata')), mask))
        elif kind == 'METER':
            result.append(
                parser.OFPInstructionMeter(str_to_int(item.get('meter_id'))))
        else:
            LOG.error('Unknown instruction type: %s', kind)
    return result
def action_to_str(act):
    """Render one OFPAction as a JSON-style dict with a user-facing type.

    SET_FIELD actions have their nested OXM TLV flattened into the top-level
    'field'/'mask'/'value' keys.
    """
    rendered = act.to_jsondict()[act.__class__.__name__]
    user_type = UTIL.ofp_action_type_to_user(rendered['type'])
    rendered['type'] = (
        'UNKNOWN' if user_type == rendered['type'] else user_type)
    if 'field' in rendered:
        tlv = rendered.pop('field')['OXMTlv']
        rendered['field'] = tlv['field']
        rendered['mask'] = tlv['mask']
        rendered['value'] = tlv['value']
    return rendered
def instructions_to_str(instructions):
    """Render a list of instruction objects as JSON-style dicts.

    Action-carrying instructions (apply/write/clear) get their actions
    expanded via action_to_str; other instructions are rendered as-is with a
    user-facing type name.
    """
    rendered = []
    for inst in instructions:
        d = inst.to_jsondict()[inst.__class__.__name__]
        user_type = UTIL.ofp_instruction_type_to_user(d['type'])
        d['type'] = 'UNKNOWN' if user_type == d['type'] else user_type
        if isinstance(inst, ofproto_v1_4_parser.OFPInstructionActions):
            d['actions'] = [action_to_str(a) for a in inst.actions]
        rendered.append(d)
    return rendered
def to_match(dp, attrs):
    """Build an OFPMatch from a dict of match-field names to values.

    Accepts both current OXM names and a handful of legacy field names
    (dl_dst, nw_src, ...).  For ARP packets, ipv4_src/ipv4_dst are mapped to
    arp_spa/arp_tpa.  Unknown fields are logged and ignored.

    BUG FIX: previously the caller's `attrs` dict was mutated in place (the
    ARP remapping deleted keys from it); we now work on a shallow copy.
    """
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': str_to_int,
               'metadata': ofctl_utils.to_match_masked_int,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'eth_type': str_to_int,
               'vlan_vid': to_match_vid,
               'vlan_pcp': str_to_int,
               'ip_dscp': str_to_int,
               'ip_ecn': str_to_int,
               'ip_proto': str_to_int,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tcp_src': str_to_int,
               'tcp_dst': str_to_int,
               'udp_src': str_to_int,
               'udp_dst': str_to_int,
               'sctp_src': str_to_int,
               'sctp_dst': str_to_int,
               'icmpv4_type': str_to_int,
               'icmpv4_code': str_to_int,
               'arp_op': str_to_int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': str_to_int,
               'icmpv6_type': str_to_int,
               'icmpv6_code': str_to_int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': str_to_int,
               'mpls_tc': str_to_int,
               'mpls_bos': str_to_int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int,
               'pbb_uca': str_to_int}

    # Legacy (OF1.0-style) field names mapped to their OXM equivalents.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}

    # Work on a copy so the caller's dict is never mutated.
    attrs = dict(attrs)

    if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        # For ARP, the IPv4 address fields actually mean the ARP SPA/TPA.
        if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['ipv4_src']
            del attrs['ipv4_src']
        if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['ipv4_dst']
            del attrs['ipv4_dst']

    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            # For old field name
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)

    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    """Convert a VLAN VID match value, honoring OF1.4's OFPVID_PRESENT bit."""
    return ofctl_utils.to_match_vid(value, ofproto_v1_4.OFPVID_PRESENT)
def match_to_str(ofmatch):
    """Flatten an OFPMatch into a {field_name: value} dict.

    vlan_vid and in_port get field-specific rendering; any other masked
    field is rendered as the string "value/mask".
    """
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    rendered = {}
    for entry in oxm_fields:
        tlv = entry['OXMTlv']
        key = tlv['field']
        mask = tlv['mask']
        value = tlv['value']
        if key == 'vlan_vid':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif mask is not None:
            value = str(value) + '/' + str(mask)
        rendered.setdefault(key, value)
    return rendered
def match_vid_to_str(value, mask):
    """Render a VLAN VID match value/mask, stripping OF1.4's OFPVID_PRESENT."""
    return ofctl_utils.match_vid_to_str(
        value, mask, ofproto_v1_4.OFPVID_PRESENT)
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap `value` in a one-entry dict keyed by the datapath id.

    The key is the dpid as a string when to_user is True (REST-style JSON
    output) and the raw dpid otherwise.
    """
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query switch description stats; return {dpid: desc-dict}."""
    req = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)
    desc = {}
    for reply in replies:
        body = reply.body
        desc = body.to_jsondict()[body.__class__.__name__]
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue statistics; return {dpid: [stat-dict, ...]}.

    port_no/queue_id default to "all" (OFPP_ANY / OFPQ_ALL); when to_user
    is True, property type codes are converted to user-facing names.
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)

    stats = dp.ofproto_parser.OFPQueueStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    desc = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != p['type'] else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            desc.append(s)
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue descriptions (OF1.4 queue config); return {dpid: [dict]}.

    port_no/queue_id default to "all" (OFPP_ANY / OFPQ_ALL).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)

    stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for queue in msg.body:
            q = queue.to_jsondict()[queue.__class__.__name__]
            prop_list = []
            for prop in queue.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                prop_list.append(p)
            q['properties'] = prop_list
            configs.append(q)
    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query flow stats filtered by the optional `flow` dict; return
    {dpid: [flow-dict, ...]} with instructions/match rendered as dicts."""
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    # Note: OpenFlow does not allow to filter flow entries by priority,
    # but for efficiency, ofctl provides the way to do it.
    priority = str_to_int(flow.get('priority', -1))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Chained comparison: skip the entry when a non-negative
            # priority filter was given and this entry does not match it.
            if 0 <= priority != stats.priority:
                continue
            s = stats.to_jsondict()[stats.__class__.__name__]
            s['instructions'] = instructions_to_str(stats.instructions)
            s['match'] = match_to_str(stats.match)
            flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query aggregate flow statistics filtered by the optional `flow` dict;
    return {dpid: [aggregate-dict, ...]}."""
    flow = flow or {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    req = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    aggregates = [
        reply.body.to_jsondict()[reply.body.__class__.__name__]
        for reply in replies
    ]
    return wrap_dpid_dict(dp, aggregates, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Query per-table statistics; return {dpid: [table-dict, ...]}."""
    req = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    tables = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            if to_user:
                entry['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(entry)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Query table features; return {dpid: [table-feature-dict, ...]}.

    Each feature property is decoded according to its property-type family
    (instructions, next tables, actions, OXM ids, experimenter).
    """
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    # Property-type families that share a decoding shape.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]

    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS,
                          ofproto.OFPTFPT_TABLE_SYNC_FROM]

    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]

    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]

    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]

    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                if prop.type in p_type_instructions:
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len,
                                'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif prop.type in p_type_next_tables:
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif prop.type in p_type_actions:
                    action_ids = []
                    for i in prop.action_ids:
                        act = i.to_jsondict()[i.__class__.__name__]
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif prop.type in p_type_oxms:
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = i.to_jsondict()[i.__class__.__name__]
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif prop.type in p_type_experimenter:
                    # Experimenter properties carry no decodable payload here.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
    """Query per-port statistics; return {dpid: [port-stat-dict, ...]}.

    port_no defaults to all ports (OFPP_ANY).

    CONSISTENCY FIX: the property-type conversion to a user-facing name is
    now guarded by `if to_user:`, matching get_queue_stats/get_port_desc;
    previously the conversion ran even when to_user was False.
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)

    stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            properties = []
            for prop in stats.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            if to_user:
                s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
            ports.append(s)
    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Query meter statistics; return {dpid: [meter-stat-dict, ...]}.

    meter_id defaults to all meters (OFPM_ALL).
    """
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)

    req = dp.ofproto_parser.OFPMeterStatsRequest(
        dp, 0, meter_id)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    meters = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            entry['band_stats'] = [
                band.to_jsondict()[band.__class__.__name__]
                for band in stat.band_stats
            ]
            if to_user:
                entry['meter_id'] = UTIL.ofp_meter_to_user(stat.meter_id)
            meters.append(entry)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query meter feature capabilities; return {dpid: [feature-dict, ...]}.

    Band types and capability bitmasks are expanded to name lists when
    to_user is True, raw bit positions/values otherwise.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}

    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}

    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            for k, v in type_convert.items():
                # band_types is a bitmap indexed by band-type value.
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)

                    else:
                        band_types.append(k)

            capabilities = []
            for k, v in sorted(capa_convert.items()):
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)

                    else:
                        capabilities.append(k)

            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query meter configurations; return {dpid: [config-dict, ...]}.

    meter_id defaults to all meters (OFPM_ALL).  Flag bits and band types
    are expanded to names when to_user is True.
    """
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}

    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)

    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for config in msg.body:
            c = config.to_jsondict()[config.__class__.__name__]
            bands = []
            for band in config.bands:
                b = band.to_jsondict()[band.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_meter_band_type_to_user(band.type)
                    b['type'] = t if t != band.type else 'UNKNOWN'
                bands.append(b)
            c_flags = []
            for k, v in sorted(flags.items()):
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)

                    else:
                        c_flags.append(k)

            c['flags'] = c_flags
            c['bands'] = bands
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)

            configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Query group statistics; return {dpid: [group-stat-dict, ...]}.

    group_id defaults to all groups (OFPG_ALL).
    """
    if group_id is None:
        group_id = dp.ofproto.OFPG_ALL
    else:
        group_id = UTIL.ofp_group_from_user(group_id)

    req = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, 0, group_id)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    groups = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            entry['bucket_stats'] = [
                bs.to_jsondict()[bs.__class__.__name__]
                for bs in stat.bucket_stats
            ]
            if to_user:
                entry['group_id'] = UTIL.ofp_group_to_user(stat.group_id)
            groups.append(entry)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query group feature capabilities; return {dpid: [feature-dict, ...]}.

    Group types, capability flags, max-groups and supported actions per
    group type are expanded to names when to_user is True.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB',
                   ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER'}

    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        for k, v in type_convert.items():
            # `types` is a bitmap indexed by group-type value.
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)

                else:
                    types.append(k)

        capabilities = []
        for k, v in cap_convert.items():
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)

                else:
                    capabilities.append(k)

        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                max_groups.append({v: feature.max_groups[k]})

        else:
            max_groups = feature.max_groups

        actions = []
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                # feature.actions[k1] is a bitmap of supported action types
                # for group type k1.
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)

                    else:
                        acts.append(k2)

            if to_user:
                actions.append({v1: acts})

            else:
                actions.append({k1: acts})

        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Query group descriptions; return {dpid: [group-desc-dict, ...]}.

    NOTE(review): when to_user is False, bucket actions are appended as raw
    action objects (not jsondicts), unlike the to_user path — presumably
    intentional for internal callers; confirm before relying on it.
    """
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []
    for msg in msgs:
        for stats in msg.body:
            d = stats.to_jsondict()[stats.__class__.__name__]
            buckets = []
            for bucket in stats.buckets:
                b = bucket.to_jsondict()[bucket.__class__.__name__]
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))

                    else:
                        actions.append(action)
                b['actions'] = actions
                buckets.append(b)
            d['buckets'] = buckets
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                t = UTIL.ofp_group_type_to_user(stats.type)
                d['type'] = t if t != stats.type else 'UNKNOWN'

            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
    """Query port descriptions; return {dpid: [port-desc-dict, ...]}.

    port_no defaults to all ports (OFPP_ANY).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)

    stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            d = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            d['name'] = stat.name.decode('utf-8')
            d['properties'] = properties
            if to_user:
                d['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Send an OFPFlowMod built from the `flow` dict with command `cmd`
    (add/modify/delete).  Missing keys fall back to protocol defaults."""
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = str_to_int(flow.get('idle_timeout', 0))
    hard_timeout = str_to_int(flow.get('hard_timeout', 0))
    priority = str_to_int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    importance = str_to_int(flow.get('importance', 0))
    flags = str_to_int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_instructions(dp, flow.get('instructions', []))

    # Note the OF1.4 OFPFlowMod argument order: flags before importance.
    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, importance, match, inst)

    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Send an OFPMeterMod built from the `meter` dict with command `cmd`.

    'flags' may be a single name or a list of names; unknown flags and
    unknown band types are logged and skipped.
    """
    flags = 0
    if 'flags' in meter:
        meter_flags = meter['flags']
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            t = UTIL.ofp_meter_flags_from_user(flag)
            # Conversion returning the input unchanged means it was unknown.
            f = t if t != flag else None
            if f is None:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= f

    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))

    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = str_to_int(band.get('rate', 0))
        burst_size = str_to_int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = str_to_int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = str_to_int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)

    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)

    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Send an OFPGroupMod built from the `group` dict with command `cmd`.

    NOTE(review): an unknown 'type' is logged but group_type is left as None
    and the OFPGroupMod is still sent — presumably the parser rejects it
    later; confirm whether this should abort instead.
    """
    group_type = str(group.get('type', 'ALL'))
    t = UTIL.ofp_group_type_from_user(group_type)
    # Conversion returning the input unchanged means it was unknown.
    group_type = t if t != group_type else None
    if group_type is None:
        LOG.error('Unknown group type: %s', group.get('type'))

    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = str_to_int(bucket.get('weight', 0))
        watch_port = str_to_int(
            bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = str_to_int(
            bucket.get('watch_group', dp.ofproto.OFPG_ANY))

        # create bucket
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)

    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Send an OFPPortMod built from the `port_config` dict.

    'properties' entries are converted per property type (ethernet, optical,
    experimenter); unknown property types are logged and skipped.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = str_to_int(port_config.get('config', 0))
    mask = str_to_int(port_config.get('mask', 0))
    properties = port_config.get('properties')

    prop = []
    for p in properties:
        type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
        # Length is computed by the parser on serialization.
        length = None
        if type_ == ofp.OFPPDPT_ETHERNET:
            advertise = UTIL.ofp_port_features_from_user(p['advertise'])
            prop.append(
                parser.OFPPortModPropEthernet(type_, length, advertise))
        elif type_ == ofp.OFPPDPT_OPTICAL:
            prop.append(
                parser.OFPPortModPropOptical(
                    type_, length, p['configure'], p['freq_lmda'],
                    p['fl_offset'], p['grid_span'], p['tx_pwr']))
        elif type_ == ofp.OFPPDPT_EXPERIMENTER:
            prop.append(
                parser.OFPPortModPropExperimenter(
                    type_, length, p['experimenter'], p['exp_type'],
                    p['data']))
        else:
            LOG.error('Unknown port desc prop type: %s', type_)

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, prop)

    ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
    """Send an OFPRoleRequest configuring the controller role on *dp*."""
    requested = role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL)
    ofp_role = UTIL.ofp_role_from_user(requested)
    request = dp.ofproto_parser.OFPRoleRequest(dp, ofp_role, 0)
    ofctl_utils.send_msg(dp, request, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter
| 33.965732 | 78 | 0.575897 |
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4)
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
    """Convert an action description dict into an OFPAction object.

    Delegates the actual conversion to :func:`ofctl_utils.to_action`.
    """
    act_type = dic.get('type')
    return ofctl_utils.to_action(
        dic, dp.ofproto, dp.ofproto_parser, act_type, UTIL)
def _get_actions(dp, dics):
    """Convert a list of action dicts, logging and skipping unknown ones."""
    converted = []
    for entry in dics:
        act = to_action(dp, entry)
        if act is None:
            LOG.error('Unknown action type: %s', entry)
        else:
            converted.append(act)
    return converted
def to_instructions(dp, insts):
    """Convert a list of instruction dicts into OFPInstruction objects.

    Unknown instruction types are logged and skipped.  An actions
    instruction with an empty/unconvertible action list is dropped.
    """
    instructions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for i in insts:
        inst_type = i.get('type')
        if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']:
            dics = i.get('actions', [])
            actions = _get_actions(dp, dics)
            if actions:
                if inst_type == 'APPLY_ACTIONS':
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                     actions))
                else:
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
                                                     actions))
        elif inst_type == 'CLEAR_ACTIONS':
            instructions.append(
                parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif inst_type == 'GOTO_TABLE':
            table_id = str_to_int(i.get('table_id'))
            instructions.append(parser.OFPInstructionGotoTable(table_id))
        elif inst_type == 'WRITE_METADATA':
            metadata = str_to_int(i.get('metadata'))
            # Full 64-bit mask when the caller does not provide one.
            metadata_mask = (str_to_int(i['metadata_mask'])
                             if 'metadata_mask' in i
                             else parser.UINT64_MAX)
            instructions.append(
                parser.OFPInstructionWriteMetadata(
                    metadata, metadata_mask))
        elif inst_type == 'METER':
            meter_id = str_to_int(i.get('meter_id'))
            instructions.append(parser.OFPInstructionMeter(meter_id))
        else:
            LOG.error('Unknown instruction type: %s', inst_type)
    return instructions
def action_to_str(act):
    """Render an OFPAction as a JSON-friendly dict with user-level names."""
    rendered = act.to_jsondict()[act.__class__.__name__]
    user_type = UTIL.ofp_action_type_to_user(rendered['type'])
    if user_type == rendered['type']:
        rendered['type'] = 'UNKNOWN'
    else:
        rendered['type'] = user_type
    # SET_FIELD actions carry an OXM TLV; flatten it into the dict.
    if 'field' in rendered:
        tlv = rendered.pop('field')['OXMTlv']
        rendered['field'] = tlv['field']
        rendered['mask'] = tlv['mask']
        rendered['value'] = tlv['value']
    return rendered
def instructions_to_str(instructions):
    """Render a list of OFPInstruction objects as JSON-friendly dicts."""
    rendered = []
    for inst in instructions:
        entry = inst.to_jsondict()[inst.__class__.__name__]
        user_type = UTIL.ofp_instruction_type_to_user(entry['type'])
        entry['type'] = user_type if user_type != entry['type'] else 'UNKNOWN'
        if isinstance(inst, ofproto_v1_4_parser.OFPInstructionActions):
            entry['actions'] = [action_to_str(a) for a in inst.actions]
        rendered.append(entry)
    return rendered
def to_match(dp, attrs):
    """Build an OFPMatch from a user-supplied {field: value} dict.

    ``keys`` maps legacy OF1.0-style names (dl_dst, nw_src, ...) to the
    OF1.4 field names.  Unknown fields are logged and dropped.  Note the
    input dict is mutated when ARP aliasing applies.
    """
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': str_to_int,
               'metadata': ofctl_utils.to_match_masked_int,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'eth_type': str_to_int,
               'vlan_vid': to_match_vid,
               'vlan_pcp': str_to_int,
               'ip_dscp': str_to_int,
               'ip_ecn': str_to_int,
               'ip_proto': str_to_int,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tcp_src': str_to_int,
               'tcp_dst': str_to_int,
               'udp_src': str_to_int,
               'udp_dst': str_to_int,
               'sctp_src': str_to_int,
               'sctp_dst': str_to_int,
               'icmpv4_type': str_to_int,
               'icmpv4_code': str_to_int,
               'arp_op': str_to_int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': str_to_int,
               'icmpv6_type': str_to_int,
               'icmpv6_code': str_to_int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': str_to_int,
               'mpls_tc': str_to_int,
               'mpls_bos': str_to_int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int,
               'pbb_uca': str_to_int}
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}
    # For ARP packets, legacy nw_src/nw_dst really refer to the ARP
    # sender/target protocol addresses; rewrite them accordingly.
    if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['ipv4_src']
            del attrs['ipv4_src']
        if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['ipv4_dst']
            del attrs['ipv4_dst']
    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)
    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    """Parse a user VLAN-ID match value using the OF1.4 PRESENT bit."""
    return ofctl_utils.to_match_vid(value, ofproto_v1_4.OFPVID_PRESENT)
def match_to_str(ofmatch):
    """Render an OFPMatch as a {field: value} dict with user-level values."""
    result = {}
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    for field_entry in oxm_fields:
        tlv = field_entry['OXMTlv']
        key = tlv['field']
        mask = tlv['mask']
        value = tlv['value']
        if key == 'vlan_vid':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif mask is not None:
            # Masked fields render as "value/mask".
            value = '{}/{}'.format(value, mask)
        # setdefault keeps the first TLV if a field somehow repeats.
        result.setdefault(key, value)
    return result
def match_vid_to_str(value, mask):
    """Render a matched (vlan_vid, mask) pair as a user-facing value."""
    return ofctl_utils.match_vid_to_str(
        value, mask, ofproto_v1_4.OFPVID_PRESENT)
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap *value* in a dict keyed by datapath ID (stringified if to_user)."""
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query the switch description; returns ``{dpid: desc-dict}``."""
    request = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    desc = {}
    for reply in replies:
        body = reply.body
        # A single reply is expected; the last one wins either way.
        desc = body.to_jsondict()[body.__class__.__name__]
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue statistics, optionally filtered by port and queue.

    ``None`` for ``port_no``/``queue_id`` means "all".  Returns
    ``{dpid: [stat-dict, ...]}``; when ``to_user`` is set, property
    types are translated to user-facing names ('UNKNOWN' if unmapped).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)
    stats = dp.ofproto_parser.OFPQueueStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    desc = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != p['type'] else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            desc.append(s)
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue descriptions, optionally filtered by port and queue.

    ``None`` for ``port_no``/``queue_id`` means "all".  Returns
    ``{dpid: [queue-dict, ...]}``.
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)
    stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    configs = []
    for msg in msgs:
        for queue in msg.body:
            q = queue.to_jsondict()[queue.__class__.__name__]
            prop_list = []
            for prop in queue.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                prop_list.append(p)
            q['properties'] = prop_list
            configs.append(q)
    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query flow stats matching the filter described by the ``flow`` dict.

    ``flow['priority']`` (default -1) post-filters the replies: when it
    is >= 0 only entries with exactly that priority are kept.
    """
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    # -1 means "no priority filtering".
    priority = str_to_int(flow.get('priority', -1))
    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Chained comparison: skip when priority >= 0 and differs.
            if 0 <= priority != stats.priority:
                continue
            s = stats.to_jsondict()[stats.__class__.__name__]
            s['instructions'] = instructions_to_str(stats.instructions)
            s['match'] = match_to_str(stats.match)
            flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query aggregate statistics over flows matching the ``flow`` dict."""
    criteria = flow if flow else {}
    ofp = dp.ofproto
    table_id = UTIL.ofp_table_from_user(
        criteria.get('table_id', ofp.OFPTT_ALL))
    flags = str_to_int(criteria.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        criteria.get('out_port', ofp.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        criteria.get('out_group', ofp.OFPG_ANY))
    cookie = str_to_int(criteria.get('cookie', 0))
    cookie_mask = str_to_int(criteria.get('cookie_mask', 0))
    match = to_match(dp, criteria.get('match', {}))
    request = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    summaries = []
    for reply in replies:
        body = reply.body
        summaries.append(body.to_jsondict()[body.__class__.__name__])
    return wrap_dpid_dict(dp, summaries, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Query per-table statistics; returns ``{dpid: [table-dict, ...]}``."""
    request = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    tables = []
    for reply in replies:
        for entry in reply.body:
            rendered = entry.to_jsondict()[entry.__class__.__name__]
            if to_user:
                rendered['table_id'] = UTIL.ofp_table_to_user(entry.table_id)
            tables.append(rendered)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Query table features and decode the per-table property lists.

    Each property is decoded according to its type family (instruction
    IDs, next-table IDs, action IDs, OXM IDs, or experimenter payload).
    Returns ``{dpid: [table-dict, ...]}``.
    """
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    # Group the OFPTFPT_* property constants by how their payload is
    # structured, so decoding below is a membership test per family.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]
    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS,
                          ofproto.OFPTFPT_TABLE_SYNC_FROM]
    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]
    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]
    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                if prop.type in p_type_instructions:
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len,
                                'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif prop.type in p_type_next_tables:
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif prop.type in p_type_actions:
                    action_ids = []
                    for i in prop.action_ids:
                        act = i.to_jsondict()[i.__class__.__name__]
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif prop.type in p_type_oxms:
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = i.to_jsondict()[i.__class__.__name__]
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif prop.type in p_type_experimenter:
                    # Experimenter payloads are left undecoded.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
    """Query per-port counters; ``None`` port_no means all ports.

    Returns ``{dpid: [port-stat-dict, ...]}``.  When ``to_user`` is
    set, the port number and property types are translated to
    user-facing values ('UNKNOWN' for unmapped property types).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            properties = []
            for prop in stats.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                # Guarded by to_user for consistency with the other
                # *_stats/*_desc helpers in this module: to_user=False
                # callers get raw protocol values, not display names.
                if to_user:
                    t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            if to_user:
                s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
            ports.append(s)
    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Query meter statistics; ``None`` meter_id means all meters."""
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)
    request = dp.ofproto_parser.OFPMeterStatsRequest(
        dp, 0, meter_id)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    meters = []
    for reply in replies:
        for stat in reply.body:
            rendered = stat.to_jsondict()[stat.__class__.__name__]
            rendered['band_stats'] = [
                band.to_jsondict()[band.__class__.__name__]
                for band in stat.band_stats]
            if to_user:
                rendered['meter_id'] = UTIL.ofp_meter_to_user(stat.meter_id)
            meters.append(rendered)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query meter features, decoding band-type and capability bitmasks.

    Returns ``{dpid: [feature-dict, ...]}``; with ``to_user`` the bits
    are rendered as names, otherwise the raw constants are returned.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}
    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            # band_types is a bitmask indexed by band-type constant.
            for k, v in type_convert.items():
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)
                    else:
                        band_types.append(k)
            capabilities = []
            # capabilities uses the OFPMF_* flag values directly.
            for k, v in sorted(capa_convert.items()):
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)
                    else:
                        capabilities.append(k)
            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query meter configurations; ``None`` meter_id means all meters.

    Returns ``{dpid: [config-dict, ...]}`` with flag bits and band
    types decoded (to names when ``to_user`` is set).
    """
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)
    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    configs = []
    for msg in msgs:
        for config in msg.body:
            c = config.to_jsondict()[config.__class__.__name__]
            bands = []
            for band in config.bands:
                b = band.to_jsondict()[band.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_meter_band_type_to_user(band.type)
                    b['type'] = t if t != band.type else 'UNKNOWN'
                bands.append(b)
            c_flags = []
            # Decode the OFPMF_* bitmask into a list of set flags.
            for k, v in sorted(flags.items()):
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)
                    else:
                        c_flags.append(k)
            c['flags'] = c_flags
            c['bands'] = bands
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
            configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Query group statistics; ``None`` group_id means all groups."""
    if group_id is None:
        group_id = dp.ofproto.OFPG_ALL
    else:
        group_id = UTIL.ofp_group_from_user(group_id)
    request = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, 0, group_id)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    groups = []
    for reply in replies:
        for stat in reply.body:
            rendered = stat.to_jsondict()[stat.__class__.__name__]
            rendered['bucket_stats'] = [
                bucket.to_jsondict()[bucket.__class__.__name__]
                for bucket in stat.bucket_stats]
            if to_user:
                rendered['group_id'] = UTIL.ofp_group_to_user(stat.group_id)
            groups.append(rendered)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query group features, decoding type/capability/action bitmasks.

    Returns ``{dpid: [feature-dict, ...]}``.  ``types`` and the
    per-group-type ``actions`` masks are bit-indexed by constant
    (``1 << k``); ``capabilities`` uses the flag values directly.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB',
                   ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER'}
    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        for k, v in type_convert.items():
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)
                else:
                    types.append(k)
        capabilities = []
        for k, v in cap_convert.items():
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)
                else:
                    capabilities.append(k)
        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                max_groups.append({v: feature.max_groups[k]})
        else:
            max_groups = feature.max_groups
        actions = []
        # One supported-action mask per group type.
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)
                    else:
                        acts.append(k2)
            if to_user:
                actions.append({v1: acts})
            else:
                actions.append({k1: acts})
        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Query group descriptions; returns ``{dpid: [group-dict, ...]}``.

    With ``to_user`` the group ID, group type, and bucket actions are
    rendered as user-facing values; otherwise the raw objects/constants
    are returned.
    """
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    descs = []
    for msg in msgs:
        for stats in msg.body:
            d = stats.to_jsondict()[stats.__class__.__name__]
            buckets = []
            for bucket in stats.buckets:
                b = bucket.to_jsondict()[bucket.__class__.__name__]
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))
                    else:
                        # Raw mode keeps the OFPAction object itself.
                        actions.append(action)
                b['actions'] = actions
                buckets.append(b)
            d['buckets'] = buckets
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                t = UTIL.ofp_group_type_to_user(stats.type)
                d['type'] = t if t != stats.type else 'UNKNOWN'
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
    """Query port descriptions; ``None`` port_no means all ports."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    request = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    descs = []
    for reply in replies:
        for port in reply.body:
            rendered = port.to_jsondict()[port.__class__.__name__]
            props = []
            for prop in port.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                props.append(p)
            rendered['name'] = port.name.decode('utf-8')
            rendered['properties'] = props
            if to_user:
                rendered['port_no'] = UTIL.ofp_port_to_user(port.port_no)
            descs.append(rendered)
    return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Build an OFPFlowMod from the ``flow`` dict and send it to *dp*.

    ``cmd`` is the OFPFC_* flow-mod command.  Missing fields fall back
    to protocol "don't care" defaults (any port/group, no buffer).
    """
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = str_to_int(flow.get('idle_timeout', 0))
    hard_timeout = str_to_int(flow.get('hard_timeout', 0))
    priority = str_to_int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    importance = str_to_int(flow.get('importance', 0))
    flags = str_to_int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_instructions(dp, flow.get('instructions', []))
    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, importance, match, inst)
    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Build an OFPMeterMod from the ``meter`` dict and send it to *dp*.

    ``meter['flags']`` may be a single flag name or a list of names;
    unknown flags and unknown band types are logged and skipped.
    """
    flags = 0
    if 'flags' in meter:
        meter_flags = meter['flags']
        # Accept a scalar flag as well as a list of flags.
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            t = UTIL.ofp_meter_flags_from_user(flag)
            # An unchanged value from the converter means "unknown flag".
            f = t if t != flag else None
            if f is None:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= f
    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))
    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = str_to_int(band.get('rate', 0))
        burst_size = str_to_int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = str_to_int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = str_to_int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)
    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)
    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Build an OFPGroupMod from the ``group`` dict and send it to *dp*."""
    type_name = str(group.get('type', 'ALL'))
    converted = UTIL.ofp_group_type_from_user(type_name)
    # An unchanged value from the converter means "unknown type".
    group_type = converted if converted != type_name else None
    if group_type is None:
        # Log but still send with type None (historical behavior).
        LOG.error('Unknown group type: %s', group.get('type'))
    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))
    buckets = []
    for bucket_dict in group.get('buckets', []):
        weight = str_to_int(bucket_dict.get('weight', 0))
        watch_port = str_to_int(
            bucket_dict.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = str_to_int(
            bucket_dict.get('watch_group', dp.ofproto.OFPG_ANY))
        actions = []
        for action_dict in bucket_dict.get('actions', []):
            converted_act = to_action(dp, action_dict)
            if converted_act is not None:
                actions.append(converted_act)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))
    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)
    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Build an OFPPortMod from ``port_config`` and send it to *dp*.

    ``port_config['properties']`` must be an iterable of property dicts;
    unknown property types are logged and skipped.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = str_to_int(port_config.get('config', 0))
    mask = str_to_int(port_config.get('mask', 0))
    properties = port_config.get('properties')
    prop = []
    for p in properties:
        type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
        # length is passed as None; presumably filled in on serialization.
        length = None
        if type_ == ofp.OFPPDPT_ETHERNET:
            advertise = UTIL.ofp_port_features_from_user(p['advertise'])
            prop.append(
                parser.OFPPortModPropEthernet(type_, length, advertise))
        elif type_ == ofp.OFPPDPT_OPTICAL:
            prop.append(
                parser.OFPPortModPropOptical(
                    type_, length, p['configure'], p['freq_lmda'],
                    p['fl_offset'], p['grid_span'], p['tx_pwr']))
        elif type_ == ofp.OFPPDPT_EXPERIMENTER:
            prop.append(
                parser.OFPPortModPropExperimenter(
                    type_, length, p['experimenter'], p['exp_type'],
                    p['data']))
        else:
            LOG.error('Unknown port desc prop type: %s', type_)
    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, prop)
    ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
    """Send an OFPRoleRequest setting the controller role on *dp*."""
    r = UTIL.ofp_role_from_user(role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL))
    role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, 0)
    ofctl_utils.send_msg(dp, role_request, LOG)
send_experimenter = ofctl_utils.send_experimenter
| true | true |
f71be3d981254befffd5928f1197ba90e0eb5617 | 15,646 | py | Python | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 4 | 2021-03-29T19:15:29.000Z | 2021-06-08T05:34:00.000Z | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 1 | 2021-06-08T06:03:51.000Z | 2021-06-08T06:03:51.000Z | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 1 | 2021-01-31T18:58:54.000Z | 2021-01-31T18:58:54.000Z | # Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
The classes that actually handle the downloads.
"""
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
    """
    Choose the appropriate downloader for the given URL based on the protocol.

    Parameters
    ----------
    url : str
        A URL (including protocol).

    Returns
    -------
    downloader
        A downloader instance (either :class:`pooch.HTTPDownloader`,
        :class:`pooch.FTPDownloader`, or :class:`pooch.SFTPDownloader`).

    Examples
    --------
    >>> downloader = choose_downloader("http://something.com")
    >>> print(downloader.__class__.__name__)
    HTTPDownloader
    >>> downloader = choose_downloader("https://something.com")
    >>> print(downloader.__class__.__name__)
    HTTPDownloader
    >>> downloader = choose_downloader("ftp://something.com")
    >>> print(downloader.__class__.__name__)
    FTPDownloader

    """
    # Keep insertion order stable: the error message below prints the keys.
    known_downloaders = {
        "ftp": FTPDownloader,
        "https": HTTPDownloader,
        "http": HTTPDownloader,
        "sftp": SFTPDownloader,
    }
    protocol = parse_url(url)["protocol"]
    if protocol not in known_downloaders:
        raise ValueError(
            f"Unrecognized URL protocol '{protocol}' in '{url}'. "
            f"Must be one of {known_downloaders.keys()}."
        )
    return known_downloaders[protocol]()
class HTTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over HTTP/HTTPS.

    When called, downloads the given file URL into the specified local file.
    Uses the :mod:`requests` library to manage downloads.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at one. Usually doesn't need to be changed.
    **kwargs
        All keyword arguments given when creating an instance of this class
        will be passed to :func:`requests.get`.

    Examples
    --------
    Download one of the data files from the Pooch repository:

    >>> import os
    >>> from pooch import version, check_version
    >>> url = "https://github.com/fatiando/pooch/raw/{}/data/tiny-data.txt"
    >>> url = url.format(check_version(version.full_version))
    >>> downloader = HTTPDownloader()
    >>> # Not using with Pooch.fetch so no need to pass an instance of Pooch
    >>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
    >>> os.path.exists("tiny-data.txt")
    True
    >>> with open("tiny-data.txt") as f:
    ...     print(f.read().strip())
    # A tiny data file for test purposes only
    1 2 3 4 5 6
    >>> os.remove("tiny-data.txt")

    Authentication can be handled by passing a user name and password to
    :func:`requests.get`. All arguments provided when creating an instance of
    the class are forwarded to :func:`requests.get`. We'll use
    ``auth=(username, password)`` to use basic HTTPS authentication. The
    https://httpbin.org website allows us to make a fake a login request using
    whatever username and password we provide to it:

    >>> user = "doggo"
    >>> password = "goodboy"
    >>> # httpbin will ask for the user and password we provide in the URL
    >>> url = f"https://httpbin.org/basic-auth/{user}/{password}"
    >>> # Trying without the login credentials causes an error
    >>> downloader = HTTPDownloader()
    >>> try:
    ...     downloader(url=url, output_file="tiny-data.txt", pooch=None)
    ... except Exception:
    ...     print("There was an error!")
    There was an error!
    >>> # Pass in the credentials to HTTPDownloader
    >>> downloader = HTTPDownloader(auth=(user, password))
    >>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
    >>> with open("tiny-data.txt") as f:
    ...     for line in f:
    ...         print(line.rstrip())
    {
      "authenticated": true,
      "user": "doggo"
    }
    >>> os.remove("tiny-data.txt")

    """

    def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
        # Extra kwargs are forwarded verbatim to requests.get.
        self.kwargs = kwargs
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early instead of at download time if tqdm is missing.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over HTTP to the given output file.

        Uses :func:`requests.get`.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.

        """
        kwargs = self.kwargs.copy()
        # Stream by default so large files are not held in memory.
        kwargs.setdefault("stream", True)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            response = requests.get(url, **kwargs)
            response.raise_for_status()
            content = response.iter_content(chunk_size=self.chunk_size)
            if self.progressbar:
                total = int(response.headers.get("content-length", 0))
                # Need to use ascii characters on Windows because there isn't
                # always full unicode support
                # (see https://github.com/tqdm/tqdm/issues/454)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=total,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
            for chunk in content:
                if chunk:
                    output_file.write(chunk)
                    output_file.flush()
                    if self.progressbar:
                        # Use the chunk size here because chunk may be much
                        # larger if the data are decompressed by requests after
                        # reading (happens with text files).
                        progress.update(self.chunk_size)
            # Make sure the progress bar gets filled even if the actual number
            # is chunks is smaller than expected. This happens when streaming
            # text files that are compressed by the server when sending (gzip).
            # Binary files don't experience this.
            if self.progressbar:
                progress.reset()
                progress.update(total)
                progress.close()
        finally:
            # Only close files we opened ourselves.
            if ispath:
                output_file.close()
class FTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over FTP.

    When called, downloads the given file URL into the specified local file.
    Uses the :mod:`ftplib` module to manage downloads.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    port : int
        Port used for the FTP connection.
    username : str
        User name used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous FTP).
    password : str
        Password used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous FTP). Use the empty string
        to indicate no password is required.
    account : str
        Some servers also require an "account" name for authentication.
    timeout : int
        Timeout in seconds for ftp socket operations, use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at one. Usually doesn't need to be changed.

    """

    def __init__(
        self,
        port=21,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
        chunk_size=1024,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early instead of at download time if tqdm is missing.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over FTP to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.

        """
        parsed_url = parse_url(url)
        ftp = ftplib.FTP(timeout=self.timeout)
        ftp.connect(host=parsed_url["netloc"], port=self.port)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            ftp.login(user=self.username, passwd=self.password, acct=self.account)
            command = f"RETR {parsed_url['path']}"
            if self.progressbar:
                # Make sure the file is set to binary mode, otherwise we can't
                # get the file size. See: https://stackoverflow.com/a/22093848
                ftp.voidcmd("TYPE I")
                size = int(ftp.size(parsed_url["path"]))
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:

                    def callback(data):
                        "Update the progress bar and write to output"
                        progress.update(len(data))
                        output_file.write(data)

                    ftp.retrbinary(command, callback, blocksize=self.chunk_size)
            else:
                ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
        finally:
            # Always close the connection; only close files we opened.
            ftp.quit()
            if ispath:
                output_file.close()
class SFTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over SFTP.

    When called, downloads the given file URL into the specified local file.
    Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
    installed.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    port : int
        Port used for the SFTP connection.
    username : str
        User name used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous SFTP).
    password : str
        Password used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous SFTP). Use the empty
        string to indicate no password is required.
    timeout : int
        Timeout in seconds for sftp socket operations, use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard
        error (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to
        be installed.
    """

    def __init__(
        self,
        port=22,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
    ):
        self.port = port
        self.username = username
        self.password = password
        # "account" is kept for API symmetry with FTPDownloader; paramiko
        # does not use it.
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        # Collect errors and raise only once so that both missing packages are
        # captured. Otherwise, the user is only warned of one of them at a
        # time (and we can't test properly when they are both missing).
        errors = []
        if self.progressbar and tqdm is None:
            errors.append("Missing package 'tqdm' required for progress bars.")
        if paramiko is None:
            errors.append("Missing package 'paramiko' required for SFTP downloads.")
        if errors:
            raise ValueError(" ".join(errors))

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over SFTP to the given output file.

        The output file must be given as a string (file name/path) and not an
        open file object! Otherwise, paramiko cannot save to that file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str
            Path (and file name) to which the file will be downloaded. **Cannot
            be a file object**.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        parsed_url = parse_url(url)
        connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
        sftp = None
        try:
            connection.connect(username=self.username, password=self.password)
            sftp = paramiko.SFTPClient.from_transport(connection)
            # BUG FIX: Channel.settimeout is a method and must be *called*.
            # The old code assigned to it (``settimeout = self.timeout``),
            # which silently left the channel with no timeout configured.
            sftp.get_channel().settimeout(self.timeout)
            if self.progressbar:
                size = int(sftp.stat(parsed_url["path"]).st_size)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:

                    def callback(current, total):
                        "Update the progress bar and write to output"
                        progress.total = int(total)
                        progress.update(int(current - progress.n))

                    sftp.get(parsed_url["path"], output_file, callback=callback)
            else:
                sftp.get(parsed_url["path"], output_file)
        finally:
            # Close the SFTP session before tearing down the transport that
            # carries it (previously the transport was closed first).
            if sftp is not None:
                sftp.close()
            connection.close()
| 35.803204 | 85 | 0.592612 |
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
    """
    Choose the appropriate downloader class based on the URL's protocol.

    Returns a new instance of the downloader (HTTP/HTTPS, FTP, or SFTP)
    that can fetch the given URL. Raises ``ValueError`` for protocols
    that have no registered downloader.
    """
    handlers = {
        "ftp": FTPDownloader,
        "https": HTTPDownloader,
        "http": HTTPDownloader,
        "sftp": SFTPDownloader,
    }
    protocol = parse_url(url)["protocol"]
    try:
        downloader_class = handlers[protocol]
    except KeyError:
        raise ValueError(
            f"Unrecognized URL protocol '{protocol}' in '{url}'. "
            f"Must be one of {handlers.keys()}."
        ) from None
    return downloader_class()
class HTTPDownloader:
    """
    Download manager for fetching files over HTTP/HTTPS.

    When called, downloads the given file URL into the specified local file
    using the ``requests`` library. Extra keyword arguments given to the
    constructor are forwarded to ``requests.get``.

    Parameters
    ----------
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
    **kwargs
        Keyword arguments passed on to ``requests.get`` (e.g. ``auth``,
        ``headers``, ``verify``).
    """
    def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
        self.kwargs = kwargs
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early if the progress bar backend is unavailable.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")
    def __call__(self, url, output_file, pooch):
        """
        Download the given URL to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        kwargs = self.kwargs.copy()
        # Stream by default so large downloads are not held fully in memory.
        kwargs.setdefault("stream", True)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            response = requests.get(url, **kwargs)
            response.raise_for_status()
            content = response.iter_content(chunk_size=self.chunk_size)
            if self.progressbar:
                # "content-length" may be absent (chunked encoding), in which
                # case the total is 0 and the bar is corrected at the end.
                total = int(response.headers.get("content-length", 0))
                # always full unicode support
                # (see https://github.com/tqdm/tqdm/issues/454)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=total,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
            for chunk in content:
                if chunk:
                    output_file.write(chunk)
                    output_file.flush()
                    if self.progressbar:
                        # Use the chunk size here because chunk may be much
                        # larger if the data are decompressed by requests after
                        # reading (happens with text files).
                        progress.update(self.chunk_size)
            # Make sure the progress bar gets filled even if the actual number
            # of chunks is smaller than expected. This happens when streaming
            # text files that are compressed by the server when sending (gzip).
            # Binary files don't experience this.
            if self.progressbar:
                progress.reset()
                progress.update(total)
                progress.close()
        finally:
            if ispath:
                output_file.close()
class FTPDownloader:
    """
    Download manager for fetching files over FTP.

    When called, downloads the given file URL into the specified local file.
    Uses the :mod:`ftplib` module to manage downloads.

    Parameters
    ----------
    port : int
        Port used for the FTP connection.
    username : str
        User name used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous FTP).
    password : str
        Password used to login to the server. Use the empty string to
        indicate no password is required.
    account : str
        Some servers also require an "account" name for authentication.
    timeout : int
        Timeout in seconds for ftp socket operations; use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
    """
    def __init__(
        self,
        port=21,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
        chunk_size=1024,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early if the progress bar backend is unavailable.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")
    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over FTP to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        parsed_url = parse_url(url)
        ftp = ftplib.FTP(timeout=self.timeout)
        ftp.connect(host=parsed_url["netloc"], port=self.port)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            ftp.login(user=self.username, passwd=self.password, acct=self.account)
            command = f"RETR {parsed_url['path']}"
            if self.progressbar:
                # Make sure the file is set to binary mode, otherwise we can't
                # get the file size. See: https://stackoverflow.com/a/22093848
                ftp.voidcmd("TYPE I")
                size = int(ftp.size(parsed_url["path"]))
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:
                    def callback(data):
                        "Update the progress bar and write to output"
                        progress.update(len(data))
                        output_file.write(data)
                    ftp.retrbinary(command, callback, blocksize=self.chunk_size)
            else:
                ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
        finally:
            # NOTE(review): if ftp.quit() raises here (e.g. the server already
            # dropped the connection), output_file is never closed — confirm
            # and consider a nested try/finally that closes the file first.
            ftp.quit()
            if ispath:
                output_file.close()
class SFTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over SFTP.

    When called, downloads the given file URL into the specified local file.
    Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
    installed.

    Parameters
    ----------
    port : int
        Port used for the SFTP connection.
    username : str
        User name used to login to the server.
    password : str
        Password used to login to the server. Use the empty string to
        indicate no password is required.
    timeout : int
        Timeout in seconds for sftp socket operations; use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    """

    def __init__(
        self,
        port=22,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
    ):
        self.port = port
        self.username = username
        self.password = password
        # "account" is kept for API symmetry with FTPDownloader; paramiko
        # does not use it.
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        # Collect errors and raise only once so that both missing packages are
        # captured. Otherwise, the user is only warned of one of them at a
        # time (and we can't test properly when they are both missing).
        errors = []
        if self.progressbar and tqdm is None:
            errors.append("Missing package 'tqdm' required for progress bars.")
        if paramiko is None:
            errors.append("Missing package 'paramiko' required for SFTP downloads.")
        if errors:
            raise ValueError(" ".join(errors))

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over SFTP to the given output file.

        The output file must be given as a string (file name/path), not an
        open file object, because paramiko saves to a path.
        """
        parsed_url = parse_url(url)
        connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
        sftp = None
        try:
            connection.connect(username=self.username, password=self.password)
            sftp = paramiko.SFTPClient.from_transport(connection)
            # BUG FIX: Channel.settimeout is a method and must be *called*.
            # The old code assigned to it (``settimeout = self.timeout``),
            # which silently left the channel with no timeout configured.
            sftp.get_channel().settimeout(self.timeout)
            if self.progressbar:
                size = int(sftp.stat(parsed_url["path"]).st_size)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:
                    def callback(current, total):
                        "Update the progress bar and write to output"
                        progress.total = int(total)
                        progress.update(int(current - progress.n))
                    sftp.get(parsed_url["path"], output_file, callback=callback)
            else:
                sftp.get(parsed_url["path"], output_file)
        finally:
            # Close the SFTP session before tearing down the transport that
            # carries it (previously the transport was closed first).
            if sftp is not None:
                sftp.close()
            connection.close()
| true | true |
f71be436acf919b617f27f823476c8b4531b4b98 | 7,043 | py | Python | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 3 | 2020-01-21T18:09:09.000Z | 2022-01-17T08:06:03.000Z | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Trusted Networks auth provider.
It shows list of users if access from trusted network.
Abort login flow if not access from trusted network.
"""
from ipaddress import ip_network, IPv4Address, IPv6Address, IPv4Network, IPv6Network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from . import AuthProvider, AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, LoginFlow
from ..models import Credentials, UserMeta
# Type aliases covering both IP versions.
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
# Configuration keys accepted by this auth provider.
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
# Schema: trusted_networks is required; trusted_users maps a network to a
# list of user ids (uuid hex) and/or {"group": <group id>} entries.
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
    {
        vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
        vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
            # we only validate the format of user_id or group_id
            {
                ip_network: vol.All(
                    cv.ensure_list,
                    [
                        vol.Or(
                            cv.uuid4_hex,
                            vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
                        )
                    ],
                )
            }
        ),
        vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
    },
    extra=vol.PREVENT_EXTRA,
)
class InvalidAuthError(HomeAssistantError):
    """Raised when trying to access from an untrusted network."""
class InvalidUserError(HomeAssistantError):
    """Raised when trying to log in as an invalid (unknown or inactive) user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
    """Trusted Networks auth provider.

    Allow passwordless access from trusted network.
    """

    DEFAULT_TITLE = "Trusted Networks"

    @property
    def trusted_networks(self) -> List[IPNetwork]:
        """Return trusted networks."""
        return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])

    @property
    def trusted_users(self) -> Dict[IPNetwork, Any]:
        """Return trusted users per network."""
        return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])

    @property
    def support_mfa(self) -> bool:
        """Trusted Networks auth provider does not support MFA."""
        return False

    async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
        """Return a flow to login.

        The selectable users are all active, non-system users, narrowed to
        the configured trusted users/groups for the first network that
        contains the caller's IP address.
        """
        assert context is not None
        ip_addr = cast(IPAddress, context.get("ip_address"))
        users = await self.store.async_get_users()
        available_users = [
            user for user in users if not user.system_generated and user.is_active
        ]
        for ip_net, user_or_group_list in self.trusted_users.items():
            if ip_addr in ip_net:
                # Entries are either plain user-id strings or
                # {"group": <group id>} dicts (see CONFIG_SCHEMA).
                user_list = [
                    user_id
                    for user_id in user_or_group_list
                    if isinstance(user_id, str)
                ]
                group_list = [
                    group[CONF_GROUP]
                    for group in user_or_group_list
                    if isinstance(group, dict)
                ]
                # BUG FIX: the old code "flattened" group_list, but its
                # elements are group-id *strings* (cv.uuid4_hex), so the
                # flattening produced single characters and group-based
                # trusted users could never match. Compare against the id
                # list directly instead.
                available_users = [
                    user
                    for user in available_users
                    if (
                        user.id in user_list
                        or any(group.id in group_list for group in user.groups)
                    )
                ]
                break
        return TrustedNetworksLoginFlow(
            self,
            ip_addr,
            {user.id: user.name for user in available_users},
            self.config[CONF_ALLOW_BYPASS_LOGIN],
        )

    async def async_get_or_create_credentials(
        self, flow_result: Dict[str, str]
    ) -> Credentials:
        """Get credentials based on the flow result.

        Reuses an existing credential for the selected user when one exists;
        otherwise creates and links a new one.
        """
        user_id = flow_result["user"]
        users = await self.store.async_get_users()
        for user in users:
            if not user.system_generated and user.is_active and user.id == user_id:
                for credential in await self.async_credentials():
                    if credential.data["user_id"] == user_id:
                        return credential
                cred = self.async_create_credentials({"user_id": user_id})
                await self.store.async_link_user(user, cred)
                return cred

        # We only allow login as exist user
        raise InvalidUserError

    async def async_user_meta_for_credentials(
        self, credentials: Credentials
    ) -> UserMeta:
        """Return extra user metadata for credentials.

        Trusted network auth provider should never create new user.
        """
        raise NotImplementedError

    @callback
    def async_validate_access(self, ip_addr: IPAddress) -> None:
        """Make sure the access is from a trusted network.

        Raise InvalidAuthError if not.
        Raise InvalidAuthError if trusted_networks is not configured.
        """
        if not self.trusted_networks:
            raise InvalidAuthError("trusted_networks is not configured")

        if not any(
            ip_addr in trusted_network for trusted_network in self.trusted_networks
        ):
            raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
    """Handler for the trusted-networks login flow."""

    def __init__(
        self,
        auth_provider: TrustedNetworksAuthProvider,
        ip_addr: IPAddress,
        available_users: Dict[str, Optional[str]],
        allow_bypass_login: bool,
    ) -> None:
        """Initialize the login flow."""
        super().__init__(auth_provider)
        self._ip_address = ip_addr
        self._available_users = available_users
        self._allow_bypass_login = allow_bypass_login

    async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Handle the step of the form."""
        # Abort immediately if the caller is not on a trusted network.
        provider = cast(TrustedNetworksAuthProvider, self._auth_provider)
        try:
            provider.async_validate_access(self._ip_address)
        except InvalidAuthError:
            return self.async_abort(reason="not_whitelisted")

        # A submitted form finishes the flow with the chosen user.
        if user_input is not None:
            return await self.async_finish(user_input)

        # With exactly one candidate and bypass enabled, skip the form.
        if self._allow_bypass_login and len(self._available_users) == 1:
            only_user = next(iter(self._available_users))
            return await self.async_finish({"user": only_user})

        user_schema = vol.Schema({"user": vol.In(self._available_users)})
        return self.async_show_form(step_id="init", data_schema=user_schema)
| 34.18932 | 87 | 0.609115 | from ipaddress import ip_network, IPv4Address, IPv6Address, IPv4Network, IPv6Network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from . import AuthProvider, AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, LoginFlow
from ..models import Credentials, UserMeta
# Type aliases covering both IP versions.
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
# Configuration keys accepted by this auth provider.
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
# Schema: trusted_networks is required; trusted_users maps a network to a
# list of user ids (uuid hex) and/or {"group": <group id>} entries. Only the
# *format* of the ids is validated here, not their existence.
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
    {
        vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
        vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
            {
                ip_network: vol.All(
                    cv.ensure_list,
                    [
                        vol.Or(
                            cv.uuid4_hex,
                            vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
                        )
                    ],
                )
            }
        ),
        vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
    },
    extra=vol.PREVENT_EXTRA,
)
# BUG FIX: both classes had no suite at all (their docstring-only bodies were
# stripped), which is a SyntaxError in Python. Restore docstring bodies.
class InvalidAuthError(HomeAssistantError):
    """Raised when trying to access from an untrusted network."""


class InvalidUserError(HomeAssistantError):
    """Raised when trying to log in as an invalid (unknown or inactive) user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
    """Trusted Networks auth provider.

    Allows passwordless access from a trusted network.
    """
    DEFAULT_TITLE = "Trusted Networks"
    @property
    def trusted_networks(self) -> List[IPNetwork]:
        """Return the configured trusted networks."""
        return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])
    @property
    def trusted_users(self) -> Dict[IPNetwork, Any]:
        """Return trusted users per network."""
        return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])
    @property
    def support_mfa(self) -> bool:
        """Trusted Networks auth provider does not support MFA."""
        return False
    async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
        """Return a flow to login, narrowed to the configured trusted users."""
        assert context is not None
        ip_addr = cast(IPAddress, context.get("ip_address"))
        users = await self.store.async_get_users()
        available_users = [
            user for user in users if not user.system_generated and user.is_active
        ]
        for ip_net, user_or_group_list in self.trusted_users.items():
            if ip_addr in ip_net:
                # Entries are either plain user-id strings or
                # {"group": <group id>} dicts (see CONFIG_SCHEMA).
                user_list = [
                    user_id
                    for user_id in user_or_group_list
                    if isinstance(user_id, str)
                ]
                group_list = [
                    group[CONF_GROUP]
                    for group in user_or_group_list
                    if isinstance(group, dict)
                ]
                # NOTE(review): group_list holds group-id *strings*
                # (cv.uuid4_hex), so this flattening iterates their
                # characters — group matching likely never succeeds.
                # Confirm, and compare against group_list directly.
                flattened_group_list = [
                    group for sublist in group_list for group in sublist
                ]
                available_users = [
                    user
                    for user in available_users
                    if (
                        user.id in user_list
                        or any(
                            [group.id in flattened_group_list for group in user.groups]
                        )
                    )
                ]
                break
        return TrustedNetworksLoginFlow(
            self,
            ip_addr,
            {user.id: user.name for user in available_users},
            self.config[CONF_ALLOW_BYPASS_LOGIN],
        )
    async def async_get_or_create_credentials(
        self, flow_result: Dict[str, str]
    ) -> Credentials:
        """Get credentials based on the flow result, reusing existing ones."""
        user_id = flow_result["user"]
        users = await self.store.async_get_users()
        for user in users:
            if not user.system_generated and user.is_active and user.id == user_id:
                for credential in await self.async_credentials():
                    if credential.data["user_id"] == user_id:
                        return credential
                cred = self.async_create_credentials({"user_id": user_id})
                await self.store.async_link_user(user, cred)
                return cred
        # Only existing active users may log in.
        raise InvalidUserError
    async def async_user_meta_for_credentials(
        self, credentials: Credentials
    ) -> UserMeta:
        """Never called: this provider does not create new users."""
        raise NotImplementedError
    @callback
    def async_validate_access(self, ip_addr: IPAddress) -> None:
        """Raise InvalidAuthError unless ip_addr is inside a trusted network."""
        if not self.trusted_networks:
            raise InvalidAuthError("trusted_networks is not configured")
        if not any(
            ip_addr in trusted_network for trusted_network in self.trusted_networks
        ):
            raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
    """Handler for the trusted-networks login flow."""
    def __init__(
        self,
        auth_provider: TrustedNetworksAuthProvider,
        ip_addr: IPAddress,
        available_users: Dict[str, Optional[str]],
        allow_bypass_login: bool,
    ) -> None:
        """Initialize the login flow."""
        super().__init__(auth_provider)
        self._available_users = available_users
        self._ip_address = ip_addr
        self._allow_bypass_login = allow_bypass_login
    async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Handle the step of the form."""
        # Abort if the caller is not on a trusted network.
        try:
            cast(
                TrustedNetworksAuthProvider, self._auth_provider
            ).async_validate_access(self._ip_address)
        except InvalidAuthError:
            return self.async_abort(reason="not_whitelisted")
        if user_input is not None:
            return await self.async_finish(user_input)
        # With exactly one candidate and bypass enabled, skip the form.
        if self._allow_bypass_login and len(self._available_users) == 1:
            return await self.async_finish(
                {"user": next(iter(self._available_users.keys()))}
            )
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema({"user": vol.In(self._available_users)}),
        )
| true | true |
f71be5558aa2a08dbbb15648b144ea54b9b317ff | 854 | py | Python | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Dropout
class SparseDropout(Layer):
    """Dropout layer that operates on ``tf.SparseTensor`` inputs.

    Applies dropout (rate ``p``) to the non-zero values only, keeping the
    sparsity structure; returns the input unchanged outside of training.
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p  # dropout rate applied to the sparse values

    def call(self, x, training=None):
        training = K.learning_phase() if training is None else training
        if not (self.p and training):
            return x
        dropped_values = tf.nn.dropout(x.values, self.p)
        return tf.SparseTensor(x.indices, dropped_values, x.dense_shape)
class MixedDropout(Layer):
    """Dropout layer that dispatches on sparseness of the input.

    Routes ``tf.SparseTensor`` inputs through :class:`SparseDropout` and
    dense tensors through the regular Keras ``Dropout`` layer.
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.dense_dropout = Dropout(p)
        self.sparse_dropout = SparseDropout(p)

    def call(self, x):
        layer = self.sparse_dropout if K.is_sparse(x) else self.dense_dropout
        return layer(x)
| 26.6875 | 69 | 0.591335 | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Dropout
class SparseDropout(Layer):
    """Dropout layer for ``tf.SparseTensor`` inputs.

    Applies dropout (rate ``p``) to the non-zero values only, keeping the
    sparsity structure; returns the input unchanged outside of training.
    """
    def __init__(self, p=0.5):
        super().__init__()
        # p: dropout rate applied to the sparse values.
        self.p = p
    def call(self, x, training=None):
        """Apply dropout to ``x.values`` when training (no-op otherwise)."""
        if training is None:
            training = K.learning_phase()
        if self.p and training:
            values = tf.nn.dropout(x.values, self.p)
            return tf.SparseTensor(x.indices, values, x.dense_shape)
        return x
class MixedDropout(Layer):
    """Dropout layer that dispatches on sparseness of the input.

    Routes ``tf.SparseTensor`` inputs through :class:`SparseDropout` and
    dense tensors through the regular Keras ``Dropout`` layer.
    """
    def __init__(self, p=0.5):
        super().__init__()
        self.dense_dropout = Dropout(p)
        self.sparse_dropout = SparseDropout(p)
    def call(self, x):
        """Apply the dropout variant matching the input's sparseness."""
        if K.is_sparse(x):
            return self.sparse_dropout(x)
        else:
            return self.dense_dropout(x)
| true | true |
f71be5cc21797fff7f87730b2e377bc2fe651fa1 | 14,118 | py | Python | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
from nipype import config
import clinica.pipelines.engine as cpe
# Hash-based folder names keep iterable working-directory paths short enough
# to avoid OSError on long paths.
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
    """Deeplearning prepare data - MRI in nifti format are transformed into
    PyTorch tensors. The transformation is applied to: the whole volume, a
    selection of 3D patches, or slices extracted from the 3D volume. By default
    it uses the cropped version of the MRI (see option "--use_uncropped_image")
    Returns:
        A clinica pipeline object containing the Deeplearning prepare data pipeline.
    """
    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file."""
    def get_input_fields(self):
        """Specify the list of possible inputs of this pipeline.
        Returns:
            A list of (string) input fields name.
        """
        return ["input_nifti"]
    def get_output_fields(self):
        """Specify the list of possible outputs of this pipeline.
        Returns:
            A list of (string) output fields name.
        """
        return ["image_id"]
    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        from os import path
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,
            ClinicaException,
        )
        from clinica.utils.input_files import (
            T1W_EXTENSIVE,
            T1W_LINEAR,
            T1W_LINEAR_CROPPED,
            pet_linear_nii,
        )
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process
        from .deeplearning_prepare_data_utils import check_mask_list
        # Select the correct filetype corresponding to modality
        # NOTE(review): assumes "modality" is always one of the four handled
        # values; otherwise FILE_TYPE is unbound below — confirm upstream
        # validation.
        if self.parameters.get("modality") == "t1-linear":
            if self.parameters.get("use_uncropped_image"):
                FILE_TYPE = T1W_LINEAR
            else:
                FILE_TYPE = T1W_LINEAR_CROPPED
        if self.parameters.get("modality") == "t1-extensive":
            FILE_TYPE = T1W_EXTENSIVE
        if self.parameters.get("modality") == "pet-linear":
            FILE_TYPE = pet_linear_nii(
                self.parameters.get("acq_label"),
                self.parameters.get("suvr_reference_region"),
                self.parameters.get("use_uncropped_image"),
            )
        if self.parameters.get("modality") == "custom":
            FILE_TYPE = {
                "pattern": f"*{self.parameters.get('custom_suffix')}",
                "description": "Custom suffix",
            }
        # Input file:
        # NOTE(review): files are read from the CAPS directory but the
        # failure raises ClinicaBIDSError (ClinicaCAPSError is imported and
        # unused) — confirm whether ClinicaCAPSError was intended.
        try:
            input_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory, FILE_TYPE
            )
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
                + str(e)
            )
            raise ClinicaBIDSError(err)
        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 30 seconds per image.")
        # Slice/patch parameters are only meaningful for their respective
        # extract_method; the else branches set placeholder defaults so the
        # corresponding nodes can still be constructed in build_core_nodes.
        if self.parameters.get("extract_method") == "slice":
            self.slice_direction = self.parameters.get("slice_direction")
            self.slice_mode = self.parameters.get("slice_mode")
        else:
            self.slice_direction = "axial"
            self.slice_mode = "rgb"
        if self.parameters.get("extract_method") == "patch":
            self.patch_size = self.parameters.get("patch_size")
            self.stride_size = self.parameters.get("stride_size")
        else:
            self.patch_size = 50
            self.stride_size = 50
        # Load the corresponding masks
        if self.parameters.get("extract_method") == "roi":
            self.roi_list = self.parameters.get("roi_list")
            if self.parameters.get("modality") == "custom":
                self.mask_pattern = self.parameters.get("custom_mask_pattern")
                self.template = self.parameters.get("custom_template")
                if not self.template:
                    raise ValueError(
                        "A custom template must be defined when the modality is set to custom."
                    )
            else:
                self.mask_pattern = None
                from .deeplearning_prepare_data_utils import TEMPLATE_DICT
                self.template = TEMPLATE_DICT[self.parameters.get("modality")]
            self.masks_location = path.join(
                self.caps_directory, "masks", f"tpl-{self.template}"
            )
            if not self.roi_list:
                raise ValueError("A list of regions must be given.")
            else:
                check_mask_list(
                    self.masks_location,
                    self.roi_list,
                    self.mask_pattern,
                    not self.parameters.get("use_uncropped_image"),
                )
        else:
            # No ROI extraction: neutral values so the ROI node inputs below
            # can still be assigned.
            self.masks_location = ""
            self.mask_pattern = None
            self.roi_list = []
        # The reading node
        # -------------------------
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("input_nifti", input_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect(
            [
                (read_node, self.input_node, [("input_nifti", "input_nifti")]),
            ]
        )
    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from nipype.interfaces.io import DataSink
        from clinica.utils.filemanip import get_subject_id
        from clinica.utils.nipype import container_from_filename, fix_join
        # Write node
        # ----------------------
        write_node = npe.Node(name="WriteCaps", interface=DataSink())
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.parameterization = False
        # Get subject ID node
        # ----------------------
        image_id_node = npe.Node(
            interface=nutil.Function(
                input_names=["bids_or_caps_file"],
                output_names=["image_id"],
                function=get_subject_id,
            ),
            name="ImageID",
        )
        # Find container path from input filename
        # ----------------------
        container_path = npe.Node(
            nutil.Function(
                input_names=["bids_or_caps_filename"],
                output_names=["container"],
                function=container_from_filename,
            ),
            name="ContainerPath",
        )
        # fmt: off
        self.connect(
            [
                (self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
                (self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
                # (image_id_node, write_node, [('image_id', '@image_id')]),
                (image_id_node, write_node, [("image_id", "@image_id")]),
            ]
        )
        # fmt: on
        # Choose the CAPS subfolder and the DataSink connections that match
        # the configured extraction method.
        subfolder = "image_based"
        if self.parameters.get("extract_method") == "slice":
            subfolder = "slice_based"
            # fmt: off
            self.connect(
                [
                    (self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
                    (self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
                ]
            )
            # fmt: on
        elif self.parameters.get("extract_method") == "patch":
            subfolder = "patch_based"
            # fmt: off
            self.connect(
                [
                    (self.output_node, write_node, [("patches_output", "@patches_output")])
                ]
            )
            # fmt: on
        elif self.parameters.get("extract_method") == "roi":
            subfolder = "roi_based"
            # fmt: off
            self.connect(
                [
                    (self.output_node, write_node, [("roi_output", "@roi_output")])
                ]
            )
            # fmt: on
        else:
            # Default: whole-image tensors.
            # fmt: off
            self.connect(
                [
                    (self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
                ]
            )
            # fmt: on
        # Modality-specific subfolder under deeplearning_prepare_data/.
        mod_subfolder = ""
        if self.parameters.get("modality") == "t1-linear":
            mod_subfolder = "t1_linear"
        if self.parameters.get("modality") == "t1-extensive":
            mod_subfolder = "t1_extensive"
        if self.parameters.get("modality") == "pet-linear":
            mod_subfolder = "pet_linear"
        if self.parameters.get("modality") == "custom":
            mod_subfolder = "custom"
        # fmt: off
        self.connect(
            [
                (container_path, write_node, [
                    (("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
            ]
        )
        # fmt: on
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from .deeplearning_prepare_data_utils import (
            extract_patches,
            extract_roi,
            extract_slices,
            save_as_pt,
        )
        # The processing nodes
        # Node to save input in nii.gz format into pytorch .pt format
        # ----------------------
        # Note: the local name rebinds ``save_as_pt``; the imported function
        # is captured by ``function=save_as_pt`` before the assignment
        # completes, so the node wraps the utility function as intended.
        save_as_pt = npe.MapNode(
            name="save_as_pt",
            iterfield=["input_img"],
            interface=nutil.Function(
                function=save_as_pt,
                input_names=["input_img"],
                output_names=["output_file"],
            ),
        )
        # Extract slices node (options: 3 directions, mode)
        # ----------------------
        extract_slices_node = npe.MapNode(
            name="extract_slices",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_slices,
                input_names=["input_tensor", "slice_direction", "slice_mode"],
                output_names=["output_file_rgb", "output_file_original"],
            ),
        )
        extract_slices_node.inputs.slice_direction = self.slice_direction
        extract_slices_node.inputs.slice_mode = self.slice_mode
        # Extract patches node (options, patch size and stride size)
        # ----------------------
        extract_patches_node = npe.MapNode(
            name="extract_patches",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_patches,
                input_names=["input_tensor", "patch_size", "stride_size"],
                output_names=["output_patch"],
            ),
        )
        extract_patches_node.inputs.patch_size = self.patch_size
        extract_patches_node.inputs.stride_size = self.stride_size
        # Extract ROI node
        # (mask/ROI attributes were prepared in build_input_node)
        extract_roi_node = npe.MapNode(
            name="extract_ROI",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_roi,
                input_names=[
                    "input_tensor",
                    "masks_location",
                    "mask_pattern",
                    "cropped_input",
                    "roi_list",
                    "uncrop_output",
                ],
                output_names=["output_roi"],
            ),
        )
        extract_roi_node.inputs.masks_location = self.masks_location
        extract_roi_node.inputs.mask_pattern = self.mask_pattern
        extract_roi_node.inputs.cropped_input = not self.parameters.get(
            "use_uncropped_image"
        )
        extract_roi_node.inputs.roi_list = self.roi_list
        extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
        # Connections
        # ----------------------
        # Every extraction method first converts the nifti input to a .pt
        # tensor; only the matching extraction node is wired downstream.
        # fmt: off
        self.connect(
            [
                (self.input_node, save_as_pt, [("input_nifti", "input_img")]),
            ]
        )
        if self.parameters.get("extract_method") == "slice":
            self.connect(
                [
                    (save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
                    (extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
                    (extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
                ]
            )
        elif self.parameters.get("extract_method") == "patch":
            self.connect(
                [
                    (save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
                    (extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
                ]
            )
        elif self.parameters.get("extract_method") == "roi":
            self.connect(
                [
                    (save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
                    (extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
                ]
            )
        else:
            self.connect(
                [
                    (save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
                ]
            )
        # fmt: on
| 35.741772 | 116 | 0.541507 |
from nipype import config
import clinica.pipelines.engine as cpe
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
def check_custom_dependencies(self):
def get_input_fields(self):
return ["input_nifti"]
def get_output_fields(self):
return ["image_id"]
def build_input_node(self):
from os import path
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from clinica.utils.exceptions import (
ClinicaBIDSError,
ClinicaCAPSError,
ClinicaException,
)
from clinica.utils.input_files import (
T1W_EXTENSIVE,
T1W_LINEAR,
T1W_LINEAR_CROPPED,
pet_linear_nii,
)
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.stream import cprint
from clinica.utils.ux import print_images_to_process
from .deeplearning_prepare_data_utils import check_mask_list
if self.parameters.get("modality") == "t1-linear":
if self.parameters.get("use_uncropped_image"):
FILE_TYPE = T1W_LINEAR
else:
FILE_TYPE = T1W_LINEAR_CROPPED
if self.parameters.get("modality") == "t1-extensive":
FILE_TYPE = T1W_EXTENSIVE
if self.parameters.get("modality") == "pet-linear":
FILE_TYPE = pet_linear_nii(
self.parameters.get("acq_label"),
self.parameters.get("suvr_reference_region"),
self.parameters.get("use_uncropped_image"),
)
if self.parameters.get("modality") == "custom":
FILE_TYPE = {
"pattern": f"*{self.parameters.get('custom_suffix')}",
"description": "Custom suffix",
}
try:
input_files = clinica_file_reader(
self.subjects, self.sessions, self.caps_directory, FILE_TYPE
)
except ClinicaException as e:
err = (
"Clinica faced error(s) while trying to read files in your CAPS directory.\n"
+ str(e)
)
raise ClinicaBIDSError(err)
if len(self.subjects):
print_images_to_process(self.subjects, self.sessions)
cprint("The pipeline will last approximately 30 seconds per image.")
if self.parameters.get("extract_method") == "slice":
self.slice_direction = self.parameters.get("slice_direction")
self.slice_mode = self.parameters.get("slice_mode")
else:
self.slice_direction = "axial"
self.slice_mode = "rgb"
if self.parameters.get("extract_method") == "patch":
self.patch_size = self.parameters.get("patch_size")
self.stride_size = self.parameters.get("stride_size")
else:
self.patch_size = 50
self.stride_size = 50
if self.parameters.get("extract_method") == "roi":
self.roi_list = self.parameters.get("roi_list")
if self.parameters.get("modality") == "custom":
self.mask_pattern = self.parameters.get("custom_mask_pattern")
self.template = self.parameters.get("custom_template")
if not self.template:
raise ValueError(
"A custom template must be defined when the modality is set to custom."
)
else:
self.mask_pattern = None
from .deeplearning_prepare_data_utils import TEMPLATE_DICT
self.template = TEMPLATE_DICT[self.parameters.get("modality")]
self.masks_location = path.join(
self.caps_directory, "masks", f"tpl-{self.template}"
)
if not self.roi_list:
raise ValueError("A list of regions must be given.")
else:
check_mask_list(
self.masks_location,
self.roi_list,
self.mask_pattern,
not self.parameters.get("use_uncropped_image"),
)
else:
self.masks_location = ""
self.mask_pattern = None
self.roi_list = []
read_node = npe.Node(
name="ReadingFiles",
iterables=[
("input_nifti", input_files),
],
synchronize=True,
interface=nutil.IdentityInterface(fields=self.get_input_fields()),
)
self.connect(
[
(read_node, self.input_node, [("input_nifti", "input_nifti")]),
]
)
def build_output_node(self):
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from nipype.interfaces.io import DataSink
from clinica.utils.filemanip import get_subject_id
from clinica.utils.nipype import container_from_filename, fix_join
write_node = npe.Node(name="WriteCaps", interface=DataSink())
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
image_id_node = npe.Node(
interface=nutil.Function(
input_names=["bids_or_caps_file"],
output_names=["image_id"],
function=get_subject_id,
),
name="ImageID",
)
container_path = npe.Node(
nutil.Function(
input_names=["bids_or_caps_filename"],
output_names=["container"],
function=container_from_filename,
),
name="ContainerPath",
)
self.connect(
[
(self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
(self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
(image_id_node, write_node, [("image_id", "@image_id")]),
]
)
subfolder = "image_based"
if self.parameters.get("extract_method") == "slice":
subfolder = "slice_based"
self.connect(
[
(self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
(self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
]
)
elif self.parameters.get("extract_method") == "patch":
subfolder = "patch_based"
self.connect(
[
(self.output_node, write_node, [("patches_output", "@patches_output")])
]
)
elif self.parameters.get("extract_method") == "roi":
subfolder = "roi_based"
self.connect(
[
(self.output_node, write_node, [("roi_output", "@roi_output")])
]
)
else:
self.connect(
[
(self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
]
)
mod_subfolder = ""
if self.parameters.get("modality") == "t1-linear":
mod_subfolder = "t1_linear"
if self.parameters.get("modality") == "t1-extensive":
mod_subfolder = "t1_extensive"
if self.parameters.get("modality") == "pet-linear":
mod_subfolder = "pet_linear"
if self.parameters.get("modality") == "custom":
mod_subfolder = "custom"
self.connect(
[
(container_path, write_node, [
(("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
]
)
def build_core_nodes(self):
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from .deeplearning_prepare_data_utils import (
extract_patches,
extract_roi,
extract_slices,
save_as_pt,
)
save_as_pt = npe.MapNode(
name="save_as_pt",
iterfield=["input_img"],
interface=nutil.Function(
function=save_as_pt,
input_names=["input_img"],
output_names=["output_file"],
),
)
extract_slices_node = npe.MapNode(
name="extract_slices",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_slices,
input_names=["input_tensor", "slice_direction", "slice_mode"],
output_names=["output_file_rgb", "output_file_original"],
),
)
extract_slices_node.inputs.slice_direction = self.slice_direction
extract_slices_node.inputs.slice_mode = self.slice_mode
extract_patches_node = npe.MapNode(
name="extract_patches",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_patches,
input_names=["input_tensor", "patch_size", "stride_size"],
output_names=["output_patch"],
),
)
extract_patches_node.inputs.patch_size = self.patch_size
extract_patches_node.inputs.stride_size = self.stride_size
extract_roi_node = npe.MapNode(
name="extract_ROI",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_roi,
input_names=[
"input_tensor",
"masks_location",
"mask_pattern",
"cropped_input",
"roi_list",
"uncrop_output",
],
output_names=["output_roi"],
),
)
extract_roi_node.inputs.masks_location = self.masks_location
extract_roi_node.inputs.mask_pattern = self.mask_pattern
extract_roi_node.inputs.cropped_input = not self.parameters.get(
"use_uncropped_image"
)
extract_roi_node.inputs.roi_list = self.roi_list
extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
self.connect(
[
(self.input_node, save_as_pt, [("input_nifti", "input_img")]),
]
)
if self.parameters.get("extract_method") == "slice":
self.connect(
[
(save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
(extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
(extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
]
)
elif self.parameters.get("extract_method") == "patch":
self.connect(
[
(save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
(extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
]
)
elif self.parameters.get("extract_method") == "roi":
self.connect(
[
(save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
(extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
]
)
else:
self.connect(
[
(save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
]
)
| true | true |
f71be64382d9c7cd419601ae2aa3d28d776baa3a | 2,261 | py | Python | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 181 | 2020-03-26T12:33:25.000Z | 2022-03-28T04:04:25.000Z | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 11 | 2020-07-26T13:18:50.000Z | 2022-01-09T10:04:10.000Z | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 38 | 2020-05-04T05:06:55.000Z | 2022-03-29T19:10:51.000Z | import os
import glob
import argparse
import xml.etree.ElementTree as ET
# Command-line interface: source directory of Pascal-VOC XML annotations,
# destination directory for YOLO-format TXT labels, and the path to a
# newline-separated class-name list file.
parser = argparse.ArgumentParser()
# Define parameters
parser.add_argument('--input_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--class_list', type=str)
# NOTE(review): args are parsed at import time, so this module is only
# usable as a script — importing it without these flags will fail.
args = parser.parse_args()
# Set Class Names
def set_class_names(class_list):
    """Read class names (one per line) from a text file.

    Reading stops at the first blank line or at end of file, matching the
    original behavior for a simple newline-separated class list.

    Args:
        class_list: Path to the class-list text file.

    Returns:
        List of class-name strings in file order.
    """
    class_names = []
    # Context manager ensures the file is closed even if reading fails
    # (the original opened/closed manually and leaked on exception).
    with open(class_list, mode='r') as f:
        for line in f:
            name = line.strip()
            if not name:
                # Preserve original semantics: a blank line terminates the list.
                break
            class_names.append(name)
    return class_names
# Normalize Bounding Box
def normalizeBB(x_min, x_max, y_min, y_max, width, height):
    """Convert absolute box corners to normalized center/size (YOLO style).

    Args:
        x_min, x_max, y_min, y_max: Box corners in pixel coordinates.
        width, height: Image dimensions used for normalization.

    Returns:
        Tuple ``(x_center, y_center, x_range, y_range)``, each in [0, 1]
        for boxes fully inside the image.
    """
    w = float(width)
    h = float(height)
    x_center = 0.5 * (x_min + x_max) / w
    y_center = 0.5 * (y_min + y_max) / h
    x_range = (x_max - x_min) / w
    y_range = (y_max - y_min) / h
    return x_center, y_center, x_range, y_range
# Convert XML into TXT
def convertXML2TXT(class_names, pathI, pathO):
    """Convert one Pascal-VOC XML annotation file into a YOLO-format TXT file.

    Each ``<object>`` element becomes one output line:
    ``<class_id> <x_center> <y_center> <x_range> <y_range>`` with the box
    normalized by the image size taken from the XML ``<size>`` element.

    Args:
        class_names: Ordered list of class names; an object's index in this
            list becomes its class id.
        pathI: Path to the input XML annotation file.
        pathO: Path to the output TXT file (created/overwritten).

    Raises:
        ValueError: If an object's class name is not in ``class_names``
            (from ``list.index``).
    """
    # ET.parse accepts a path directly; no need to open the input manually.
    tree = ET.parse(pathI)
    root = tree.getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    # Context manager guarantees the output file is closed even on error
    # (the original leaked both file handles if parsing raised).
    with open(pathO, mode='w') as fileO:
        for obj in root.iter('object'):
            class_id = class_names.index(obj.find('name').text)
            bb = obj.find('bndbox')
            x_min = float(bb.find('xmin').text)
            x_max = float(bb.find('xmax').text)
            y_min = float(bb.find('ymin').text)
            y_max = float(bb.find('ymax').text)
            x_center, y_center, x_range, y_range = normalizeBB(
                x_min, x_max, y_min, y_max, width, height)
            fileO.write(f'{class_id} {x_center} {y_center} {x_range} {y_range}\n')
if __name__ == '__main__':
    # Collect the stem (file name without extension) of every XML annotation
    # found in the input directory.
    fnames = [
        os.path.splitext(os.path.split(p)[1])[0]
        for p in glob.glob(f'{args.input_dir}/*.xml')
    ]
    # Load the ordered class list once; indices become YOLO class ids.
    class_names = set_class_names(args.class_list)
    # Fail fast if the output directory already exists (exist_ok=False),
    # then convert every annotation file.
    os.makedirs(f'{args.output_dir}', exist_ok=False)
    for fname in fnames:
        convertXML2TXT(class_names,
                       f'{args.input_dir}/{fname}.xml',
                       f'{args.output_dir}/{fname}.txt')
| 27.91358 | 101 | 0.632906 | import os
import glob
import argparse
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--class_list', type=str)
args = parser.parse_args()
def set_class_names(class_list):
f = open(class_list, mode='r')
class_names = []
while True:
line = f.readline().strip()
if not line:
break
class_names += [line]
f.close()
return class_names
def normalizeBB(x_min, x_max, y_min, y_max, width, height):
x_center = (x_min + x_max) * 0.5 / float(width)
y_center = (y_min + y_max) * 0.5 / float(height)
x_range = (x_max - x_min) / float(width)
y_range = (y_max - y_min) / float(height)
return x_center, y_center, x_range, y_range
def convertXML2TXT(class_names, pathI, pathO):
fileI = open(pathI, mode='r')
fileO = open(pathO, mode='w')
tree = ET.parse(fileI)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
for obj in root.iter('object'):
class_name = obj.find('name').text
class_id = class_names.index(class_name)
BB = obj.find('bndbox')
x_min = float(BB.find('xmin').text)
x_max = float(BB.find('xmax').text)
y_min = float(BB.find('ymin').text)
y_max = float(BB.find('ymax').text)
x_center, y_center, x_range, y_range = normalizeBB(x_min, x_max, y_min, y_max, width, height)
fileO.write(f'{class_id} {x_center} {y_center} {x_range} {y_range}\n')
fileI.close()
fileO.close()
if __name__ == '__main__':
fnames = []
for f in glob.glob(f'{args.input_dir}/*.xml'):
fnames.append(os.path.splitext(os.path.split(f)[1])[0])
class_names = set_class_names(args.class_list)
os.makedirs(f'{args.output_dir}', exist_ok=False)
for f in fnames:
pathI = f'{args.input_dir}/{f}.xml'
pathO = f'{args.output_dir}/{f}.txt'
convertXML2TXT(class_names, pathI, pathO)
| true | true |
f71be6b44b2393e0c5756152ad6854307836bc3b | 20,079 | py | Python | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import tempfile
import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period
from mmdet3d.datasets import KittiDataset
def _generate_kitti_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1.0, 1.0],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
]
modality = dict(use_lidar=True, use_camera=False)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def _generate_kitti_multi_modality_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(type='Resize', multiscale_mode='value', keep_ratio=True),
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points', 'img'])
])
]
modality = dict(use_lidar=True, use_camera=True)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def test_getitem():
    """Check ``KittiDataset.__getitem__`` output for a training pipeline.

    First exercises a LiDAR-only pipeline (with GT sampling, noise, flip and
    rot/scale augmentation) and verifies points, boxes, labels and the
    augmentation metadata; then repeats with a multi-modality
    (LiDAR + camera) pipeline and verifies the image tensor and the
    lidar2img projection matrix.
    """
    # Fixed seed so the random augmentations below are deterministic.
    np.random.seed(0)
    data_root, ann_file, classes, pts_prefix, \
        _, modality, split = _generate_kitti_dataset_config()
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            file_client_args=dict(backend='disk')),
        dict(
            type='ObjectSample',
            db_sampler=dict(
                data_root='tests/data/kitti/',
                # in coordinate system refactor, this test file is modified
                info_path='tests/data/kitti/kitti_dbinfos_train.pkl',
                rate=1.0,
                prepare=dict(
                    filter_by_difficulty=[-1],
                    filter_by_min_points=dict(Pedestrian=10)),
                classes=['Pedestrian', 'Cyclist', 'Car'],
                sample_groups=dict(Pedestrian=6))),
        dict(
            type='ObjectNoise',
            num_try=100,
            translation_std=[1.0, 1.0, 0.5],
            global_rot_range=[0.0, 0.0],
            rot_range=[-0.78539816, 0.78539816]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05]),
        dict(
            type='PointsRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(
            type='ObjectRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(type='PointShuffle'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=['Pedestrian', 'Cyclist', 'Car']),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
    ]
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    data = kitti_dataset[0]
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    expected_gt_bboxes_3d = torch.tensor(
        [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])
    expected_gt_labels_3d = torch.tensor([0])
    # Augmentation metadata recorded by the pipeline for this sample.
    rot_matrix = data['img_metas']._data['pcd_rotation']
    rot_angle = data['img_metas']._data['pcd_rotation_angle']
    horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    vertical_flip = data['img_metas']._data['pcd_vertical_flip']
    expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],
                                        [-0.5976, 0.8018, 0.0000],
                                        [0.0000, 0.0000, 1.0000]])
    expected_rot_angle = 0.6404654291602163
    noise_angle = 0.20247319
    assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)
    assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)
    assert horizontal_flip is True
    assert vertical_flip is False
    # after coord system refactor
    expected_gt_bboxes_3d[:, :3] = \
        expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix
    expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \
        + 2 * rot_angle - 2 * noise_angle
    expected_gt_bboxes_3d[:, -1:] = limit_period(
        expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)
    assert points.shape == (780, 4)
    assert torch.allclose(
        gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)
    assert torch.all(gt_labels_3d == expected_gt_labels_3d)
    # test multi-modality KITTI dataset
    np.random.seed(0)
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    multi_modality_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
        dict(
            type='Resize',
            img_scale=[(640, 192), (2560, 768)],
            multiscale_mode='range',
            keep_ratio=True),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05],
            translation_std=[0.2, 0.2, 0.2]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='PointShuffle'),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle3D', class_names=classes),
        dict(
            type='Collect3D',
            keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    modality = dict(use_lidar=True, use_camera=True)
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    data = kitti_dataset[0]
    img = data['img']._data
    lidar2img = data['img_metas']._data['lidar2img']
    # Expected 4x4 LiDAR-to-image projection matrix for sample 0.
    expected_lidar2img = np.array(
        [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],
         [1.76777252e+02, 8.80879879e+00, -7.07936157e+02, -1.02568634e+02],
         [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    assert img.shape[:] == (3, 416, 1344)
    assert np.allclose(lidar2img, expected_lidar2img)
def test_evaluate():
    """Check ``KittiDataset.evaluate`` mAP output for a single detection.

    Skipped on machines without CUDA since the evaluation path requires
    torch+cuda.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # One Pedestrian box in LiDAR coordinates (x, y, z, dx, dy, dz, yaw).
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    metric = ['mAP']
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    ap_dict = kitti_dataset.evaluate([result], metric)
    # All three difficulty levels share the same expected AP11 value here.
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],
                      3.0303030303030307)
def test_show():
    """Check ``KittiDataset.show`` result dumping.

    Covers four variants: LiDAR-only show, LiDAR-only show with an explicit
    eval pipeline, multi-modality show, and multi-modality show with an
    explicit eval pipeline.  Each variant must write the expected
    points/gt/pred files (plus image files in the multi-modality cases)
    into the output directory.
    """
    from os import path as osp
    import mmcv
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(
        data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
    # Five detections spanning all three classes for sample 000000.
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],
             [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],
             [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],
             [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],
             [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))
    scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])
    labels_3d = torch.tensor([0, 0, 1, 1, 2])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [result]
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # test show with pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # test multi-modality show
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    _, _, _, _, multi_modality_pipeline, modality, _ = \
        _generate_kitti_multi_modality_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
    # test multi-modality show with pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points', 'img'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
def test_format_results():
    """Check ``KittiDataset.format_results`` KITTI-submission conversion.

    Feeds one Pedestrian detection in LiDAR coordinates and verifies every
    field of the produced KITTI-format annotation (name, truncation,
    occlusion, alpha, 2D bbox, dimensions, location, rotation_y, score,
    sample index).
    """
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # coord system refactor
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    result_files, tmp_dir = kitti_dataset.format_results(results)
    expected_name = np.array(['Pedestrian'])
    expected_truncated = np.array([0.])
    expected_occluded = np.array([0])
    # coord sys refactor
    expected_alpha = np.array(-3.3410306 + np.pi)
    expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])
    expected_dimensions = np.array([[1.2, 1.89, 0.48]])
    expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])
    expected_rotation_y = np.array([0.0100])
    expected_score = np.array([0.5])
    expected_sample_idx = np.array([0])
    assert np.all(result_files[0]['name'] == expected_name)
    assert np.allclose(result_files[0]['truncated'], expected_truncated)
    assert np.all(result_files[0]['occluded'] == expected_occluded)
    assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)
    assert np.allclose(result_files[0]['bbox'], expected_bbox)
    assert np.allclose(result_files[0]['dimensions'], expected_dimensions)
    assert np.allclose(result_files[0]['location'], expected_location)
    assert np.allclose(result_files[0]['rotation_y'], expected_rotation_y,
                       1e-3)
    assert np.allclose(result_files[0]['score'], expected_score)
    assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)
    tmp_dir.cleanup()
def test_bbox2result_kitti():
    """Check ``KittiDataset.bbox2result_kitti`` submission-file writing.

    Verifies both a non-empty detection (fields converted correctly and the
    per-sample submission TXT written) and an empty detection (submission
    file still created).
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    expected_name = np.array(['Pedestrian'])
    expected_dimensions = np.array([1.2000, 1.8900, 0.4800])
    # coord system refactor (reverse sign)
    expected_rotation_y = 0.0100
    expected_score = np.array([0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)
    assert np.allclose(det_annos[0]['score'], expected_score)
    assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
    # Empty detections must still produce a (header-less) submission file.
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))
    labels_3d = torch.tensor([])
    scores_3d = torch.tensor([])
    empty_result = dict(
        boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [empty_result]
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
def test_bbox2result_kitti2d():
    """Check ``KittiDataset.bbox2result_kitti2d`` 2D-detection conversion.

    Feeds per-class 2D boxes (each row is ``[x1, y1, x2, y2, score]``) and
    verifies the flattened names, bboxes and scores in the resulting
    KITTI-format annotation dict.
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # Shape (2 classes, 2 boxes, 5): two Pedestrian and two Cyclist boxes.
    bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],
                        [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],
                       [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],
                        [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
    det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
    expected_name = np.array(
        ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
    expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],
                              [33.3189, 0.1981, 0.3136, 0.5656],
                              [46.1366, -4.6404, -0.951, 0.5162],
                              [33.2646, 0.2297, 0.3446, 0.5746]])
    expected_score = np.array([0.5, 0.5, 0.5, 0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['bbox'], expected_bbox)
    assert np.allclose(det_annos[0]['score'], expected_score)
| 41.91858 | 79 | 0.619852 |
import math
import os
import tempfile
import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period
from mmdet3d.datasets import KittiDataset
def _generate_kitti_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1.0, 1.0],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
]
modality = dict(use_lidar=True, use_camera=False)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def _generate_kitti_multi_modality_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(type='Resize', multiscale_mode='value', keep_ratio=True),
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points', 'img'])
])
]
modality = dict(use_lidar=True, use_camera=True)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def test_getitem():
    """Check KittiDataset.__getitem__ for the LiDAR-only and the
    multi-modality (LiDAR + camera) training pipelines.

    The NumPy seed is fixed so the stochastic augmentations (ObjectSample,
    ObjectNoise, RandomFlip3D, GlobalRotScaleTrans) are reproducible; the
    hard-coded expected boxes/metas below are only valid for this seed and
    the fixture data under ``tests/data/kitti``.
    """
    np.random.seed(0)
    data_root, ann_file, classes, pts_prefix, \
        _, modality, split = _generate_kitti_dataset_config()
    # Training pipeline (with GT database sampling and noise) instead of the
    # test-time pipeline provided by the shared config helper.
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            file_client_args=dict(backend='disk')),
        dict(
            type='ObjectSample',
            db_sampler=dict(
                data_root='tests/data/kitti/',
                info_path='tests/data/kitti/kitti_dbinfos_train.pkl',
                rate=1.0,
                prepare=dict(
                    filter_by_difficulty=[-1],
                    filter_by_min_points=dict(Pedestrian=10)),
                classes=['Pedestrian', 'Cyclist', 'Car'],
                sample_groups=dict(Pedestrian=6))),
        dict(
            type='ObjectNoise',
            num_try=100,
            translation_std=[1.0, 1.0, 0.5],
            global_rot_range=[0.0, 0.0],
            rot_range=[-0.78539816, 0.78539816]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05]),
        dict(
            type='PointsRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(
            type='ObjectRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(type='PointShuffle'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=['Pedestrian', 'Cyclist', 'Car']),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
    ]
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    data = kitti_dataset[0]
    # ``_data`` unwraps the mmcv DataContainer produced by the format bundle.
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    expected_gt_bboxes_3d = torch.tensor(
        [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])
    expected_gt_labels_3d = torch.tensor([0])
    rot_matrix = data['img_metas']._data['pcd_rotation']
    rot_angle = data['img_metas']._data['pcd_rotation_angle']
    horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    vertical_flip = data['img_metas']._data['pcd_vertical_flip']
    expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],
                                        [-0.5976, 0.8018, 0.0000],
                                        [0.0000, 0.0000, 1.0000]])
    expected_rot_angle = 0.6404654291602163
    # noise_angle is the rotation that ObjectNoise applies to the single GT
    # box under this seed — presumably pinned from a reference run; confirm
    # against the fixture if the pipeline implementation changes.
    noise_angle = 0.20247319
    assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)
    assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)
    assert horizontal_flip is True
    assert vertical_flip is False
    # Undo/replay the augmentations on the expected box so it can be compared
    # with the augmented ground truth produced by the pipeline.
    expected_gt_bboxes_3d[:, :3] = \
        expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix
    expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \
        + 2 * rot_angle - 2 * noise_angle
    expected_gt_bboxes_3d[:, -1:] = limit_period(
        expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)
    assert points.shape == (780, 4)
    assert torch.allclose(
        gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)
    assert torch.all(gt_labels_3d == expected_gt_labels_3d)
    # Second pass: multi-modality (points + image) training pipeline.
    np.random.seed(0)
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    multi_modality_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
        dict(
            type='Resize',
            img_scale=[(640, 192), (2560, 768)],
            multiscale_mode='range',
            keep_ratio=True),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05],
            translation_std=[0.2, 0.2, 0.2]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='PointShuffle'),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle3D', class_names=classes),
        dict(
            type='Collect3D',
            keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    modality = dict(use_lidar=True, use_camera=True)
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    data = kitti_dataset[0]
    img = data['img']._data
    lidar2img = data['img_metas']._data['lidar2img']
    # Projection matrix from LiDAR to image coordinates for sample 000000.
    expected_lidar2img = np.array(
        [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],
         [1.76777252e+02, 8.80879879e+00, -7.07936157e+02, -1.02568634e+02],
         [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    assert img.shape[:] == (3, 416, 1344)
    assert np.allclose(lidar2img, expected_lidar2img)
def test_evaluate():
    """Evaluate one detection against the KITTI fixture with the 'mAP'
    metric and check the AP11 scores returned for all difficulties."""
    # KITTI evaluation uses CUDA ops in this codebase, hence the skip.
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    metric = ['mAP']
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    ap_dict = kitti_dataset.evaluate([result], metric)
    # 1/33 * 100 = 3.03...: the single detection hits one of the 11 recall
    # positions — value pinned from a reference run on the fixture data.
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],
                      3.0303030303030307)
def test_show():
    """KittiDataset.show must export point cloud / GT / prediction files
    (and, in multi-modality mode, images) for visualisation.

    Exercises four variants: default pipeline, explicit eval pipeline, and
    both again with the multi-modality dataset config.
    """
    from os import path as osp
    import mmcv
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(
        data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],
             [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],
             [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],
             [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],
             [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))
    scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])
    labels_3d = torch.tensor([0, 0, 1, 1, 2])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [result]
    # Variant 1: LiDAR-only, pipeline taken from the dataset itself.
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # Variant 2: LiDAR-only with an explicitly supplied eval pipeline.
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # Variant 3: multi-modality dataset — image exports expected as well.
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    _, _, _, _, multi_modality_pipeline, modality, _ = \
        _generate_kitti_multi_modality_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
    # Variant 4: multi-modality with an explicit eval pipeline.
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points', 'img'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
def test_format_results():
    """format_results must convert a LiDAR-frame detection into the KITTI
    camera-frame submission format (name/alpha/bbox/dimensions/location/...).
    """
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # One Pedestrian box in LiDAR coordinates (x, y, z, dx, dy, dz, yaw).
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    result_files, tmp_dir = kitti_dataset.format_results(results)
    # Expected values in the KITTI camera frame, pinned from a reference run
    # on the fixture calibration of sample 000000.
    expected_name = np.array(['Pedestrian'])
    expected_truncated = np.array([0.])
    expected_occluded = np.array([0])
    expected_alpha = np.array(-3.3410306 + np.pi)
    expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])
    expected_dimensions = np.array([[1.2, 1.89, 0.48]])
    expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])
    expected_rotation_y = np.array([0.0100])
    expected_score = np.array([0.5])
    expected_sample_idx = np.array([0])
    assert np.all(result_files[0]['name'] == expected_name)
    assert np.allclose(result_files[0]['truncated'], expected_truncated)
    assert np.all(result_files[0]['occluded'] == expected_occluded)
    assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)
    assert np.allclose(result_files[0]['bbox'], expected_bbox)
    assert np.allclose(result_files[0]['dimensions'], expected_dimensions)
    assert np.allclose(result_files[0]['location'], expected_location)
    assert np.allclose(result_files[0]['rotation_y'], expected_rotation_y,
                       1e-3)
    assert np.allclose(result_files[0]['score'], expected_score)
    assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)
    tmp_dir.cleanup()
def test_bbox2result_kitti():
    """bbox2result_kitti must produce KITTI-style annotations and write a
    submission txt file, including for an empty detection result."""
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    # Submission files are named after the sample index.
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    expected_name = np.array(['Pedestrian'])
    expected_dimensions = np.array([1.2000, 1.8900, 0.4800])
    expected_rotation_y = 0.0100
    expected_score = np.array([0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)
    assert np.allclose(det_annos[0]['score'], expected_score)
    assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
    # An empty result must still emit a (blank) submission file.
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))
    labels_3d = torch.tensor([])
    scores_3d = torch.tensor([])
    empty_result = dict(
        boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [empty_result]
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
def test_bbox2result_kitti2d():
    """bbox2result_kitti2d must flatten per-class 2D boxes into one KITTI
    annotation dict with class names, bboxes and scores."""
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # Shape (num_classes=2, num_boxes=2, 5): [x1, y1, x2, y2, score] — the
    # first axis maps onto ``classes`` in order (Pedestrian, Cyclist).
    bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],
                        [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],
                       [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],
                        [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
    det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
    expected_name = np.array(
        ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
    expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],
                              [33.3189, 0.1981, 0.3136, 0.5656],
                              [46.1366, -4.6404, -0.951, 0.5162],
                              [33.2646, 0.2297, 0.3446, 0.5746]])
    expected_score = np.array([0.5, 0.5, 0.5, 0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['bbox'], expected_bbox)
    assert np.allclose(det_annos[0]['score'], expected_score)
| true | true |
f71be6eab3547a134de4d6606f48330f24f83fbe | 8,080 | py | Python | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
load:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`load_cf`
Inputs
------
Outputs
-------
- ``resource/time_series_60min_singleindex_filtered.csv``:
"""
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
import dateutil
from pandas import Timedelta as Delta
def load_timeseries(fn, years, countries, powerstatistics=True):
    """
    Read load data from OPSD time-series package version 2020-10-06.

    Parameters
    ----------
    years : None or slice()
        Years for which to read load data (defaults to
        slice("2018","2019"))
    fn : str
        File name or url location (file format .csv)
    countries : listlike
        Countries for which to read load data.
    powerstatistics: bool
        Whether the electricity consumption data of the ENTSOE power
        statistics (if true) or of the ENTSOE transparency map (if false)
        should be parsed.

    Returns
    -------
    load : pd.DataFrame
        Load time-series with UTC timestamps x ISO-2 countries
    """
    logger.info(f"Retrieving load data from '{fn}'.")

    # OPSD columns are named e.g. 'DE_load_actual_entsoe_transparency' /
    # 'DE_load_actual_entsoe_power_statistics'. The previous spelling
    # '_transparency' produced the pattern '..._entsoe__transparency'
    # (double underscore), which matched no column and yielded an empty
    # frame for the transparency data set.
    pattern = 'power_statistics' if powerstatistics else 'transparency'
    pattern = f'_load_actual_entsoe_{pattern}'

    def strip_suffix(s):
        # 'DE_load_actual_entsoe_...' -> 'DE'
        return s[:-len(pattern)]

    # Parse the first (utc_timestamp) column and drop the timezone marker;
    # this replaces the deprecated `date_parser` argument (removed in
    # recent pandas), which previously achieved the same via dateutil's
    # ignoretz=True. OPSD timestamps are UTC, so stripping the tz keeps
    # the wall-clock values unchanged.
    load = pd.read_csv(fn, index_col=0, parse_dates=[0])
    if isinstance(load.index, pd.DatetimeIndex) and load.index.tz is not None:
        load.index = load.index.tz_localize(None)

    return (load
            .filter(like=pattern)
            .rename(columns=strip_suffix)
            .dropna(how="all", axis=0)
            .rename(columns={'GB_UKM' : 'GB'})
            .filter(items=countries)
            .loc[years])
def consecutive_nans(ds):
    """Return, for every entry of ``ds``, the length of the consecutive
    NaN run it belongs to (0 for non-missing entries)."""
    is_gap = ds.isnull()
    # The running count of valid entries is constant within a gap, so it
    # labels every NaN run uniquely.
    gap_label = ds.notnull().astype(int).cumsum()[is_gap]
    run_length = is_gap.astype(int).groupby(gap_label).transform('sum')
    # Non-missing positions were excluded from the groupby -> NaN -> 0.
    return run_length.fillna(0)
def fill_large_gaps(ds, shift):
    """
    Fill up large gaps with load data from the previous week.

    This function fills gaps ranging from 3 to 168 hours (one week).

    Parameters
    ----------
    ds : pd.Series
        Load time-series with a DatetimeIndex.
    shift : str or pd.Timedelta
        Offset of the time slice that is copied into a gap (e.g. '1w').

    Returns
    -------
    pd.Series
        Series with NaNs replaced by the value observed ``shift`` earlier.
    """
    shift = Delta(shift)
    nhours = shift / np.timedelta64(1, 'h')
    if (consecutive_nans(ds) > nhours).any():
        # Gaps longer than the copied slice cannot be filled completely.
        # (Fixed grammar of the log message: 'larger then' -> 'larger than'.)
        logger.warning('There exist gaps larger than the time shift used for '
                       'copying time slices.')
    # Shifting the index forward aligns each timestamp with the value seen
    # `shift` earlier; use that value wherever the original is NaN.
    time_shift = pd.Series(ds.values, ds.index + shift)
    return ds.where(ds.notnull(), time_shift.reindex_like(ds))
def nan_statistics(df):
    """Summarise missing data per column: total NaN count, longest run of
    consecutive NaNs, and the maximum NaN count within a single month."""
    def longest_gap(column):
        # Runs of NaNs share the same cumulative count of valid entries,
        # so summing the NaN indicator per group yields the run lengths.
        gap_id = column.notnull().astype(int).cumsum()
        return column.isnull().astype(int).groupby(gap_id).sum().max()

    nan_mask = df.isnull()
    stats = {
        'total': nan_mask.sum(),
        'consecutive': df.apply(longest_gap),
        'max_total_per_month': nan_mask.resample('m').sum().max(),
    }
    return pd.concat(stats, axis=1)
def copy_timeslice(load, cntry, start, stop, delta):
    """Overwrite ``load[cntry]`` between ``start`` and ``stop`` (inclusive)
    in place with the slice shifted back by ``delta``.

    Silently does nothing when the country column is missing or the source
    slice is not fully covered by the index.
    """
    start, stop = pd.Timestamp(start), pd.Timestamp(stop)
    source_start = start - delta
    if cntry not in load:
        return
    if source_start not in load.index or stop not in load.index:
        return
    load.loc[start:stop, cntry] = \
        load.loc[source_start:stop - delta, cntry].values
def manual_adjustment(load, powerstatistics, countries=None):
    """
    Adjust gaps manually for load data from the OPSD time-series package.

    1. For the ENTSOE power statistics load data (if powerstatistics is True)

    Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
    same load curve as Serbia and Albania the same as Macedonia, both scaled
    by the corresponding ratio of total energy consumptions reported by
    IEA Data browser [0] for the year 2013.

    2. For the ENTSOE transparency load data (if powerstatistics is False)

    Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
    same load curve as Montenegro, scaled by the corresponding ratio of total
    energy consumptions reported by IEA Data browser [0] for the year 2016.

    [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons

    Parameters
    ----------
    load : pd.DataFrame
        Load time-series with UTC timestamps x ISO-2 countries
    powerstatistics: bool
        Whether argument load comprises the electricity consumption data of
        the ENTSOE power statistics or of the ENTSOE transparency map
    countries : listlike, optional
        Countries for which missing transparency load curves (AL, MK) may
        be synthesized. Previously the transparency branch read a
        module-level ``countries`` variable that only exists when the
        script is run via snakemake, raising a NameError when this
        function was imported and called as a library function.

    Returns
    -------
    load : pd.DataFrame
        Manual adjusted and interpolated load time-series with UTC
        timestamps x ISO-2 countries
    """
    if countries is None:
        # Backwards-compatible fallback: use the global set in the
        # __main__ block if present, otherwise synthesize nothing extra.
        countries = globals().get('countries', load.columns)

    if powerstatistics:
        if 'MK' in load.columns:
            if 'AL' not in load.columns or load.AL.isnull().values.all():
                load['AL'] = load['MK'] * (4.1 / 7.4)
        if 'RS' in load.columns:
            if 'KV' not in load.columns or load.KV.isnull().values.all():
                load['KV'] = load['RS'] * (4.8 / 27.)
        copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
        copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
        copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
        copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
        # is a WE, so take WE before
        copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
        copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
        copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
        # whole january missing
        copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))
    else:
        if 'ME' in load:
            if 'AL' not in load and 'AL' in countries:
                load['AL'] = load.ME * (5.7/2.9)
            if 'MK' not in load and 'MK' in countries:
                load['MK'] = load.ME * (6.7/2.9)
        copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
    return load
if __name__ == "__main__":
    # Allow running the rule outside snakemake (e.g. in an IDE) by mocking
    # the snakemake object.
    if 'snakemake' not in globals():
        from _helpers import mock_snakemake
        snakemake = mock_snakemake('build_load_data')
    configure_logging(snakemake)
    config = snakemake.config
    powerstatistics = config['load']['power_statistics']
    interpolate_limit = config['load']['interpolate_limit']
    # NOTE: this module-level `countries` is also read inside
    # manual_adjustment's transparency branch.
    countries = config['countries']
    snapshots = pd.date_range(freq='h', **config['snapshots'])
    years = slice(snapshots[0], snapshots[-1])
    time_shift = config['load']['time_shift_for_large_gaps']
    load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
    if config['load']['manual_adjustments']:
        load = manual_adjustment(load, powerstatistics)
    # Small gaps: linear interpolation up to `interpolate_limit` hours.
    logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
    load = load.interpolate(method='linear', limit=interpolate_limit)
    # Large gaps: copy the time slice shifted by `time_shift` (e.g. one week).
    logger.info("Filling larger gaps by copying time-slices of period "
                f"'{time_shift}'.")
    load = load.apply(fill_large_gaps, shift=time_shift)
    assert not load.isna().any().any(), (
        'Load data contains nans. Adjust the parameters '
        '`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
        'for implementing the needed load data modifications.')
    load.to_csv(snakemake.output[0])
| 35.752212 | 415 | 0.653713 |
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
import dateutil
from pandas import Timedelta as Delta
# NOTE(review): the definitions below are a comment-stripped duplicate of the
# build_load_data implementation earlier in this file (an artifact of the
# dataset this file was extracted from); code is kept byte-identical.
# Read the OPSD load CSV and return a UTC-timestamp x ISO-2-country frame.
# NOTE(review): '_transparency' yields the pattern '..._entsoe__transparency'
# (double underscore) which presumably matches no OPSD column — verify.
def load_timeseries(fn, years, countries, powerstatistics=True):
    logger.info(f"Retrieving load data from '{fn}'.")
    pattern = 'power_statistics' if powerstatistics else '_transparency'
    pattern = f'_load_actual_entsoe_{pattern}'
    rename = lambda s: s[:-len(pattern)]
    date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
    return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
            .filter(like=pattern)
            .rename(columns=rename)
            .dropna(how="all", axis=0)
            .rename(columns={'GB_UKM' : 'GB'})
            .filter(items=countries)
            .loc[years])
# Length of the NaN run each entry belongs to (0 for valid entries).
def consecutive_nans(ds):
    return (ds.isnull().astype(int)
            .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
            .transform('sum').fillna(0))
# Fill NaNs with the value observed `shift` earlier (e.g. previous week).
def fill_large_gaps(ds, shift):
    shift = Delta(shift)
    nhours = shift / np.timedelta64(1, 'h')
    if (consecutive_nans(ds) > nhours).any():
        logger.warning('There exist gaps larger then the time shift used for '
                       'copying time slices.')
    time_shift = pd.Series(ds.values, ds.index + shift)
    return ds.where(ds.notnull(), time_shift.reindex_like(ds))
# Per-column NaN summary: total, longest run, max per calendar month.
def nan_statistics(df):
    def max_consecutive_nans(ds):
        return (ds.isnull().astype(int)
                .groupby(ds.notnull().astype(int).cumsum())
                .sum().max())
    consecutive = df.apply(max_consecutive_nans)
    total = df.isnull().sum()
    max_total_per_month = df.isnull().resample('m').sum().max()
    return pd.concat([total, consecutive, max_total_per_month],
                 keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
# In-place: copy the slice shifted by `delta` into [start, stop] of `cntry`.
def copy_timeslice(load, cntry, start, stop, delta):
    start = pd.Timestamp(start)
    stop = pd.Timestamp(stop)
    if start-delta in load.index and stop in load.index and cntry in load:
        load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values
# Country-specific gap fixes; synthesizes AL/KV (power statistics) or
# AL/MK (transparency) from neighbouring countries' scaled load curves.
# NOTE(review): the else-branch reads the module-level `countries` set only
# in the __main__ block — calling this as a library function with
# powerstatistics=False raises NameError.
def manual_adjustment(load, powerstatistics):
    if powerstatistics:
        if 'MK' in load.columns:
            if 'AL' not in load.columns or load.AL.isnull().values.all():
                load['AL'] = load['MK'] * (4.1 / 7.4)
        if 'RS' in load.columns:
            if 'KV' not in load.columns or load.KV.isnull().values.all():
                load['KV'] = load['RS'] * (4.8 / 27.)
        copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
        copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
        copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
        copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
        copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
        copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
        copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
        copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))
    else:
        if 'ME' in load:
            if 'AL' not in load and 'AL' in countries:
                load['AL'] = load.ME * (5.7/2.9)
            if 'MK' not in load and 'MK' in countries:
                load['MK'] = load.ME * (6.7/2.9)
        copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
    return load
# Script entry point: read, adjust, interpolate small gaps, copy time
# slices into large gaps and export the cleaned load time-series.
if __name__ == "__main__":
    if 'snakemake' not in globals():
        from _helpers import mock_snakemake
        snakemake = mock_snakemake('build_load_data')
    configure_logging(snakemake)
    config = snakemake.config
    powerstatistics = config['load']['power_statistics']
    interpolate_limit = config['load']['interpolate_limit']
    countries = config['countries']
    snapshots = pd.date_range(freq='h', **config['snapshots'])
    years = slice(snapshots[0], snapshots[-1])
    time_shift = config['load']['time_shift_for_large_gaps']
    load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
    if config['load']['manual_adjustments']:
        load = manual_adjustment(load, powerstatistics)
    logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
    load = load.interpolate(method='linear', limit=interpolate_limit)
    logger.info("Filling larger gaps by copying time-slices of period "
                f"'{time_shift}'.")
    load = load.apply(fill_large_gaps, shift=time_shift)
    assert not load.isna().any().any(), (
        'Load data contains nans. Adjust the parameters '
        '`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
        'for implementing the needed load data modifications.')
    load.to_csv(snakemake.output[0])
| true | true |
f71be7330e1eaec34340d30bcdded8315ca8fdf3 | 5,076 | py | Python | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | 1 | 2020-02-09T17:43:43.000Z | 2020-02-09T17:43:43.000Z | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | null | null | null | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | null | null | null | import pytest
from scrapli.driver.generic.base_driver import ReadCallback
from scrapli.exceptions import ScrapliValueError
def test_get_prompt(monkeypatch, sync_generic_driver):
    """get_prompt should return whatever the channel reports."""
    # stupid test w/ the patch, but want coverage and in the future maybe the driver actually
    # does something to the prompt it gets from the channel
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.get_prompt", lambda x: "scrapli>")
    assert sync_generic_driver.get_prompt() == "scrapli>"
def test__send_command(monkeypatch, sync_generic_driver):
    """_send_command wraps the channel's (raw, processed) bytes into a
    successful Response object."""
    # Patch the channel so no real device connection is needed.
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver._send_command(command="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test__send_command_no_base_transport_args(sync_generic_driver):
    """_send_command must raise ScrapliValueError when the driver has no
    base transport args (i.e. is not properly set up)."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver._send_command(command="nada")
def test_send_command(monkeypatch, sync_generic_driver):
    """Public send_command mirrors _send_command's Response handling."""
    # Patch the channel so no real device connection is needed.
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_command(command="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_commands(monkeypatch, sync_generic_driver):
    """send_commands returns a MultiResponse with one Response per command."""
    # Patch the channel so no real device connection is needed.
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands(commands=["nada", "nada2"])
    assert len(actual_response) == 2
    assert actual_response.failed is False
    assert actual_response[0].failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_commands_from_file(fs, monkeypatch, real_ssh_commands_file_path, sync_generic_driver):
    """send_commands_from_file reads commands from a file and sends each one.

    Uses the pyfakefs ``fs`` fixture to expose the real commands file at a
    fake path, and patches the channel so no device connection is needed.
    """
    fs.add_real_file(source_path=real_ssh_commands_file_path, target_path="/commands")
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands_from_file(file="commands")
    assert actual_response.failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_and_read(monkeypatch, sync_generic_driver):
    """send_and_read wraps the channel's send_input_and_read result in a
    successful Response."""
    # Patch the channel so no real device connection is needed.
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input_and_read",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_and_read(channel_input="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_and_read_no_base_transport_args(sync_generic_driver):
    """send_and_read must raise ScrapliValueError without base transport
    args."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_and_read(channel_input="nada")
def test_send_interactive(monkeypatch, sync_generic_driver):
    """send_interactive wraps the channel's interact result in a successful
    Response."""
    # Patch the channel so no real device connection is needed.
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_inputs_interact",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_interactive(interact_events=[("nada", "scrapli>")])
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_interact_no_base_transport_args(sync_generic_driver):
    """send_interactive must raise ScrapliValueError without base transport
    args."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_interactive(interact_events=[])
def test_readcallback_basic(monkeypatch, sync_generic_driver):
    """read_callback should fire a `only_once` callback exactly once and
    stop the read loop when a callback marked `complete=True` fires."""
    # The patched channel always "reads" the same prompt and swallows writes,
    # so both callbacks match on the very first read.
    def _read(cls):
        return b"rtr1#"
    def _write(cls, channel_input, redacted=False):
        return
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.read", _read)
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.write", _write)
    callback_one_counter = 0
    callback_two_counter = 0
    def callback_one(cls, read_output):
        nonlocal callback_one_counter
        callback_one_counter += 1
    def callback_two(cls, read_output):
        nonlocal callback_two_counter
        callback_two_counter += 1
    callbacks = [
        ReadCallback(
            contains="rtr1#",
            callback=callback_one,
            name="call1",
            case_insensitive=False,
            only_once=True,
        ),
        ReadCallback(
            contains_re=r"^rtr1#",
            callback=callback_two,
            complete=True,
        ),
    ]
    sync_generic_driver.read_callback(callbacks=callbacks, initial_input="nada")
    # Each callback ran exactly once: the first because only_once=True, the
    # second because complete=True ends the loop after it fires.
    assert callback_one_counter == 1
    assert callback_two_counter == 1
assert callback_two_counter == 1
| 35.496503 | 100 | 0.731678 | import pytest
from scrapli.driver.generic.base_driver import ReadCallback
from scrapli.exceptions import ScrapliValueError
def test_get_prompt(monkeypatch, sync_generic_driver):
monkeypatch.setattr("scrapli.channel.sync_channel.Channel.get_prompt", lambda x: "scrapli>")
assert sync_generic_driver.get_prompt() == "scrapli>"
def test__send_command(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver._send_command(command="nada")
assert actual_response.failed is False
assert actual_response.result == "processed"
assert actual_response.raw_result == b"raw"
def test__send_command_no_base_transport_args(sync_generic_driver):
sync_generic_driver._base_transport_args = None
with pytest.raises(ScrapliValueError):
sync_generic_driver._send_command(command="nada")
def test_send_command(monkeypatch, sync_generic_driver):
    """send_command returns a successful Response built from channel output."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_command(command="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_commands(monkeypatch, sync_generic_driver):
    """send_commands returns a multi-response with one Response per command."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands(commands=["nada", "nada2"])
    assert len(actual_response) == 2
    assert actual_response.failed is False
    assert actual_response[0].failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_commands_from_file(fs, monkeypatch, real_ssh_commands_file_path, sync_generic_driver):
    """send_commands_from_file reads commands from a (pyfakefs) file and sends them."""
    # `fs` is the pyfakefs fixture; expose the real commands file inside the fake fs.
    fs.add_real_file(source_path=real_ssh_commands_file_path, target_path="/commands")
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands_from_file(file="commands")
    assert actual_response.failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_and_read(monkeypatch, sync_generic_driver):
    """send_and_read returns a Response built from the channel's output."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input_and_read",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_and_read(channel_input="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_and_read_no_base_transport_args(sync_generic_driver):
    """send_and_read must refuse to run when base transport args are missing."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_and_read(channel_input="nada")
def test_send_interactive(monkeypatch, sync_generic_driver):
    """send_interactive drives an input/expected-prompt exchange and returns a Response."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_inputs_interact",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_interactive(interact_events=[("nada", "scrapli>")])
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_interact_no_base_transport_args(sync_generic_driver):
    """send_interactive must refuse to run when base transport args are missing."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_interactive(interact_events=[])
def test_readcallback_basic(monkeypatch, sync_generic_driver):
    """Read callbacks fire on matching output; only_once and complete are honored."""
    # The fake channel always returns the same prompt, so both callbacks match.
    def _read(cls):
        return b"rtr1#"
    def _write(cls, channel_input, redacted=False):
        return
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.read", _read)
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.write", _write)
    callback_one_counter = 0
    callback_two_counter = 0
    def callback_one(cls, read_output):
        nonlocal callback_one_counter
        callback_one_counter += 1
    def callback_two(cls, read_output):
        nonlocal callback_two_counter
        callback_two_counter += 1
    callbacks = [
        ReadCallback(
            contains="rtr1#",
            callback=callback_one,
            name="call1",
            case_insensitive=False,
            only_once=True,
        ),
        ReadCallback(
            contains_re=r"^rtr1#",
            callback=callback_two,
            complete=True,
        ),
    ]
    sync_generic_driver.read_callback(callbacks=callbacks, initial_input="nada")
    # only_once caps the first callback at a single firing; complete=True on the
    # second callback terminates the read loop after it fires once.
    assert callback_one_counter == 1
    assert callback_two_counter == 1
| true | true |
f71be7afa0a994c58e2f0076275e35e23af1c2a5 | 16,119 | py | Python | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | import sys
import networkx as nx
import logging
import json
import requests
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from utils_twc.generic import escape_entities
# Logging formatting
FORMAT = '%(asctime)s %(message)s'
# Timestamped INFO logging to stdout for the whole module.
logging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)
# Module-level cache used by construct_kg ('graph'/'triplets'/'entities' keys).
kg = {}
# NOTE(review): `source_paths` is never read or written in this module —
# candidate for removal; confirm no external users before deleting.
source_paths= defaultdict(dict)
def shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):
    """Grow the episode's world graph from the background knowledge graph.

    Starts from the previous world graph plus `nodes`, then adds 'relatedTo'
    edges between inventory entities and command entities that lie within
    `path_len` hops of each other in `kg_graph`.

    :param kg_graph: full background KG (networkx graph).
    :param prev_graph: world graph carried over from the previous step.
    :param nodes: newly observed entity names to pull into the world graph.
    :param inventory_entities: entities currently held by the agent.
    :param command_entities: entities mentioned in admissible commands.
    :param path_len: maximum KG distance for adding a 'relatedTo' edge.
    :param add_all_path: unused in this implementation.
    :return: the updated world graph (a mutable copy; `prev_graph` is not modified).
    """
    if inventory_entities is None:
        inventory_entities = []
    if command_entities is None:
        command_entities = []
    # Get non-neighbor nodes: nodes without edges between them
    world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()
    world_graph = nx.compose(prev_graph,world_graph)
    world_graph.remove_edges_from(nx.selfloop_edges(world_graph))
    if path_len < 2:
        return world_graph
    triplets = []
    # Add command related relations
    pruned_entities = list(set(command_entities)-set(inventory_entities))
    if pruned_entities:
        for src_et in inventory_entities:
            for tgt_et in pruned_entities:
                if src_et != tgt_et:
                    try:
                        pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
                    except nx.NetworkXNoPath:
                        pair_dist = 0
                    # Only connect pairs that are close in the background KG.
                    if pair_dist >= 1 and pair_dist <= path_len:
                        triplets.append([src_et, tgt_et, 'relatedTo'])
    else: # no items in the pruned entities, won't happen
        # NOTE(review): this branch iterates command_entities as a list of
        # lists, unlike the branch above — confirm intended input shape.
        for entities in command_entities:
            for src_et in entities:
                for tgt_et in entities:
                    if src_et != tgt_et:
                        try:
                            pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
                        except nx.NetworkXNoPath:
                            pair_dist=0
                        if pair_dist >= 1 and pair_dist <= path_len:
                            triplets.append([src_et, tgt_et, 'relatedTo'])
    world_graph, _= add_triplets_to_graph(world_graph, triplets)
    return world_graph
def construct_graph(triplets):
    """Build a fresh directed graph from [entity1, entity2, relation] triplets.

    Entities are lowercased/stripped; repeated relations on the same edge are
    accumulated in the edge's space-separated 'relation' attribute.

    This is exactly `add_triplets_to_graph` applied to an empty graph, so we
    delegate instead of duplicating that loop.

    :param triplets: iterable of [e1, e2, relation] triplets.
    :return: (graph, entities) where `entities` maps each node name to itself.
    """
    return add_triplets_to_graph(nx.DiGraph(), triplets)
def add_triplets_to_graph(graph, triplets):
    """Insert [entity1, entity2, relation] triplets into *graph* in place.

    Entity and relation strings are lowercased and stripped. When an edge
    already exists, the new relation is appended to the edge's space-separated
    'relation' attribute (duplicates are skipped).

    :param graph: networkx graph to mutate.
    :param triplets: iterable of [e1, e2, relation] triplets.
    :return: (graph, entities) where `entities` maps node names already present
        to their attribute dicts and newly added node names to themselves.
    """
    entities = dict(graph.nodes.data())
    for raw_head, raw_tail, raw_rel in triplets:
        head = raw_head.lower().strip()
        tail = raw_tail.lower().strip()
        rel = raw_rel.lower().strip()
        # Register any endpoint we have not seen before.
        for node in (head, tail):
            if node not in entities:
                graph.add_node(node)
                entities[node] = node
        if graph.has_edge(head, tail):
            # Accumulate distinct relations on the existing edge.
            if rel not in graph.edges[head, tail]['relation']:
                graph.edges[head, tail]['relation'] += ' ' + rel
        else:
            graph.add_edge(head, tail, relation=rel)
    return graph, entities
def draw_graph(graph, title="cleanup", show_relation=True, weights=None, pos=None):
    """Draw *graph* with networkx/matplotlib, optionally coloring nodes by weight.

    :param graph: networkx graph to render.
    :param title: label passed through to ``nx.draw``.
    :param show_relation: annotate edges with their 'relation' attribute.
    :param weights: optional per-node weights (numpy array) used as node colors.
    :param pos: optional precomputed layout; a spring layout is used otherwise.
    """
    if not pos:
        pos = nx.spring_layout(graph, k=0.95)
    # Bug fix: `if weights:` raises "truth value of an array is ambiguous" for
    # any numpy array with more than one element (and the .tolist()/np.min calls
    # below show an ndarray is expected) — test for None explicitly instead.
    if weights is not None:
        nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),
                vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,
                label=title,cmap='Blues')
    else:
        nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',
                node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)
    if show_relation:
        p_edge = nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',
                                              edge_labels=nx.get_edge_attributes(graph, 'relation'))
def draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):
    """Draw *graph* coloring each node by its value in *node_weights*.

    :param graph: networkx graph to render.
    :param node_weights: mapping from node id/name to a scalar (e.g. attention).
    :param showbar: also display a colorbar for the weight scale.
    :param cmap: matplotlib colormap name.
    """
    # node_weights: maps node id/name to attention weights
    pos = nx.spring_layout(graph, k=0.95)
    # Collect weights in graph.nodes order so colors line up with nodes.
    weights = []
    for node in graph.nodes:
        weights.append(node_weights[node])
    # cmap = plt.cm.YlGnBu#RdBu
    cmap = plt.get_cmap(cmap)
    vmin = np.min(weights)
    vmax = np.max(weights)
    nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,
            node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,
            node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    # ScalarMappable needs a (dummy) data array before it can back a colorbar.
    sm._A = []
    if showbar:
        plt.colorbar(sm)
    plt.show()
def construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):
    """Load a knowledge graph from a whitespace-separated ``e1 rel e2`` file.

    Results are memoised in the module-level ``kg`` dict when *cache_load* is
    true, so repeated calls (with any filename) return the first graph loaded.

    :param filename: path to the triplet file; falls back to the bundled
        ConceptNet export if it does not exist.
    :param print_every: emit a '*' progress marker every this many lines.
    :param cache_load: read/write the module-level cache.
    :param logger: unused here; kept for interface compatibility.
    :return: (undirected graph view, triplet list, entity dict).
    """
    # access edges with graph.edges.data('relation')
    if 'graph' in kg and cache_load:
        return kg['graph'], kg['triplets'], kg['entities']
    path = Path(filename)
    if not path.exists():
        # Fall back to the default ConceptNet export shipped with the repo.
        filename = './kg/conceptnet/kg.txt'
    triplets = []
    with open(filename, 'r') as fp:
        for idx, line in enumerate(fp):
            # Each line is "entity1 relation entity2" (whitespace separated).
            e1, r, e2 = line.rstrip("\n").rsplit()
            triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])
            if idx % print_every == 0:
                print("*",end='')
    [graph, entities] = construct_graph(triplets)
    graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a
    if cache_load:
        kg['graph'] = graph
        kg['triplets'] = triplets
        kg['entities'] = entities
    return graph, triplets, entities
class RelationExtractor:
    """Extracts (head, relation, tail) triplets from game observations.

    Uses a Stanford CoreNLP OpenIE server to mine triplets from text and
    incrementally updates a 'belief' graph of the agent's surroundings
    (current room, visible objects, exits, room connectivity).
    """
    def __init__(self, tokenizer, openie_url="http://localhost:9000/"):
        """
        :param tokenizer: project tokenizer providing clean_string/nlp_eval/ignore_list.
        :param openie_url: server url for Stanford Core NLPOpen IE
        """
        self.tokenizer = tokenizer
        self.openie_url = openie_url
        # Maps normalized entity names to integer ids (grows monotonically).
        self.kg_vocab = {}
        # Name of the room the agent was last placed in.
        self.agent_loc = ''
    def call_stanford_openie(self,sentence):
        """POST *sentence* to the CoreNLP server and return the decoded JSON."""
        # "properties" is the URL-encoded JSON {"annotators": "openie"}.
        querystring = {
            "properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
            "pipelineLanguage": "en"}
        response = requests.request("POST", self.openie_url, data=sentence, params=querystring)
        response = json.JSONDecoder().decode(response.text)
        return response
    def fetch_triplets(self,text, current_graph, prev_action=None):
        """Mine triplets from *text* and update *current_graph* in place.

        :param text: raw observation text from the game.
        :param current_graph: networkx DiGraph belief graph to update.
        :param prev_action: previous command, used to infer room connectivity
            when it contains a direction word.
        :return: (current_graph, add_rules) — the updated graph and the list of
            (head, relation, tail) rules added this step.
        """
        triplets = []
        remove = []
        prev_remove = []
        link = []
        c_id = len(self.kg_vocab.keys())
        obs = self.tokenizer.clean_string(text, preprocess=True)
        dirs = ['north', 'south', 'east', 'west']
        obs = str(obs)
        doc = self.tokenizer.nlp_eval(obs)
        sents = {}
        try:
            sents = self.call_stanford_openie(doc.text)['sentences']
        except:
            print("Error in connecting to Stanford CoreNLP OpenIE Server")
        # Collect raw OpenIE triplets, normalizing the narrator's "we" to "you".
        for ov in sents:
            tokens = ov["tokens"]
            triple = ov['openie']
            for tr in triple:
                h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()
                if h == 'we':
                    h = 'you'
                if r == 'are in':
                    r = "'ve entered"
                if h == 'it':
                    break
                triplets.append((h, r, t))
        # First pass: detect the current room and classify transient relations.
        room = ""
        room_set = False
        for rule in triplets:
            h, r, t = rule
            if 'entered' in r or 'are in' in r or 'walked' in r:
                prev_remove.append(r)
                if not room_set:
                    room = t
                    room_set = True
            if 'should' in r:
                prev_remove.append(r)
            if 'see' in r or 'make out' in r:
                link.append((r, t))
                remove.append(r)
            # else:
            #     link.append((r, t))
        prev_room = self.agent_loc
        self.agent_loc = room
        add_rules = []
        # Movement in a direction links the previous room to the new one.
        if prev_action is not None:
            for d in dirs:
                if d in prev_action and room != "":
                    add_rules.append((prev_room, d + ' of', room))
        prev_room_subgraph = None
        prev_you_subgraph = None
        # Sentence-level pass: record exits mentioned in the observation.
        for sent in doc.sents:
            sent = sent.text
            if sent == ',' or sent == 'hm .':
                continue
            if 'exit' in sent or 'entranceway' in sent:
                for d in dirs:
                    if d in sent:
                        triplets.append((room, 'has', 'exit to ' + d))
        # Separate the previous room's component from the agent's component so
        # stale "you"-anchored edges can be dropped while room facts are kept.
        if prev_room != "":
            graph_copy = current_graph.copy()
            graph_copy.remove_edge('you', prev_room)
            con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]
            for con_c in con_cs:
                if prev_room in con_c.nodes:
                    prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
                if 'you' in con_c.nodes:
                    prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
        # Visible objects get attached to the current room.
        for l in link:
            add_rules.append((room, l[0], l[1]))
        for rule in triplets:
            h, r, t = rule
            if r == 'is in':
                if t == 'room':
                    t = room
            if r not in remove:
                add_rules.append((h, r, t))
        # Drop edges whose relation became stale when the room changed.
        edges = list(current_graph.edges)
        for edge in edges:
            r = 'relatedTo'
            if 'relation' in current_graph[edge[0]][edge[1]]:
                r = current_graph[edge[0]][edge[1]]['relation']
            if r in prev_remove:
                current_graph.remove_edge(*edge)
        if prev_you_subgraph is not None:
            current_graph.remove_edges_from(prev_you_subgraph.edges)
        # Register vocabulary ids and add the new rules as edges, skipping
        # entities containing tokenizer-ignored tokens.
        for rule in add_rules:
            u = '_'.join(str(rule[0]).split())
            v = '_'.join(str(rule[2]).split())
            if u != 'it' and u not in self.kg_vocab:
                self.kg_vocab[u] = c_id
                c_id += 1
            if v != 'it' and v not in self.kg_vocab:
                self.kg_vocab[v] = c_id
                c_id += 1
            skip_flag = False
            for skip_token in self.tokenizer.ignore_list:
                if skip_token in u or skip_token in v:
                    skip_flag = True
            if u != 'it' and v != 'it' and not skip_flag:
                r = str(rule[1]).lower()
                if not rule[1] or rule[1] == '':
                    r = 'relatedTo'
                current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)
        prev_edges = current_graph.edges
        # Re-attach the previous room's remembered facts.
        if prev_room_subgraph is not None:
            current_graph.add_edges_from(prev_room_subgraph.edges)
        current_edges = current_graph.edges
        return current_graph, add_rules
def khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):
    """Return the subgraph of *entities* plus nodes within *cutoff* hops.

    :param graph: source networkx graph.
    :param entities: seed nodes (always kept).
    :param cutoff: maximum hop distance from each seed.
    :param max_khop_degree: if set, a seed whose reachable neighbourhood is
        larger than this contributes no extra nodes (hub suppression).
    :return: subgraph induced by the seeds and their kept neighbourhoods.
    """
    keep = set(entities)
    for seed in entities:
        reachable = nx.single_source_shortest_path(graph, seed, cutoff=cutoff).keys()
        if not max_khop_degree or len(reachable) <= max_khop_degree:
            keep.update(reachable)
    return graph.subgraph(keep)
def ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):
    """Grow *seed* breadth-first for *radius* rounds and return the subgraph.

    :param graph: source networkx graph.
    :param seed: iterable of starting nodes.
    :param radius: number of expansion rounds.
    :param undirected: expand over the undirected view of *graph*.
    :param max_degree: if set, nodes with more neighbours than this contribute
        none of them (high-degree hubs are skipped, not truncated).
    :return: subgraph of *graph* induced by all collected nodes.
    """
    view = graph.to_undirected() if undirected else graph
    collected = set(seed)
    frontier = set(seed)
    for _ in range(radius):
        next_frontier = set()
        for node in frontier:
            adjacent = {neighbor for neighbor in view[node]}
            if max_degree is None or len(adjacent) <= max_degree:
                next_frontier |= adjacent
        collected |= next_frontier
        frontier = next_frontier
    return graph.subgraph(collected)
def shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):
    """Expand *seed* with nodes lying on shortest paths between seed pairs.

    :param graph: source networkx graph.
    :param seed: iterable of seed nodes.
    :param cutoff: drop paths with more than this many nodes (None = no limit).
    :param undirected: search paths on the undirected view of *graph*.
    :param keep_all: include every shortest path between a pair, not just one.
    :return: subgraph of *graph* induced by the expanded node set.
    """
    nodes = set(seed)
    seed = list(seed)
    working_graph = graph
    if undirected:
        working_graph = graph.to_undirected()
    for i in range(len(seed)):
        # Undirected paths are symmetric, so visit each unordered pair once.
        start = i + 1 if undirected else 0
        for j in range(start, len(seed)):
            try:
                if not keep_all:
                    path = nx.shortest_path(working_graph, seed[i], seed[j])
                    if cutoff is None or len(path) <= cutoff:
                        nodes |= set(path)
                else:
                    paths = nx.all_shortest_paths(working_graph, seed[i], seed[j])
                    for p in paths:
                        if cutoff is None or len(p) <= cutoff:
                            nodes |= set(p)
            except nx.NetworkXNoPath:
                continue
    return graph.subgraph(nodes)
def load_manual_graphs(path):
    """Load hand-curated ConceptNet subgraphs, keyed by game id.

    Scans *path* recursively for files named
    ``conceptnet_manual_subgraph-<game_id>.tsv`` and loads each one.

    :param path: directory to search.
    :return: dict mapping game id to {'graph', 'triplets', 'entities'}.
    """
    path = Path(path)
    manual_world_graphs = {}
    if not path.exists():
        print('None Found.')
        return manual_world_graphs
    files = path.rglob("conceptnet_manual_subgraph-*.tsv")
    for file in files:
        # Filename pattern: conceptnet_manual_subgraph-<game_id>.tsv
        game_id = str(file).split('-')[-1].split('.')[0]
        graph, triplets, entities = construct_kg(file, cache_load=False)
        manual_world_graphs[game_id]={}
        manual_world_graphs[game_id]['graph'] = graph
        manual_world_graphs[game_id]['triplets'] = triplets
        manual_world_graphs[game_id]['entities'] = entities
    print(' DONE')
    return manual_world_graphs
def kg_match(extractor, target_entities, kg_entities):
    """Match each target entity against the KG vocabulary.

    :param extractor: callable (entity, kg_entities) -> set of matches.
    :param target_entities: entity strings to look up (normalized here).
    :param kg_entities: KG vocabulary, escaped once before matching.
    :return: union of all match sets.
    """
    escaped_kg_entities = escape_entities(kg_entities)
    matches = set()
    for entity in target_entities:
        matches |= extractor(entity.lower().strip(), escaped_kg_entities)
    return matches
def save_graph_tsv(graph, path):
    """Write *graph* to *path* as TSV, one ``head<TAB>rel<TAB>tail`` row per relation.

    Edges carrying several space-separated relations produce one row each.
    """
    relation_map = nx.get_edge_attributes(graph, 'relation')
    rows = [
        f'{head}\t{rel}\t{tail}\n'
        for head, tail in graph.edges
        for rel in relation_map[head, tail].split()
    ]
    with open(path, 'w') as handle:
        handle.writelines(rows)
if __name__ == '__main__':
    # Manual smoke test: run the relation extractor on one canned observation
    # against a remote CoreNLP OpenIE server and print the extracted rules.
    from utils_twc import extractor
    from utils_twc.nlp import Tokenizer
    tk_extractor = extractor.get_extractor('max')
    tokenizer = Tokenizer(extractor=tk_extractor)
    rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')
    # text = 'On the table, you see an apple, a hat, a key and an umbrella. '
    text = "You've just walked into a Living Room. You try to gain information on your " \
           "surroundings by using a technique you call looking. You can see a closet. " \
           "You idly wonder how they came up with the name TextWorld for this place. " \
           "It's pretty fitting. A closed standard looking antique trunk is in the room. " \
           "You can see a table. The table is usual. On the table you see an apple, a mug, " \
           "a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow " \
           "it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a " \
           "gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. " \
           "On the tv stand you can make out a tv. You don't like doors? Why not try going east, " \
           "that entranceway is unguarded. You are carrying nothing."
    sents = text
    # clauses = clausie.clausie(text)
    # propositions = clausie.extract_propositions(clauses)
    # sents = ''
    # for prop in propositions:
    #     sent = clausie.proposition_text_str(prop)
    #     sents += sent
    #     print(sent)
    graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())
    print(add_rules)
| 37.927059 | 136 | 0.578262 | import sys
import networkx as nx
import logging
import json
import requests
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from utils_twc.generic import escape_entities
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)
kg = {}
source_paths= defaultdict(dict)
def shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):
if inventory_entities is None:
inventory_entities = []
if command_entities is None:
command_entities = []
world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()
world_graph = nx.compose(prev_graph,world_graph)
world_graph.remove_edges_from(nx.selfloop_edges(world_graph))
if path_len < 2:
return world_graph
triplets = []
pruned_entities = list(set(command_entities)-set(inventory_entities))
if pruned_entities:
for src_et in inventory_entities:
for tgt_et in pruned_entities:
if src_et != tgt_et:
try:
pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
except nx.NetworkXNoPath:
pair_dist = 0
if pair_dist >= 1 and pair_dist <= path_len:
triplets.append([src_et, tgt_et, 'relatedTo'])
else:
for entities in command_entities:
for src_et in entities:
for tgt_et in entities:
if src_et != tgt_et:
try:
pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
except nx.NetworkXNoPath:
pair_dist=0
if pair_dist >= 1 and pair_dist <= path_len:
triplets.append([src_et, tgt_et, 'relatedTo'])
world_graph, _= add_triplets_to_graph(world_graph, triplets)
return world_graph
def construct_graph(triplets):
graph = nx.DiGraph()
entities = {}
for [e1, e2, r] in triplets:
e1 = e1.lower().strip()
e2 = e2.lower().strip()
r = r.lower().strip()
if e1 not in entities:
graph.add_node(e1)
entities[e1] = e1
if e2 not in entities:
graph.add_node(e2)
entities[e2] = e2
# Add Edge information
if graph.has_edge(e1, e2):
if r not in graph.edges[e1, e2]['relation']:
graph.edges[e1, e2]['relation'] += ' ' + r
else:
graph.add_edge(e1, e2, relation=r)
return graph, entities
def add_triplets_to_graph(graph, triplets):
entities = dict(graph.nodes.data())
for [e1, e2, r] in triplets:
e1 = e1.lower().strip()
e2 = e2.lower().strip()
r = r.lower().strip()
if e1 not in entities:
graph.add_node(e1)
entities[e1] = e1
if e2 not in entities:
graph.add_node(e2)
entities[e2] = e2
# Add Edge information
if graph.has_edge(e1, e2):
if r not in graph.edges[e1, e2]['relation']:
graph.edges[e1, e2]['relation'] += ' ' + r
else:
graph.add_edge(e1, e2, relation=r)
return graph, entities
def draw_graph(graph, title="cleanup", show_relation=True, weights=None, pos=None):
if not pos:
pos = nx.spring_layout(graph, k=0.95)
if weights:
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),
vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,
label=title,cmap='Blues')
else:
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',
node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)
if show_relation:
p_edge = nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',
edge_labels=nx.get_edge_attributes(graph, 'relation'))
def draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):
# node_weights: maps node id/name to attention weights
pos = nx.spring_layout(graph, k=0.95)
weights = []
for node in graph.nodes:
weights.append(node_weights[node])
# cmap = plt.cm.YlGnBu#RdBu
cmap = plt.get_cmap(cmap)
vmin = np.min(weights)
vmax = np.max(weights)
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,
node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,
node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
if showbar:
plt.colorbar(sm)
plt.show()
def construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):
# access edges with graph.edges.data('relation')
if 'graph' in kg and cache_load:
return kg['graph'], kg['triplets'], kg['entities']
path = Path(filename)
if not path.exists():
filename = './kg/conceptnet/kg.txt'
triplets = []
with open(filename, 'r') as fp:
for idx, line in enumerate(fp):
e1, r, e2 = line.rstrip("\n").rsplit()
triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])
if idx % print_every == 0:
print("*",end='')
[graph, entities] = construct_graph(triplets)
graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a
if cache_load:
kg['graph'] = graph
kg['triplets'] = triplets
kg['entities'] = entities
return graph, triplets, entities
class RelationExtractor:
def __init__(self, tokenizer, openie_url="http://localhost:9000/"):
self.tokenizer = tokenizer
self.openie_url = openie_url
self.kg_vocab = {}
self.agent_loc = ''
def call_stanford_openie(self,sentence):
querystring = {
"properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
"pipelineLanguage": "en"}
response = requests.request("POST", self.openie_url, data=sentence, params=querystring)
response = json.JSONDecoder().decode(response.text)
return response
def fetch_triplets(self,text, current_graph, prev_action=None):
triplets = []
remove = []
prev_remove = []
link = []
c_id = len(self.kg_vocab.keys())
obs = self.tokenizer.clean_string(text, preprocess=True)
dirs = ['north', 'south', 'east', 'west']
obs = str(obs)
doc = self.tokenizer.nlp_eval(obs)
sents = {}
try:
sents = self.call_stanford_openie(doc.text)['sentences']
except:
print("Error in connecting to Stanford CoreNLP OpenIE Server")
for ov in sents:
tokens = ov["tokens"]
triple = ov['openie']
for tr in triple:
h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()
if h == 'we':
h = 'you'
if r == 'are in':
r = "'ve entered"
if h == 'it':
break
triplets.append((h, r, t))
room = ""
room_set = False
for rule in triplets:
h, r, t = rule
if 'entered' in r or 'are in' in r or 'walked' in r:
prev_remove.append(r)
if not room_set:
room = t
room_set = True
if 'should' in r:
prev_remove.append(r)
if 'see' in r or 'make out' in r:
link.append((r, t))
remove.append(r)
prev_room = self.agent_loc
self.agent_loc = room
add_rules = []
if prev_action is not None:
for d in dirs:
if d in prev_action and room != "":
add_rules.append((prev_room, d + ' of', room))
prev_room_subgraph = None
prev_you_subgraph = None
for sent in doc.sents:
sent = sent.text
if sent == ',' or sent == 'hm .':
continue
if 'exit' in sent or 'entranceway' in sent:
for d in dirs:
if d in sent:
triplets.append((room, 'has', 'exit to ' + d))
if prev_room != "":
graph_copy = current_graph.copy()
graph_copy.remove_edge('you', prev_room)
con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]
for con_c in con_cs:
if prev_room in con_c.nodes:
prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
if 'you' in con_c.nodes:
prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
for l in link:
add_rules.append((room, l[0], l[1]))
for rule in triplets:
h, r, t = rule
if r == 'is in':
if t == 'room':
t = room
if r not in remove:
add_rules.append((h, r, t))
edges = list(current_graph.edges)
for edge in edges:
r = 'relatedTo'
if 'relation' in current_graph[edge[0]][edge[1]]:
r = current_graph[edge[0]][edge[1]]['relation']
if r in prev_remove:
current_graph.remove_edge(*edge)
if prev_you_subgraph is not None:
current_graph.remove_edges_from(prev_you_subgraph.edges)
for rule in add_rules:
u = '_'.join(str(rule[0]).split())
v = '_'.join(str(rule[2]).split())
if u != 'it' and u not in self.kg_vocab:
self.kg_vocab[u] = c_id
c_id += 1
if v != 'it' and v not in self.kg_vocab:
self.kg_vocab[v] = c_id
c_id += 1
skip_flag = False
for skip_token in self.tokenizer.ignore_list:
if skip_token in u or skip_token in v:
skip_flag = True
if u != 'it' and v != 'it' and not skip_flag:
r = str(rule[1]).lower()
if not rule[1] or rule[1] == '':
r = 'relatedTo'
current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)
prev_edges = current_graph.edges
if prev_room_subgraph is not None:
current_graph.add_edges_from(prev_room_subgraph.edges)
current_edges = current_graph.edges
return current_graph, add_rules
def khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):
all_entities = []
for et in entities:
candidates = nx.single_source_shortest_path(graph, et, cutoff=cutoff).keys()
if not max_khop_degree or len(candidates)<=max_khop_degree:
all_entities.extend(list(candidates))
return graph.subgraph(set(entities)|set(all_entities))
def ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):
working_graph = graph
if undirected:
working_graph = graph.to_undirected()
marked = set(seed)
nodes = set(seed)
for _ in range(radius):
border = set()
for node in marked:
neighbors = {n for n in working_graph[node]}
if max_degree is None or len(neighbors) <= max_degree:
border |= neighbors
nodes |= border
marked = border
return graph.subgraph(nodes)
def shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):
nodes = set(seed)
seed = list(seed)
working_graph = graph
if undirected:
working_graph = graph.to_undirected()
for i in range(len(seed)):
start = i + 1 if undirected else 0
for j in range(start, len(seed)):
try:
if not keep_all:
path = nx.shortest_path(working_graph, seed[i], seed[j])
if cutoff is None or len(path) <= cutoff:
nodes |= set(path)
else:
paths = nx.all_shortest_paths(working_graph, seed[i], seed[j])
for p in paths:
if cutoff is None or len(p) <= cutoff:
nodes |= set(p)
except nx.NetworkXNoPath:
continue
return graph.subgraph(nodes)
def load_manual_graphs(path):
path = Path(path)
manual_world_graphs = {}
if not path.exists():
print('None Found.')
return manual_world_graphs
files = path.rglob("conceptnet_manual_subgraph-*.tsv")
for file in files:
game_id = str(file).split('-')[-1].split('.')[0]
graph, triplets, entities = construct_kg(file, cache_load=False)
manual_world_graphs[game_id]={}
manual_world_graphs[game_id]['graph'] = graph
manual_world_graphs[game_id]['triplets'] = triplets
manual_world_graphs[game_id]['entities'] = entities
print(' DONE')
return manual_world_graphs
def kg_match(extractor, target_entities, kg_entities):
result = set()
kg_entities = escape_entities(kg_entities)
for e in target_entities:
e = e.lower().strip()
result |= extractor(e, kg_entities)
return result
def save_graph_tsv(graph, path):
relation_map = nx.get_edge_attributes(graph, 'relation')
lines = []
for n1, n2 in graph.edges:
relations = relation_map[n1, n2].split()
for r in relations:
lines.append(f'{n1}\t{r}\t{n2}\n')
with open(path, 'w') as f:
f.writelines(lines)
if __name__ == '__main__':
from utils_twc import extractor
from utils_twc.nlp import Tokenizer
tk_extractor = extractor.get_extractor('max')
tokenizer = Tokenizer(extractor=tk_extractor)
rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')
text = "You've just walked into a Living Room. You try to gain information on your " \
"surroundings by using a technique you call looking. You can see a closet. " \
"You idly wonder how they came up with the name TextWorld for this place. " \
"It's pretty fitting. A closed standard looking antique trunk is in the room. " \
"You can see a table. The table is usual. On the table you see an apple, a mug, " \
"a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow " \
"it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a " \
"gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. " \
"On the tv stand you can make out a tv. You don't like doors? Why not try going east, " \
"that entranceway is unguarded. You are carrying nothing."
sents = text
# clauses = clausie.clausie(text)
# propositions = clausie.extract_propositions(clauses)
# sents = ''
# for prop in propositions:
# sent = clausie.proposition_text_str(prop)
# sents += sent
# print(sent)
graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())
print(add_rules)
| true | true |
f71be86a3b944cd3f5b50d8b2127cb921b32bfb6 | 1,048 | py | Python | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | import argparse
from .iterator import BedEntryIterator
from lhc.io.bed.tools import depth, sort, filter
from lhc.io.txt.tools import compress
def iter_bed(fname):
    """Yield entries from the BED file at *fname*.

    The underlying iterator is closed even when the consumer abandons the
    generator early or an error is raised mid-iteration — the original
    only closed it after exhaustion, leaking the handle otherwise.
    """
    it = BedEntryIterator(fname)
    try:
        yield from it
    finally:
        it.close()
def main():
    """Entry point: parse CLI arguments and dispatch to the chosen subcommand."""
    parser = get_parser()
    arguments = parser.parse_args()
    # Each subparser sets `func` via set_defaults/define_parser.
    arguments.func(arguments)
def get_parser():
    """Return a fully configured top-level argument parser."""
    return define_parser(argparse.ArgumentParser())
def define_parser(parser):
    """Attach the bed subcommands (compress/depth/filter/sort) to *parser*.

    Each subcommand's arguments are defined by its own module; this function
    only wires them into the shared parser and returns it.
    """
    subparsers = parser.add_subparsers()
    # Compress parser
    compress_parser = subparsers.add_parser('compress')
    compress.define_parser(compress_parser)
    # BED records are newline-delimited, so compression blocks split on '\n'.
    compress_parser.set_defaults(block_delimiter='\n')
    # Depth parser
    depth_parser = subparsers.add_parser('depth')
    depth.define_parser(depth_parser)
    # Filter parser
    filter_parser = subparsers.add_parser('filter')
    filter.define_parser(filter_parser)
    # Sort parser
    sort_parser = subparsers.add_parser('sort')
    sort.define_parser(sort_parser)
    return parser
if __name__ == '__main__':
    # Script entry point; the process exit status comes from main()'s return.
    import sys
    sys.exit(main())
| 23.818182 | 55 | 0.719466 | import argparse
from .iterator import BedEntryIterator
from lhc.io.bed.tools import depth, sort, filter
from lhc.io.txt.tools import compress
def iter_bed(fname):
it = BedEntryIterator(fname)
for entry in it:
yield entry
it.close()
def main():
args = get_parser().parse_args()
args.func(args)
def get_parser():
return define_parser(argparse.ArgumentParser())
def define_parser(parser):
subparsers = parser.add_subparsers()
compress_parser = subparsers.add_parser('compress')
compress.define_parser(compress_parser)
compress_parser.set_defaults(block_delimiter='\n')
depth_parser = subparsers.add_parser('depth')
depth.define_parser(depth_parser)
filter_parser = subparsers.add_parser('filter')
filter.define_parser(filter_parser)
sort_parser = subparsers.add_parser('sort')
sort.define_parser(sort_parser)
return parser
if __name__ == '__main__':
import sys
sys.exit(main())
| true | true |
f71be8fe9b1bed16fe43ad8a1ea794cdbcec63d2 | 7,605 | py | Python | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:59:36.000Z | 2022-03-12T00:52:11.000Z | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Test the bootstrapping."""
# pylint: disable=protected-access
import asyncio
import logging
import os
from unittest.mock import Mock, patch
from homeassistant import bootstrap
import homeassistant.config as config_util
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
get_test_config_dir,
mock_coro,
mock_integration,
patch_yaml_files,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
# prevent .HA_VERSION file from being written
@patch("homeassistant.bootstrap.conf_util.process_ha_config_upgrade", Mock())
@patch(
    "homeassistant.util.location.async_detect_location_info",
    Mock(return_value=mock_coro(None)),
)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
@patch("homeassistant.bootstrap.async_enable_logging", Mock(return_value=True))
def test_from_config_file(hass):
    """Test with configuration file."""
    # Each component becomes a bare "<name>:" entry in the fake YAML config.
    components = set(["browser", "conversation", "script"])
    files = {"config.yaml": "".join("{}:\n".format(comp) for comp in components)}
    with patch_yaml_files(files, True):
        # NOTE(review): plain generator using `yield from` — presumably the HA
        # pytest harness runs it as a coroutine; confirm the missing
        # @asyncio.coroutine decorator is intentional.
        yield from bootstrap.async_from_config_file("config.yaml", hass)
    assert components == hass.config.components
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
@asyncio.coroutine
def test_home_assistant_core_config_validation(hass):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done
result = yield from bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_from_config_file_not_mount_deps_folder(loop):
    """Test that we not mount the deps folder inside async_from_config_file."""
    # Bare mock hass; async_add_executor_job must hand back an awaitable.
    hass = Mock(async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))

    # Outside a virtualenv the local deps path is mounted exactly once.
    with patch("homeassistant.bootstrap.is_virtual_env", return_value=False), patch(
        "homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
    ), patch(
        "homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
    ) as mock_mount, patch(
        "homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
    ):
        await bootstrap.async_from_config_file("mock-path", hass)
        assert len(mock_mount.mock_calls) == 1

    # Inside a virtualenv the deps folder must not be mounted at all.
    with patch("homeassistant.bootstrap.is_virtual_env", return_value=True), patch(
        "homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
    ), patch(
        "homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
    ) as mock_mount, patch(
        "homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
    ):
        await bootstrap.async_from_config_file("mock-path", hass)
        assert len(mock_mount.mock_calls) == 0
async def test_load_hassio(hass):
    """Hass.io is only added to the domains when the supervisor env var is set."""
    empty_conf = {}

    # Without the HASSIO env var nothing extra is loaded.
    with patch.dict(os.environ, {}, clear=True):
        domains = bootstrap._get_domains(hass, empty_conf)
        assert domains == set()

    # The supervisor exports HASSIO; that alone pulls in the integration.
    with patch.dict(os.environ, {"HASSIO": "1"}):
        domains = bootstrap._get_domains(hass, empty_conf)
        assert domains == {"hassio"}
async def test_empty_setup(hass):
    """Setting up from an empty configuration still loads every core integration."""
    await bootstrap._async_set_up_integrations(hass, {})

    for core_domain in bootstrap.CORE_INTEGRATIONS:
        assert core_domain in hass.config.components, core_domain
async def test_core_failure_aborts(hass, caplog):
    """A failing homeassistant core setup aborts the remaining integrations."""
    failing_core_setup = patch(
        "homeassistant.components.homeassistant.async_setup",
        return_value=mock_coro(False),
    )

    with failing_core_setup:
        await bootstrap._async_set_up_integrations(hass, {"group": {}})

    assert "core failed to initialize" in caplog.text
    # Setup aborted early, so the group integration was never reached.
    assert "group" not in hass.config.components
async def test_setting_up_config(hass, caplog):
    """Domains named in the config are set up (suffixed keys map to the base domain)."""
    conf = {"group hello": {}, "homeassistant": {}}
    await bootstrap._async_set_up_integrations(hass, conf)

    assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass, caplog):
    """after_dependencies order the setup when every integration is present."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the domain name at the moment its setup coroutine runs.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    # Chain: second_dep runs after first_dep, which runs after root.
    for domain, after in (("root", None),
                          ("first_dep", "root"),
                          ("second_dep", "first_dep")):
        kwargs = {"domain": domain, "async_setup": make_setup(domain)}
        if after is not None:
            kwargs["partial_manifest"] = {"after_dependencies": [after]}
        mock_integration(hass, MockModule(**kwargs))

    await bootstrap._async_set_up_integrations(
        hass, {"root": {}, "first_dep": {}, "second_dep": {}}
    )

    for domain in ("root", "first_dep", "second_dep"):
        assert domain in hass.config.components
    assert setup_order == ["root", "first_dep", "second_dep"]
async def test_setup_after_deps_not_trigger_load(hass, caplog):
    """An after_dependency is ordering-only: it never triggers loading itself."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the domain name at the moment its setup coroutine runs.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    for domain, after in (("root", None),
                          ("first_dep", "root"),
                          ("second_dep", "first_dep")):
        kwargs = {"domain": domain, "async_setup": make_setup(domain)}
        if after is not None:
            kwargs["partial_manifest"] = {"after_dependencies": [after]}
        mock_integration(hass, MockModule(**kwargs))

    # first_dep is registered but deliberately left out of the config.
    await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})

    assert "root" in hass.config.components
    assert "first_dep" not in hass.config.components
    assert "second_dep" in hass.config.components
    assert setup_order == ["root", "second_dep"]
async def test_setup_after_deps_not_present(hass, caplog):
    """A missing after_dependency does not block setup of the dependent."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the domain name at the moment its setup coroutine runs.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    mock_integration(
        hass, MockModule(domain="root", async_setup=make_setup("root"))
    )
    # second_dep's after_dependency "first_dep" is never registered at all.
    mock_integration(
        hass,
        MockModule(
            domain="second_dep",
            async_setup=make_setup("second_dep"),
            partial_manifest={"after_dependencies": ["first_dep"]},
        ),
    )

    await bootstrap._async_set_up_integrations(
        hass, {"root": {}, "first_dep": {}, "second_dep": {}}
    )

    assert "root" in hass.config.components
    assert "first_dep" not in hass.config.components
    assert "second_dep" in hass.config.components
    assert setup_order == ["root", "second_dep"]
| 32.224576 | 86 | 0.682446 |
import asyncio
import logging
import os
from unittest.mock import Mock, patch
from homeassistant import bootstrap
import homeassistant.config as config_util
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
get_test_config_dir,
mock_coro,
mock_integration,
patch_yaml_files,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
@patch("homeassistant.bootstrap.conf_util.process_ha_config_upgrade", Mock())
@patch(
"homeassistant.util.location.async_detect_location_info",
Mock(return_value=mock_coro(None)),
)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
@patch("homeassistant.bootstrap.async_enable_logging", Mock(return_value=True))
def test_from_config_file(hass):
components = set(["browser", "conversation", "script"])
files = {"config.yaml": "".join("{}:\n".format(comp) for comp in components)}
with patch_yaml_files(files, True):
yield from bootstrap.async_from_config_file("config.yaml", hass)
assert components == hass.config.components
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
@asyncio.coroutine
def test_home_assistant_core_config_validation(hass):
result = yield from bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_from_config_file_not_mount_deps_folder(loop):
hass = Mock(async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))
with patch("homeassistant.bootstrap.is_virtual_env", return_value=False), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 1
with patch("homeassistant.bootstrap.is_virtual_env", return_value=True), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 0
async def test_load_hassio(hass):
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"HASSIO": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
async def test_empty_setup(hass):
await bootstrap._async_set_up_integrations(hass, {})
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_aborts(hass, caplog):
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap._async_set_up_integrations(hass, {"group": {}})
assert "core failed to initialize" in caplog.text
assert "group" not in hass.config.components
async def test_setting_up_config(hass, caplog):
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "first_dep", "second_dep"]
async def test_setup_after_deps_not_trigger_load(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
async def test_setup_after_deps_not_present(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
| true | true |
f71beaaf95d2695d9793e2cc7434d033c3c66f17 | 148 | py | Python | cargo/infra/cargo_provider_impl.py | agiledragon/ddd-sample-in-python | 5268e580845e599d8d3488c92bd1b44f4ece2378 | [
"MIT"
] | 2 | 2018-08-24T15:09:07.000Z | 2018-10-29T01:45:21.000Z | cargo/infra/cargo_provider_impl.py | agiledragon/ddd-sample-in-python | 5268e580845e599d8d3488c92bd1b44f4ece2378 | [
"MIT"
] | null | null | null | cargo/infra/cargo_provider_impl.py | agiledragon/ddd-sample-in-python | 5268e580845e599d8d3488c92bd1b44f4ece2378 | [
"MIT"
] | 3 | 2018-07-30T02:07:21.000Z | 2021-02-18T07:04:21.000Z | from domain.model.base.provider import Provider
class CargoProviderImpl(Provider):
    """Infrastructure-side implementation of the cargo ``Provider`` port."""

    def confirm(self, cargo):
        """Confirm *cargo*; this sample implementation just reports the action."""
        # Python 2 `print` statement replaced with the function form: the
        # single-argument call behaves identically on Python 2 and also
        # parses under Python 3 (the statement form is a SyntaxError there).
        print('confirm cargo')
| 14.8 | 47 | 0.722973 | from domain.model.base.provider import Provider
class CargoProviderImpl(Provider):
def confirm(self, cargo):
print 'confirm cargo'
| false | true |
f71beb6536594f288089770f787a036271ea7c72 | 1,832 | py | Python | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 808 | 2018-04-17T17:43:12.000Z | 2019-08-18T07:39:13.000Z | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 728 | 2018-04-18T08:15:25.000Z | 2019-08-16T07:14:43.000Z | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 364 | 2018-04-18T17:05:02.000Z | 2019-08-18T03:25:38.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
    """Draw a random ProgramConfig exercising the unique_with_counts op.

    *draw* is a hypothesis draw function; the op input is a 1-D tensor
    whose single dimension is sampled from [2, 100].
    """
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=2, max_value=100), min_size=1, max_size=1))

    def generate_IndexTensor():
        # Index weight tensor: random int32 values in [1, 5).
        return np.random.randint(1, 5, size=in_shape).astype(np.int32)

    unique_with_counts_op = OpConfig(
        type="unique_with_counts",
        inputs={"X": ["input_data"]},
        outputs={
            "Out": ["output_data"],
            "Index": ["Index_data"],
            "Count": ["Count_data"]
        },
        attrs={"dtype": 2})  # presumably 2 selects int32 index dtype — TODO confirm against op spec

    program_config = ProgramConfig(
        ops=[unique_with_counts_op],
        weights={
            "Index_data": TensorConfig(data_gen=partial(generate_IndexTensor))
        },
        inputs={"input_data": TensorConfig(shape=in_shape), },
        outputs=["output_data", "Index_data", "Count_data"])
    return program_config
| 34.566038 | 125 | 0.68559 |
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
in_shape = draw(
st.lists(
st.integers(
min_value=2, max_value=100), min_size=1, max_size=1))
def generate_IndexTensor():
return np.random.randint(1, 5, size=in_shape).astype(np.int32)
unique_with_counts_op = OpConfig(
type="unique_with_counts",
inputs={"X": ["input_data"]},
outputs={
"Out": ["output_data"],
"Index": ["Index_data"],
"Count": ["Count_data"]
},
attrs={"dtype": 2})
program_config = ProgramConfig(
ops=[unique_with_counts_op],
weights={
"Index_data": TensorConfig(data_gen=partial(generate_IndexTensor))
},
inputs={"input_data": TensorConfig(shape=in_shape), },
outputs=["output_data", "Index_data", "Count_data"])
return program_config
| true | true |
f71bebf8420c687bf788c8fe852634cda08565c9 | 54,956 | py | Python | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | 2 | 2019-02-21T10:43:16.000Z | 2019-07-30T04:56:37.000Z | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | null | null | null | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | null | null | null | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics.scorer import check_scoring
from ..exceptions import ConvergenceWarning
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve ridge per target with scipy's conjugate-gradient solver.

    Runs one CG solve per column of ``y`` (``alpha`` holds one penalty per
    target) and returns an array of shape (n_targets, n_features).
    """
    n_samples, n_features = X.shape
    # Wrap X so only matvec/rmatvec products are needed (works for sparse X).
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)

    # Pick the smaller normal-equations system: dual (n_samples x n_samples)
    # when there are more features than samples, primal otherwise.
    if n_features > n_samples:
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]
        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
            except TypeError:
                # old scipy without the atol parameter
                coef, info = sp_linalg.cg(C, y_column, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol, atol='legacy')
            except TypeError:
                # old scipy without the atol parameter
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol)

        # scipy.sparse.linalg.cg: info < 0 means illegal input/breakdown,
        # info > 0 means it stopped without reaching tol.
        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info, ConvergenceWarning)

    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
    """Solve ridge per target with scipy's regularized least squares (LSQR).

    Returns ``(coefs, n_iter)`` where ``coefs`` has shape
    (n_targets, n_features) and ``n_iter`` is the per-target iteration count.
    """
    n_targets = y.shape[1]
    n_features = X.shape[1]
    coefs = np.empty((n_targets, n_features), dtype=X.dtype)
    n_iter = np.empty(n_targets, dtype=np.int32)

    # lsqr regularizes with damp ** 2, so pass sqrt(alpha) as the damp factor.
    damps = np.sqrt(alpha)

    for target_idx in range(n_targets):
        result = sp_linalg.lsqr(X, y[:, target_idx], damp=damps[target_idx],
                                atol=tol, btol=tol, iter_lim=max_iter)
        coefs[target_idx] = result[0]
        n_iter[target_idx] = result[2]

    return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Closed-form ridge via the primal normal equations.

    Computes w = inv(X^T X + alpha * Id) X^T y for each target, with one
    penalty per target in ``alpha``.
    """
    n_features = X.shape[1]
    n_targets = y.shape[1]

    gram = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    # With a single shared penalty every target can reuse one factorization.
    shared_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if shared_alpha:
        gram.flat[::n_features + 1] += alpha[0]
        return linalg.solve(gram, Xy, sym_pos=True,
                            overwrite_a=True).T

    # Distinct penalties: patch the diagonal, solve, then restore it so the
    # gram matrix can be reused for the next target.
    coefs = np.empty([n_targets, n_features], dtype=X.dtype)
    for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
        gram.flat[::n_features + 1] += current_alpha
        coef[:] = linalg.solve(gram, target, sym_pos=True,
                               overwrite_a=False).ravel()
        gram.flat[::n_features + 1] -= current_alpha
    return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve the dual (kernel) ridge problem on a precomputed kernel K.

    Computes dual_coef = inv(K + alpha * Id) y.  Unless ``copy=True``, K is
    modified in place and restored before returning.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    if copy:
        K = K.copy()

    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]

        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]

        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]

        if has_sw:
            dual_coef *= sw[:, np.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples], K.dtype)

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            # Patch the diagonal for this target's penalty, then undo it so
            # K stays reusable for the next target.
            K.flat[::n_samples + 1] += current_alpha

            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()

            K.flat[::n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[np.newaxis, :]

        return dual_coefs.T
def _solve_svd(X, y, alpha):
    """Ridge coefficients through a thin SVD of X.

    With X = U diag(s) Vt, the solution for penalty ``a`` is
    V diag(s / (s**2 + a)) U^T y.  Singular values at or below 1e-15
    (the scipy.linalg.pinv default cutoff) are treated as exactly zero.
    """
    U, s, Vt = linalg.svd(X, full_matrices=False)
    nonzero = s > 1e-15  # same default value as scipy.linalg.pinv
    s_kept = s[nonzero][:, np.newaxis]

    # Per-(singular value, penalty) shrinkage factors; zeroed-out singular
    # values contribute nothing.
    shrink = np.zeros((s.size, alpha.size), dtype=X.dtype)
    shrink[nonzero] = s_kept / (s_kept ** 2 + alpha)

    projected = np.dot(U.T, y)
    return np.dot(Vt.T, shrink * projected).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve the ridge equation by the method of normal equations.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data

    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values

    alpha : {float, array-like},
        shape = [n_targets] if array-like
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.

        .. versionadded:: 0.17

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        All last five solvers support both dense and sparse data. However, only
        'sag' and 'saga' support sparse input when `fit_intercept` is True.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
        1000.

    tol : float
        Precision of the solution.

    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.

        .. versionadded:: 0.17

    return_intercept : boolean, default False
        If True and if X is sparse, the method also returns the intercept,
        and the solver is automatically changed to 'sag'. This is only a
        temporary fix for fitting the intercept with sparse data. For dense
        data, use sklearn.linear_model._preprocess_data before your regression.

        .. versionadded:: 0.17

    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.

    intercept : float or array, shape = [n_targets]
        The intercept of the model. Only returned if `return_intercept`
        is True and if X is a scipy sparse array.

    Notes
    -----
    This function won't compute the intercept.
    """
    # Fitting an intercept on sparse X is only implemented by 'sag'.
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'

    _dtype = [np.float64, np.float32]

    # SAG needs X and y columns to be C-contiguous and np.float64
    if solver in ['sag', 'saga']:
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=_dtype)
        y = check_array(y, dtype=X.dtype, ensure_2d=False)
    check_consistent_length(X, y)

    n_samples, n_features = X.shape

    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))

    # Internally always work with 2-D y; remember to flatten on the way out.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True

    n_samples_, n_targets = y.shape

    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))

    has_sw = sample_weight is not None

    if solver == 'auto':
        # cholesky if it's a dense array and cg in any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'

    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        if solver not in ['sag', 'saga']:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)

    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha, dtype=X.dtype).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))

    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)

    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
        raise ValueError('Solver %s not understood' % solver)

    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)

    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)

    elif solver == 'cholesky':
        if n_features > n_samples:
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)

                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

    elif solver in ['sag', 'saga']:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()

        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # One extra coefficient slot at the end for the intercept.
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init,
                is_saga=solver == 'saga')
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_

        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)

    # Reached either directly or as the fallback when cholesky hit a
    # singular matrix above.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)

    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()

    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Shared fit logic for the ridge estimators (regressor and classifier)."""

    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Validate inputs, center/scale data and delegate to ridge_regression."""
        if self.solver in ('sag', 'saga'):
            # sag/saga only operate on float64 data
            _dtype = np.float64
        else:
            # all other solvers work at both float precision levels
            _dtype = [np.float64, np.float32]

        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
                         multi_output=True, y_numeric=True)

        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")

        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            # add back the target mean removed during preprocessing
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)

        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    Minimizes the objective function::

        ||y - Xw||^2_2 + alpha * ||w||^2_2

    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.

    tol : float
        Precision of the solution.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        All last five solvers support both dense and sparse data. However,
        only 'sag' and 'saga' supports sparse input when `fit_intercept` is
        True.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

        .. versionadded:: 0.17
           *random_state* to support Stochastic Average Gradient.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

        .. versionadded:: 0.17

    See also
    --------
    RidgeClassifier : Ridge classifier
    RidgeCV : Ridge regression with built-in cross validation
    :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
        combines ridge regression with the kernel trick

    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
      normalize=False, random_state=None, solver='auto', tol=0.001)
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        # all hyper-parameter storage is handled by _BaseRidge
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept,
            normalize=normalize, copy_X=copy_X,
            max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample

        Returns
        -------
        self : returns an instance of self.
        """
        return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : float
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    tol : float
        Precision of the solution.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its unbiased and more flexible version named SAGA. Both methods
          use an iterative procedure, and are often faster than other solvers
          when both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import RidgeClassifier
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = RidgeClassifier().fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.9595...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifierCV :  Ridge classifier with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge classifier model.

        Internally the class labels are encoded as a {-1, 1} (or one-vs-all)
        regression target and fitted with the parent ridge regressor.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data

        y : array-like, shape = [n_samples]
            Target values

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

            .. versionadded:: 0.17
               *sample_weight* support to Classifier.

        Returns
        -------
        self : returns an instance of self.
        """
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)

        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))

        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))

        super().fit(X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # classes in the order discovered by the internal label binarizer
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation

    It allows efficient Leave-One-Out cross-validation.

    This class is not intended to be used directly. Use RidgeCV instead.

    Notes
    -----

    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.

    Let G = (K + alpha*Id)^-1.

    Dual solution: c = Gy
    Primal solution: w = X^T c

    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to inverse for many alphas.

    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.

    loov = (KGY - diag(KG)Y) / diag(I-KG)

    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.

    looe = y - loov = c / diag(G)

    References
    ----------
    http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def _pre_compute(self, X, y, centered_kernel=True):
        """Eigendecompose the kernel K = X X^T for the 'eigen' gcv mode."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # the following emulates an additional constant regressor
        # corresponding to fit_intercept=True
        # but this is done only when the features have been centered
        if centered_kernel:
            K += np.ones_like(K)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y

    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)

    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B

    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Helper function to avoid code duplication between self._errors and
        self._values.

        Notes
        -----
        We don't construct matrix G, instead compute action on y & diagonal.
        """
        w = 1. / (v + alpha)
        constant_column = np.var(Q, 0) < 1.e-12
        # detect constant columns
        w[constant_column] = 0  # cancel the regularization for the intercept

        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors(self, alpha, y, v, Q, QT_y):
        # squared leave-one-out errors: looe = c / diag(G)
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c

    def _values(self, alpha, y, v, Q, QT_y):
        # leave-one-out predictions: loov = y - c / diag(G)
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c

    def _pre_compute_svd(self, X, y, centered_kernel=True):
        """Thin SVD of X for the 'svd' gcv mode (dense X only)."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        if centered_kernel:
            X = np.hstack((X, np.ones((X.shape[0], 1))))
        # to emulate fit_intercept=True situation, add a column on ones
        # Note that by centering, the other columns are orthogonal to that one
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y

    def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
        """Helper function to avoid code duplication between self._errors_svd
        and self._values_svd.
        """
        constant_column = np.var(U, 0) < 1.e-12
        # detect columns colinear to ones
        w = ((v + alpha) ** -1) - (alpha ** -1)
        w[constant_column] = - (alpha ** -1)
        # cancel the regularization for the intercept
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors_svd(self, alpha, y, v, U, UT_y):
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c

    def _values_svd(self, alpha, y, v, U, UT_y):
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = check_array(sample_weight, ensure_2d=False)
        n_samples, n_features = X.shape

        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        gcv_mode = self.gcv_mode
        # 0 for scalar/None weights, >0 when an array of weights was given
        with_sw = len(np.shape(sample_weight))

        if gcv_mode is None or gcv_mode == 'auto':
            # eigen path handles sparse X, wide X and sample weights;
            # svd is preferred otherwise (n_samples >= n_features, dense)
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'

        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)

        if sample_weight is not None:
            X, y = _rescale_data(X, y, sample_weight)

        centered_kernel = not sparse.issparse(X) and self.fit_intercept

        # decomposition is computed once and reused for every alpha
        v, Q, QT_y = _pre_compute(X, y, centered_kernel)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []

        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # with no scorer, model selection minimizes the LOO squared error
        error = scorer is None

        if np.any(self.alphas < 0):
            raise ValueError("alphas cannot be negative. "
                             "Got {} containing some "
                             "negative value instead.".format(self.alphas))

        for i, alpha in enumerate(self.alphas):
            if error:
                out, c = _errors(float(alpha), y, v, Q, QT_y)
            else:
                out, c = _values(float(alpha), y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)

        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict

            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)

        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # primal coefficients recovered from the dual solution: w = X^T c
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)

        self._set_intercept(X_offset, y_offset, X_scale)

        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)

        return self
class _BaseRidgeCV(LinearModel):
    """Shared cross-validated fitting for RidgeCV / RidgeClassifierCV.

    With ``cv=None`` the efficient leave-one-out path (:class:`_RidgeGCV`)
    is used; otherwise a plain grid search over ``alphas`` is run.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model with built-in alpha selection.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : object
        """
        if self.cv is None:
            # efficient leave-one-out (generalized) cross-validation
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            if self.store_cv_values:
                # per-sample CV values are only produced by the GCV path
                # (bug fix: the message previously contained a doubled
                # space from string-literal concatenation)
                raise ValueError("cv!=None and store_cv_values=True "
                                 "are incompatible")
            parameters = {'alpha': self.alphas}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
                                    normalize=self.normalize),
                              parameters, cv=self.cv, scoring=self.scoring)
            gs.fit(X, y, sample_weight=sample_weight)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha

        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_

        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::

            'auto' : use 'svd' if n_samples > n_features and X is dense,
                     otherwise use 'eigen'
            'svd' : force computation via singular value decomposition of X
                    (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of the kernel
                      matrix X.X^T

        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.

    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the ``cv_values_`` attribute (see
        below). This flag is only compatible with ``cv=None`` (i.e. using
        Generalized Cross-Validation).

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True``\
        and ``cv=None``). After ``fit()`` has been called, this attribute \
        will contain the mean squared errors (by default) or the values \
        of the ``{loss,score}_func`` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter.

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.linear_model import RidgeCV
    >>> X, y = load_diabetes(return_X_y=True)
    >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.5166...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifier : Ridge classifier
    RidgeClassifierCV : Ridge classifier with built-in cross validation
    """
    # all behavior is inherited from _BaseRidgeCV / RegressorMixin
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation. Currently, only the n_features >
    n_samples case is handled efficiently.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the ``cv_values_`` attribute (see
        below). This flag is only compatible with ``cv=None`` (i.e. using
        Generalized Cross-Validation).

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True`` and
        ``cv=None``). After ``fit()`` has been called, this attribute will
        contain the mean squared errors (by default) or the values of the
        ``{loss,score}_func`` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import RidgeClassifierCV
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.9630...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifier : Ridge classifier
    RidgeCV : Ridge regression with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None,
                 store_cv_values=False):
        super().__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv, store_cv_values=store_cv_values)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

        Returns
        -------
        self : object
        """
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)

        # encode class labels as {-1, 1} (or one-vs-all) regression targets
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))

        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # classes in the order discovered by the internal label binarizer
        return self._label_binarizer.classes_
| 38.32357 | 79 | 0.619951 |
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics.scorer import check_scoring
from ..exceptions import ConvergenceWarning
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
try:
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol, atol='legacy')
except TypeError:
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info, ConvergenceWarning)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Direct ridge solve of the normal equations (X^T X + alpha I) w = X^T y."""
    n_features = X.shape[1]
    n_targets = y.shape[1]
    gram = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)
    # When every target shares the same penalty, a single factorization
    # covers all targets at once.
    if np.array_equal(alpha, len(alpha) * [alpha[0]]):
        gram.flat[::n_features + 1] += alpha[0]
        return linalg.solve(gram, Xy, sym_pos=True,
                            overwrite_a=True).T
    # Per-target penalties: add the ridge term to the diagonal, solve,
    # then remove it again so the Gram matrix can be reused.
    coefs = np.empty([n_targets, n_features], dtype=X.dtype)
    for coef, target, cur_alpha in zip(coefs, Xy.T, alpha):
        gram.flat[::n_features + 1] += cur_alpha
        coef[:] = linalg.solve(gram, target, sym_pos=True,
                               overwrite_a=False).ravel()
        gram.flat[::n_features + 1] -= cur_alpha
    return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve kernel ridge (K + alpha*I) dual_coef = y for the dual coefficients.

    Operates on the Gram matrix ``K`` (n_samples x n_samples).  ``K`` is
    modified in place unless ``copy=True``, but the ridge term added to
    its diagonal is removed again before returning, so a user-supplied
    kernel comes back numerically unchanged.
    """
    n_samples = K.shape[0]
    n_targets = y.shape[1]
    if copy:
        K = K.copy()
    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    # Sample weights are supported directly here (rather than rescaling
    # X) because K may be a pre-computed kernel.
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]
    if has_sw:
        # Fold sqrt(sample_weight) symmetrically into K and y.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)
    if one_alpha:
        # Single shared penalty: one factorization covers every target.
        K.flat[::n_samples + 1] += alpha[0]
        try:
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            # Degenerate kernel: fall back to the least-squares solution.
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]
        # Undo the in-place diagonal shift in case K was user-given.
        K.flat[::n_samples + 1] -= alpha[0]
        if has_sw:
            dual_coef *= sw[:, np.newaxis]
        return dual_coef
    else:
        # One penalty per target: solve each target separately, reusing K
        # by adding and then removing each ridge term on the diagonal.
        dual_coefs = np.empty([n_targets, n_samples], K.dtype)
        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            K.flat[::n_samples + 1] += current_alpha
            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()
            K.flat[::n_samples + 1] -= current_alpha
        if has_sw:
            dual_coefs *= sw[np.newaxis, :]
        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve the ridge equations with the requested ``solver``.

    Validates ``X``/``y``, broadcasts ``alpha`` to one penalty per
    target, folds ``sample_weight`` into a rescaling of the data for the
    direct solvers, then dispatches to one of 'sparse_cg', 'cholesky',
    'svd', 'lsqr', 'sag' or 'saga'.  Returns the coefficients plus,
    depending on the flags, the iteration counts and/or the intercept
    (an intercept is only actually fit on the SAG/SAGA path).
    """
    # Only 'sag' can fit an intercept on sparse X, so force it if needed.
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'
    _dtype = [np.float64, np.float32]
    # SAG/SAGA need float64 and specific memory layouts; the remaining
    # solvers accept float32 as well.
    if solver in ['sag', 'saga']:
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=_dtype)
        y = check_array(y, dtype=X.dtype, ensure_2d=False)
    check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # Work internally with 2-d y; flatten the result again on the way out.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))
    has_sw = sample_weight is not None
    if solver == 'auto':
        # Dense data (or any data with sample weights) goes through the
        # direct cholesky solver; sparse unweighted data uses the
        # matrix-free conjugate-gradient solver.
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'
    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        if solver not in ['sag', 'saga']:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha, dtype=X.dtype).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))
    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)
    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
        raise ValueError('Solver %s not understood' % solver)
    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
    elif solver == 'cholesky':
        if n_features > n_samples:
            # Wide data: solve in the dual via the kernel formulation.
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
    elif solver in ['sag', 'saga']:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # When an intercept is requested the SAG solver appends one
            # extra coefficient for it, hence the +1 in the init shape.
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init,
                is_saga=solver == 'saga')
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)
    # The cholesky branch above may have re-routed to 'svd' on a
    # singular matrix, so this check runs after the dispatch chain.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)
    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()
    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Shared constructor and fitting machinery for Ridge estimators."""

    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Validate the data, run ridge_regression and set the fitted
        ``coef_``, ``intercept_`` and ``n_iter_`` attributes."""
        if self.solver in ('sag', 'saga'):
            # SAG/SAGA only operate on float64 data.
            _dtype = np.float64
        else:
            # all other solvers work at both float precision levels
            _dtype = [np.float64, np.float32]
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
                         multi_output=True, y_numeric=True)
        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")
        # Center/scale the data according to fit_intercept/normalize.
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            # Add back the target offset removed by _preprocess_data.
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    Minimizes ``||y - Xw||^2_2 + alpha * ||w||^2_2``.  All solver and
    preprocessing behaviour lives in ``_BaseRidge``; this class only
    contributes the regressor mixin.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept,
            normalize=normalize, copy_X=copy_X,
            max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge regression model; see ``_BaseRidge.fit``."""
        return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier that fits ridge regression on +/-1 encoded targets."""

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Binarize the labels to +/-1 and fit one ridge model per class."""
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        super().fit(X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes discovered by the label binarizer during fit.
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with efficient leave-one-out cross-validation.

    The LOO residuals for every candidate ``alpha`` are derived from a
    single eigendecomposition of the kernel (gcv_mode='eigen') or a
    single SVD of X (gcv_mode='svd'), instead of n_samples refits.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def _pre_compute(self, X, y, centered_kernel=True):
        """Eigendecompose the linear kernel X X^T (plus intercept term)."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # the following emulates an additional constant regressor
        # corresponding to fit_intercept=True
        # but this is done only when the features have been centered
        if centered_kernel:
            K += np.ones_like(K)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y

    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)

    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B

    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Shared eigen-path computation behind _errors and _values."""
        w = 1. / (v + alpha)
        constant_column = np.var(Q, 0) < 1.e-12
        # detect constant columns
        w[constant_column] = 0  # cancel the regularization for the intercept
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors(self, alpha, y, v, Q, QT_y):
        """Squared LOO errors (eigen path) for one alpha, plus dual coefs."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c

    def _values(self, alpha, y, v, Q, QT_y):
        """LOO predictions (eigen path) for one alpha, plus dual coefs."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c

    def _pre_compute_svd(self, X, y, centered_kernel=True):
        """SVD-based precomputation; only valid for dense X."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        if centered_kernel:
            X = np.hstack((X, np.ones((X.shape[0], 1))))
        # to emulate fit_intercept=True situation, add a column on ones
        # Note that by centering, the other columns are orthogonal to that one
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y

    def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
        """Shared SVD-path computation behind _errors_svd and _values_svd."""
        constant_column = np.var(U, 0) < 1.e-12
        # detect columns colinear to ones
        w = ((v + alpha) ** -1) - (alpha ** -1)
        w[constant_column] = - (alpha ** -1)
        # cancel the regularization for the intercept
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors_svd(self, alpha, y, v, U, UT_y):
        """Squared LOO errors (SVD path) for one alpha, plus dual coefs."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c

    def _values_svd(self, alpha, y, v, U, UT_y):
        """LOO predictions (SVD path) for one alpha, plus dual coefs."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression, selecting alpha by leave-one-out CV."""
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = check_array(sample_weight, ensure_2d=False)
        n_samples, n_features = X.shape
        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        gcv_mode = self.gcv_mode
        # with_sw is truthy only for array-valued sample weights.
        with_sw = len(np.shape(sample_weight))
        if gcv_mode is None or gcv_mode == 'auto':
            # Eigen path handles sparse/wide/weighted data; SVD otherwise.
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'
        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)
        if sample_weight is not None:
            X, y = _rescale_data(X, y, sample_weight)
        centered_kernel = not sparse.issparse(X) and self.fit_intercept
        # Decompose once; each alpha below reuses this factorization.
        v, Q, QT_y = _pre_compute(X, y, centered_kernel)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []
        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # Without an explicit scorer, select alpha by mean squared LOO error.
        error = scorer is None
        if np.any(self.alphas < 0):
            raise ValueError("alphas cannot be negative. "
                             "Got {} containing some "
                             "negative value instead.".format(self.alphas))
        for i, alpha in enumerate(self.alphas):
            if error:
                out, c = _errors(float(alpha), y, v, Q, QT_y)
            else:
                out, c = _values(float(alpha), y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)
        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict
            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)
        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # Recover the primal coefficients from the dual solution.
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
        self._set_intercept(X_offset, y_offset, X_scale)
        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)
        return self
class _BaseRidgeCV(LinearModel):
    """Shared cross-validation logic for RidgeCV / RidgeClassifierCV.

    With ``cv=None`` the efficient leave-one-out ``_RidgeGCV`` path is
    used; otherwise a plain GridSearchCV over ``alphas`` is run.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def fit(self, X, y, sample_weight=None):
        """Select the best alpha by cross-validation and fit with it."""
        if self.cv is None:
            # Efficient leave-one-out generalized cross-validation.
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            # store_cv_values is a GCV-only feature.
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            # Explicit grid search over the alpha candidates.
            parameters = {'alpha': self.alphas}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
                                    normalize=self.normalize),
                              parameters, cv=self.cv, scoring=self.scoring)
            gs.fit(X, y, sample_weight=sample_weight)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation of the alpha grid.

    All fitting logic lives in ``_BaseRidgeCV``; this subclass only adds
    the regressor mixin.
    """
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation of the alpha grid."""

    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None,
                 store_cv_values=False):
        super().__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv, store_cv_values=store_cv_values)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Binarize the labels to +/-1 and run the cross-validated fit."""
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes discovered by the label binarizer during fit.
        return self._label_binarizer.classes_
| true | true |
f71bec5753bc8e9f3a6c6cc2541b07d58772b075 | 1,201 | py | Python | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 34 | 2015-12-26T22:13:51.000Z | 2021-11-17T11:46:37.000Z | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 13 | 2015-09-11T23:27:51.000Z | 2018-06-25T20:44:28.000Z | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 14 | 2015-10-08T17:08:48.000Z | 2022-02-22T04:25:54.000Z | '''
Tests for RandomCodons class of analysis module.
'''
from nose.tools import assert_equal, assert_not_equal, assert_raises
from coral import design, reaction, RNA
def test_randomcodons():
    '''
    This test is pretty basic right now - not sure how much checking
    can be done for a random DNA base generator.
    '''
    reference_seq = RNA('AUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAG')
    reference_peptide = reaction.translate(reference_seq)
    output = design.random_codons(reference_peptide)
    # NOTE(review): this re-translates `reference_seq`, not `output`, so
    # the peptide-equality assertion below is trivially true.  It looks
    # like `reaction.translate(output)` was intended -- confirm against
    # the coral API before changing (output may need transcription first).
    output_peptide = reaction.translate(reference_seq)
    # The generated sequence apparently omits the stop codon (hence -3).
    assert_equal(len(output), len(reference_seq) - 3)
    assert_equal(reference_peptide, output_peptide)
    assert_not_equal(reference_seq, output)
    # Setting too high a threshold should raise ValueError
    assert_raises(ValueError, design.random_codons, reference_peptide,
                  frequency_cutoff=1.5)
    # Weighted should work
    w_output = design.random_codons(reference_peptide, weighted=True)
    # NOTE(review): same issue -- translates the reference, not `w_output`.
    w_output_peptide = reaction.translate(reference_seq)
    assert_equal(len(w_output), len(reference_seq) - 3)
    assert_equal(reference_peptide, w_output_peptide)
    assert_not_equal(reference_seq, w_output)
| 32.459459 | 70 | 0.757702 |
from nose.tools import assert_equal, assert_not_equal, assert_raises
from coral import design, reaction, RNA
def test_randomcodons():
reference_seq = RNA('AUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAG')
reference_peptide = reaction.translate(reference_seq)
output = design.random_codons(reference_peptide)
output_peptide = reaction.translate(reference_seq)
assert_equal(len(output), len(reference_seq) - 3)
assert_equal(reference_peptide, output_peptide)
assert_not_equal(reference_seq, output)
assert_raises(ValueError, design.random_codons, reference_peptide,
frequency_cutoff=1.5)
w_output = design.random_codons(reference_peptide, weighted=True)
w_output_peptide = reaction.translate(reference_seq)
assert_equal(len(w_output), len(reference_seq) - 3)
assert_equal(reference_peptide, w_output_peptide)
assert_not_equal(reference_seq, w_output)
| true | true |
f71bec9bd1ce0214c2dc30775d89387898d9b1fb | 328 | py | Python | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-14 08:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `likes` field from the
    # `Truck` model.  Follows migration 0009_auto_20201112_1114.

    dependencies = [
        ('truck_app', '0009_auto_20201112_1114'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='truck',
            name='likes',
        ),
    ]
| 18.222222 | 49 | 0.591463 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('truck_app', '0009_auto_20201112_1114'),
]
operations = [
migrations.RemoveField(
model_name='truck',
name='likes',
),
]
| true | true |
f71bedbe0e1dd8d7b00f8bfa2461591aca993afa | 2,299 | py | Python | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treewidget.fields import TreeForeignKey, TreeManyToManyField
from django.utils.encoding import python_2_unicode_compatible
# django-mptt
@python_2_unicode_compatible
class Mptt(MPTTModel):
    """django-mptt backed tree node; parent FK rendered with treewidget."""
    name = models.CharField(max_length=32)
    # Self-referential parent link; the widget filters the selectable nodes.
    parent = TreeForeignKey(
        'self', blank=True, null=True, on_delete=models.CASCADE, settings={'filtered': True})

    def __str__(self):
        return self.name
# django-treebeard
@python_2_unicode_compatible
class Treebeardmp(MP_Node):
    """django-treebeard materialized-path tree node."""
    name = models.CharField(max_length=32)

    def __str__(self):
        return '%s' % self.name
@python_2_unicode_compatible
class Treebeardal(AL_Node):
    """django-treebeard adjacency-list tree node.

    AL trees carry an explicit parent FK plus a sibling-order column.
    """
    name = models.CharField(max_length=32)
    parent = models.ForeignKey('self', related_name='children_set', null=True,
                               db_index=True, on_delete=models.CASCADE)
    # Position among siblings, maintained by treebeard.
    sib_order = models.PositiveIntegerField()

    def __str__(self):
        return '%s' % self.name
@python_2_unicode_compatible
class Treebeardns(NS_Node):
    """django-treebeard nested-set tree node."""
    name = models.CharField(max_length=32)

    def __str__(self):
        return '%s' % self.name
class Example(models.Model):
    """Demo model wiring every tree backend to treewidget form fields.

    Covers both ForeignKey and ManyToMany variants with different widget
    settings (buttons, search, drag-and-drop, sorting, filtering) to
    exercise the available options.
    """
    mptt = TreeForeignKey(Mptt, on_delete=models.CASCADE)
    treebeardmp = TreeForeignKey(Treebeardmp, on_delete=models.CASCADE,
                                 settings={'show_buttons': True, 'filtered': True})
    treebeardal = TreeForeignKey(Treebeardal, on_delete=models.CASCADE,
                                 settings={'search': True, 'dnd': True, 'sort': True})
    treebeardns = TreeForeignKey(Treebeardns, on_delete=models.CASCADE,
                                 settings={'dnd': True})
    mptt_many = TreeManyToManyField(Mptt, related_name='example_many',
                                    settings={'show_buttons': True, 'search': True, 'dnd': True})
    treebeardmp_many = TreeManyToManyField(Treebeardmp, related_name='example_many')
    treebeardal_many = TreeManyToManyField(Treebeardal, related_name='example_many')
    treebeardns_many = TreeManyToManyField(Treebeardns, related_name='example_many')
| 35.921875 | 97 | 0.702044 |
from __future__ import unicode_literals
from django.db import models
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treewidget.fields import TreeForeignKey, TreeManyToManyField
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Mptt(MPTTModel):
name = models.CharField(max_length=32)
parent = TreeForeignKey(
'self', blank=True, null=True, on_delete=models.CASCADE, settings={'filtered': True})
def __str__(self):
return self.name
@python_2_unicode_compatible
class Treebeardmp(MP_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardal(AL_Node):
name = models.CharField(max_length=32)
parent = models.ForeignKey('self', related_name='children_set', null=True,
db_index=True, on_delete=models.CASCADE)
sib_order = models.PositiveIntegerField()
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardns(NS_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
class Example(models.Model):
mptt = TreeForeignKey(Mptt, on_delete=models.CASCADE)
treebeardmp = TreeForeignKey(Treebeardmp, on_delete=models.CASCADE,
settings={'show_buttons': True, 'filtered': True})
treebeardal = TreeForeignKey(Treebeardal, on_delete=models.CASCADE,
settings={'search': True, 'dnd': True, 'sort': True})
treebeardns = TreeForeignKey(Treebeardns, on_delete=models.CASCADE,
settings={'dnd': True})
mptt_many = TreeManyToManyField(Mptt, related_name='example_many',
settings={'show_buttons': True, 'search': True, 'dnd': True})
treebeardmp_many = TreeManyToManyField(Treebeardmp, related_name='example_many')
treebeardal_many = TreeManyToManyField(Treebeardal, related_name='example_many')
treebeardns_many = TreeManyToManyField(Treebeardns, related_name='example_many')
| true | true |
f71bf0329ac143d3243fa730424a4eebdde5ed63 | 2,519 | py | Python | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | """Let's Encrypt constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""

CLI_DEFAULTS = dict(
    config_files=[
        "/etc/letsencrypt/cli.ini",
        # http://freedesktop.org/wiki/Software/xdg-user-dirs/
        os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
                     "letsencrypt", "cli.ini"),
    ],
    # Offset applied to the default log level (logging.WARNING == 30, so
    # this is -3 "notches").  Floor division keeps the value an int under
    # Python 3; true division would silently yield the float -3.0.
    verbose_count=-(logging.WARNING // 10),
    server="https://acme-staging.api.letsencrypt.org/directory",
    rsa_key_size=2048,
    rollback_checkpoints=1,
    config_dir="/etc/letsencrypt",
    work_dir="/var/lib/letsencrypt",
    logs_dir="/var/log/letsencrypt",
    no_verify_ssl=False,
    # Port used to respond to the TLS-SNI-01 ACME challenge.
    tls_sni_01_port=challenges.TLSSNI01Response.PORT,
    auth_cert_path="./cert.pem",
    auth_chain_path="./chain.pem",
    strict_permissions=False,
)
"""Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict(
    renewer_enabled="yes",
    renew_before_expiry="30 days",
    deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""

# Challenge types that must not be offered together for one identifier.
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
    challenges.TLSSNI01, challenges.HTTP01])])
"""Mutually exclusive challenges."""

ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""

# Directory layout below; all paths are relative to either
# `IConfig.config_dir` or `IConfig.work_dir` as noted.
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""

CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""

ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""

BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""

CSR_DIR = "csr"
"""See `.IConfig.csr_dir`."""

IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""

KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""

LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""

TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""

RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative to `IConfig.config_dir`."""

RENEWER_CONFIG_FILENAME = "renewer.conf"
"""Renewer config file name (relative to `IConfig.config_dir`)."""
| 27.086022 | 73 | 0.710599 | import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
verbose_count=-(logging.WARNING / 10),
server="https://acme-staging.api.letsencrypt.org/directory",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
tls_sni_01_port=challenges.TLSSNI01Response.PORT,
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
strict_permissions=False,
)
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.TLSSNI01, challenges.HTTP01])])
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
ARCHIVE_DIR = "archive"
CONFIG_DIRS_MODE = 0o755
ACCOUNTS_DIR = "accounts"
BACKUP_DIR = "backups"
CSR_DIR = "csr"
IN_PROGRESS_DIR = "IN_PROGRESS"
KEY_DIR = "keys"
LIVE_DIR = "live"
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
RENEWAL_CONFIGS_DIR = "renewal"
RENEWER_CONFIG_FILENAME = "renewer.conf"
| true | true |
f71bf03d595623a0ca93dde19a49a3566fbf4da1 | 2,082 | py | Python | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | import requests
import lxml.html as html
import os
import datetime
#El text-fill es un h2 pero la libreria no identifica el h2 como tal sino como text-fill
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_TITLE = '//div[@class="mb-auto"]/text-fill/span//text()'
XPATH_SUMMARY = '//div[@class="lead"]/p//text()'
XPATH_BODY = '//div[@class="html-content"]/p//text()'
def parse_notice(link, today):
    """Download one article and store it as ``<today>/<title>.txt``.

    The file contains the headline, the lead paragraph and the body
    paragraphs, separated by blank lines.  HTTP failures are printed
    instead of propagating; pages without the expected structure are
    silently skipped.
    """
    try:
        response = requests.get(link)
        if response.status_code == 200:
            notice = response.content.decode('utf-8')
            parsed = html.fromstring(notice)
            try:
                title = parsed.xpath(XPATH_TITLE)[0]
                summary = parsed.xpath(XPATH_SUMMARY)[0]
                body = parsed.xpath(XPATH_BODY)
            except IndexError:
                # Page without the expected markup: nothing to save.
                return
            # Headlines can contain characters that are invalid in file
            # names (slashes, quotes, '?', etc.).  The original code only
            # stripped '"', so a '/' in the title broke open() below or
            # escaped the target directory -- strip the whole class.
            title = re.sub(r'[\\/:*?"<>|]', '', title).strip()
            with open(f'{today}/{title}.txt', 'w', encoding='utf-8') as f:
                f.write(str(title))
                f.write('\n\n')
                f.write(str(summary))
                f.write('\n\n')
                for p in body:
                    f.write(str(p))
                    f.write('\n')
        else:
            raise ValueError(f'Error: {response.status_code}')
    except ValueError as ve:
        print(ve)
def parse_home():
    """Fetch the front page, collect article links and scrape each one."""
    try:
        response = requests.get(HOME_URL)
        if response.status_code != 200:
            raise ValueError(f'Error: {response.status_code}')
        document = html.fromstring(response.content.decode('utf-8'))
        article_links = document.xpath(XPATH_LINK_TO_ARTICLE)
        # One output folder per scraping day, e.g. "14-11-2020".
        folder = datetime.date.today().strftime('%d-%m-%Y')
        if not os.path.isdir(folder):
            os.mkdir(folder)
        for article_link in article_links:
            parse_notice(article_link, folder)
    except ValueError as error:
        print(error)
def run():
    """Script entry point: scrape today's La Republica front-page articles."""
    parse_home()


if __name__ == '__main__':
    # NOTE: the original last line had dataset metadata fused onto it
    # ("run() | 30.617647 | ..."), which is a syntax error; restored here.
    run()
import lxml.html as html
import os
import datetime
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_TITLE = '//div[@class="mb-auto"]/text-fill/span//text()'
XPATH_SUMMARY = '//div[@class="lead"]/p//text()'
XPATH_BODY = '//div[@class="html-content"]/p//text()'
def parse_notice(link, today):
try:
response = requests.get(link)
if response.status_code == 200:
notice = response.content.decode('utf-8')
parsed = html.fromstring(notice)
try:
title = parsed.xpath(XPATH_TITLE)[0]
title = title.replace('\"','')
summary = parsed.xpath(XPATH_SUMMARY)[0]
body = parsed.xpath(XPATH_BODY)
except IndexError:
return
with open(f'{today}/{title}.txt', 'w', encoding='utf-8') as f:
f.write(str(title))
f.write('\n\n')
f.write(str(summary))
f.write('\n\n')
for p in body:
f.write(str(p))
f.write('\n')
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def parse_home():
try:
response = requests.get(HOME_URL)
if response.status_code == 200:
home = response.content.decode('utf-8')
parsed = html.fromstring(home)
links_to_notices = parsed.xpath(XPATH_LINK_TO_ARTICLE)
# print(links_to_notices)
today = datetime.date.today().strftime('%d-%m-%Y')
if not os.path.isdir(today):
os.mkdir(today)
for link in links_to_notices:
parse_notice(link, today)
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def run():
parse_home()
if __name__ == '__main__':
run() | true | true |
f71bf0555abfc8db7bc79d24972ecc0523d5c881 | 5,928 | py | Python | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | 6 | 2021-05-20T14:46:23.000Z | 2022-01-24T07:07:55.000Z | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | null | null | null | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | null | null | null | DEFAULT_SETTINGS = {
    # Default configuration for the scaffold generator.  Values naming classes
    # are dotted import paths (Django generic views, auth mixins, DRF classes);
    # *_PERMISSION_CODES list the permission verbs paired with each view.
    'CREATE_HTML_VIEW_RESOURCES': True,
    'CREATE_REST_VIEW_RESOURCES': True,
    'DEFAULT_MODEL_IMPORTS': [],
    'DEFAULT_FORM_IMPORTS': [
        'django.forms',
    ],
    'FIELDS': {},
    'MODEL_EXTRA_IMPORT_CLASSES': [
        'django.db.models',
    ],
    'MODEL_PARENT_CLASSES': ['django.db.models.Model'],
    'FORM_EXTRA_IMPORT_CLASSES': ['django.forms'],
    'FORM_PARENT_CLASSES': ['django.forms.ModelForm'],
    'VIEW_EXTRA_IMPORT_CLASSES': [],
    'VIEW_PERMISSION_CLASSES': [
        'django.contrib.auth.mixins.PermissionRequiredMixin',
    ],
    'VIEW_PERMISSION_CODES': [
        'view',
    ],
    # Class-based views generated per model (list/detail/create/update/delete).
    'LIST_VIEW_PARENT_CLASSES': ['django.views.generic.list.ListView'],
    'DETAIL_VIEW_PARENT_CLASSES': ['django.views.generic.detail.DetailView'],
    'ADD_PERMISSION_CLASSES': [
        'django.contrib.auth.mixins.PermissionRequiredMixin',
    ],
    'ADD_PERMISSION_CODES': ['add', 'create'],
    'CREATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.CreateView'],
    'CREATE_URL_PATH': 'create',
    'CHANGE_PERMISSION_CLASSES': [
        'django.contrib.auth.mixins.PermissionRequiredMixin',
    ],
    'CHANGE_PERMISSION_CODES': [
        'change',
    ],
    'UPDATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.UpdateView'],
    'UPDATE_URL_PATH': 'update',
    'DELETE_PERMISSION_CLASSES': [
        'django.contrib.auth.mixins.PermissionRequiredMixin',
    ],
    'DELETE_PERMISSION_CODES': [
        'delete',
    ],
    'DELETE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.DeleteView'],
    'DELETE_URL_PATH': 'delete',
    'ADMIN_EXTRA_IMPORT_CLASSES': ['django.contrib.admin'],
    'ADMIN_PARENT_CLASSES': ['django.contrib.admin.ModelAdmin'],
    'URL_EXTRA_IMPORT_CLASSES': ['django.urls.path'],
    # Django REST framework scaffolding (serializers, viewsets, router).
    'SCAFFOLD_REST_FRAMEWORK': True,
    'REST_FRAMEWORK_SERIALIZER_EXTRA_IMPORT_CLASSES': ['rest_framework.serializers'],
    'REST_FRAMEWORK_SERIALIZER_PARENT_CLASSES': ['rest_framework.serializers.ModelSerializer'],
    'REST_FRAMEWORK_VIEWSET_EXTRA_IMPORT_CLASSES': ['rest_framework.viewsets'],
    'REST_FRAMEWORK_VIEWSET_PARENT_CLASSES': ['rest_framework.viewsets.ModelViewSet'],
    'REST_FRAMEWORK_VIEWSET_PERMISSION_CLASSES': ['rest_framework.permissions.DjangoModelPermissions'],
    'REST_FRAMEWORK_DEFAULT_ROUTER': 'rest_framework.routers.DefaultRouter',
    # HTML template scaffolding (template paths are package-relative).
    'SCAFFOLD_TEMPLATES': False,
    'FORM_EXTRA': '',
    'TEMPLATE_VIEW_LIST': 'scaffold_generator/views/model_list.html.template',
    'TEMPLATE_VIEW_DETAIL': 'scaffold_generator/views/model_detail.html.template',
    'TEMPLATE_VIEW_FORM': 'scaffold_generator/views/model_form.html.template',
    'TEMPLATE_VIEW_DELETE': 'scaffold_generator/views/model_delete.html.template',
    'ADD_LIST_VIEW_TO_NAVBAR_TEMPLATE': '',
    'NAVBAR_ITEM_TEMPLATE': 'scaffold_generator/navbar_item.html.template',
}
# Maps a field-type name to how the generator renders it:
#   'class_name'     - dotted attribute path of the Django model field class
#   'default_kwargs' - keyword arguments emitted with the field by default
#   'nullable'       - False appears on the string-based fields; presumably it
#                      suppresses null=True in generated models (confirm in the
#                      generator code that consumes this table)
DEFAULT_FIELDS = {
    'AutoField': {
        'class_name': 'models.AutoField',
    },
    'BigAutoField': {
        'class_name': 'models.BigAutoField',
    },
    'BigIntegerField': {
        'class_name': 'models.BigIntegerField',
    },
    'BinaryField': {
        'class_name': 'models.BinaryField',
    },
    'BooleanField': {
        'class_name': 'models.BooleanField',
    },
    'CharField': {
        'class_name': 'models.CharField',
        'default_kwargs': {
            'max_length': '128',
        },
        'nullable': False,
    },
    'CommaSeparatedIntegerField': {
        'class_name': 'models.CommaSeparatedIntegerField',
        'nullable': False,
    },
    'DateField': {
        'class_name': 'models.DateField',
    },
    'DateTimeField': {
        'class_name': 'models.DateTimeField',
    },
    'DecimalField': {
        'class_name': 'models.DecimalField',
    },
    'DurationField': {
        'class_name': 'models.DurationField',
    },
    'EmailField': {
        'class_name': 'models.EmailField',
        'nullable': False,
    },
    'FileField': {
        'class_name': 'models.FileField',
        'nullable': False,
    },
    'FilePathField': {
        'class_name': 'models.FilePathField',
    },
    'FloatField': {
        'class_name': 'models.FloatField',
    },
    # Relations default to CASCADE deletion, matching Django's common choice.
    'ForeignKey': {
        'class_name': 'models.ForeignKey',
        'default_kwargs': {
            'on_delete': 'models.CASCADE',
        },
    },
    'GenericIPAddressField': {
        'class_name': 'models.GenericIPAddressField',
    },
    'IPAddressField': {
        'class_name': 'models.IPAddressField',
    },
    'ImageField': {
        'class_name': 'models.ImageField',
        'nullable': False,
    },
    'IntegerField': {
        'class_name': 'models.IntegerField',
    },
    'JSONField': {
        'class_name': 'models.JSONField',
    },
    'ManyToManyField': {
        'class_name': 'models.ManyToManyField',
        'nullable': False,
    },
    'NullBooleanField': {
        'class_name': 'models.NullBooleanField',
    },
    'OneToOneField': {
        'class_name': 'models.OneToOneField',
        'default_kwargs': {
            'on_delete': 'models.CASCADE',
        },
    },
    'PositiveBigIntegerField': {
        'class_name': 'models.PositiveBigIntegerField',
    },
    'PositiveIntegerField': {
        'class_name': 'models.PositiveIntegerField',
    },
    'PositiveSmallIntegerField': {
        'class_name': 'models.PositiveSmallIntegerField',
    },
    'SlugField': {
        'class_name': 'models.SlugField',
        'nullable': False,
    },
    'SmallAutoField': {
        'class_name': 'models.SmallAutoField',
    },
    'SmallIntegerField': {
        'class_name': 'models.SmallIntegerField',
    },
    'TextField': {
        'class_name': 'models.TextField',
        'nullable': False,
    },
    'TimeField': {
        'class_name': 'models.TimeField',
    },
    'URLField': {
        'class_name': 'models.URLField',
        'nullable': False,
    },
    'UUIDField': {
        'class_name': 'models.UUIDField',
    },
}
| 31.531915 | 103 | 0.624494 | DEFAULT_SETTINGS = {
'CREATE_HTML_VIEW_RESOURCES': True,
'CREATE_REST_VIEW_RESOURCES': True,
'DEFAULT_MODEL_IMPORTS': [],
'DEFAULT_FORM_IMPORTS': [
'django.forms',
],
'FIELDS': {},
'MODEL_EXTRA_IMPORT_CLASSES': [
'django.db.models',
],
'MODEL_PARENT_CLASSES': ['django.db.models.Model'],
'FORM_EXTRA_IMPORT_CLASSES': ['django.forms'],
'FORM_PARENT_CLASSES': ['django.forms.ModelForm'],
'VIEW_EXTRA_IMPORT_CLASSES': [],
'VIEW_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'VIEW_PERMISSION_CODES': [
'view',
],
'LIST_VIEW_PARENT_CLASSES': ['django.views.generic.list.ListView'],
'DETAIL_VIEW_PARENT_CLASSES': ['django.views.generic.detail.DetailView'],
'ADD_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'ADD_PERMISSION_CODES': ['add', 'create'],
'CREATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.CreateView'],
'CREATE_URL_PATH': 'create',
'CHANGE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'CHANGE_PERMISSION_CODES': [
'change',
],
'UPDATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.UpdateView'],
'UPDATE_URL_PATH': 'update',
'DELETE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'DELETE_PERMISSION_CODES': [
'delete',
],
'DELETE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.DeleteView'],
'DELETE_URL_PATH': 'delete',
'ADMIN_EXTRA_IMPORT_CLASSES': ['django.contrib.admin'],
'ADMIN_PARENT_CLASSES': ['django.contrib.admin.ModelAdmin'],
'URL_EXTRA_IMPORT_CLASSES': ['django.urls.path'],
'SCAFFOLD_REST_FRAMEWORK': True,
'REST_FRAMEWORK_SERIALIZER_EXTRA_IMPORT_CLASSES': ['rest_framework.serializers'],
'REST_FRAMEWORK_SERIALIZER_PARENT_CLASSES': ['rest_framework.serializers.ModelSerializer'],
'REST_FRAMEWORK_VIEWSET_EXTRA_IMPORT_CLASSES': ['rest_framework.viewsets'],
'REST_FRAMEWORK_VIEWSET_PARENT_CLASSES': ['rest_framework.viewsets.ModelViewSet'],
'REST_FRAMEWORK_VIEWSET_PERMISSION_CLASSES': ['rest_framework.permissions.DjangoModelPermissions'],
'REST_FRAMEWORK_DEFAULT_ROUTER': 'rest_framework.routers.DefaultRouter',
'SCAFFOLD_TEMPLATES': False,
'FORM_EXTRA': '',
'TEMPLATE_VIEW_LIST': 'scaffold_generator/views/model_list.html.template',
'TEMPLATE_VIEW_DETAIL': 'scaffold_generator/views/model_detail.html.template',
'TEMPLATE_VIEW_FORM': 'scaffold_generator/views/model_form.html.template',
'TEMPLATE_VIEW_DELETE': 'scaffold_generator/views/model_delete.html.template',
'ADD_LIST_VIEW_TO_NAVBAR_TEMPLATE': '',
'NAVBAR_ITEM_TEMPLATE': 'scaffold_generator/navbar_item.html.template',
}
DEFAULT_FIELDS = {
'AutoField': {
'class_name': 'models.AutoField',
},
'BigAutoField': {
'class_name': 'models.BigAutoField',
},
'BigIntegerField': {
'class_name': 'models.BigIntegerField',
},
'BinaryField': {
'class_name': 'models.BinaryField',
},
'BooleanField': {
'class_name': 'models.BooleanField',
},
'CharField': {
'class_name': 'models.CharField',
'default_kwargs': {
'max_length': '128',
},
'nullable': False,
},
'CommaSeparatedIntegerField': {
'class_name': 'models.CommaSeparatedIntegerField',
'nullable': False,
},
'DateField': {
'class_name': 'models.DateField',
},
'DateTimeField': {
'class_name': 'models.DateTimeField',
},
'DecimalField': {
'class_name': 'models.DecimalField',
},
'DurationField': {
'class_name': 'models.DurationField',
},
'EmailField': {
'class_name': 'models.EmailField',
'nullable': False,
},
'FileField': {
'class_name': 'models.FileField',
'nullable': False,
},
'FilePathField': {
'class_name': 'models.FilePathField',
},
'FloatField': {
'class_name': 'models.FloatField',
},
'ForeignKey': {
'class_name': 'models.ForeignKey',
'default_kwargs': {
'on_delete': 'models.CASCADE',
},
},
'GenericIPAddressField': {
'class_name': 'models.GenericIPAddressField',
},
'IPAddressField': {
'class_name': 'models.IPAddressField',
},
'ImageField': {
'class_name': 'models.ImageField',
'nullable': False,
},
'IntegerField': {
'class_name': 'models.IntegerField',
},
'JSONField': {
'class_name': 'models.JSONField',
},
'ManyToManyField': {
'class_name': 'models.ManyToManyField',
'nullable': False,
},
'NullBooleanField': {
'class_name': 'models.NullBooleanField',
},
'OneToOneField': {
'class_name': 'models.OneToOneField',
'default_kwargs': {
'on_delete': 'models.CASCADE',
},
},
'PositiveBigIntegerField': {
'class_name': 'models.PositiveBigIntegerField',
},
'PositiveIntegerField': {
'class_name': 'models.PositiveIntegerField',
},
'PositiveSmallIntegerField': {
'class_name': 'models.PositiveSmallIntegerField',
},
'SlugField': {
'class_name': 'models.SlugField',
'nullable': False,
},
'SmallAutoField': {
'class_name': 'models.SmallAutoField',
},
'SmallIntegerField': {
'class_name': 'models.SmallIntegerField',
},
'TextField': {
'class_name': 'models.TextField',
'nullable': False,
},
'TimeField': {
'class_name': 'models.TimeField',
},
'URLField': {
'class_name': 'models.URLField',
'nullable': False,
},
'UUIDField': {
'class_name': 'models.UUIDField',
},
}
| true | true |
f71bf0a388d2199024ab2f51328c979f5c521f72 | 9,018 | py | Python | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | 1 | 2021-05-17T06:06:57.000Z | 2021-05-17T06:06:57.000Z | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
from test_framework.sbercoinconfig import *
class AbandonConflictTest(BitcoinTestFramework):
    """Functional test of the abandontransaction RPC on a two-node network.

    Exercises: abandoning frees the inputs of a tx (and its in-wallet child),
    re-broadcast unabandons it, and conflicting/invalidated blocks interact
    with the wallet balance as expected.
    """
    def set_test_params(self):
        # Node 0 starts with a low min relay fee so its cheap txs relay;
        # restarts later raise the fee to evict them from the mempool.
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001"], []]
    def skip_test_if_missing_module(self):
        # Requires a wallet-enabled build.
        self.skip_if_no_wallet()
    def run_test(self):
        """Drive the abandon / unabandon / conflict scenarios, checking balances."""
        self.nodes[1].generate(COINBASE_MATURITY)
        self.sync_blocks()
        balance = self.nodes[0].getbalance()
        # Three self-sends whose 10-coin outputs are respent below.
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        self.sync_mempools()
        self.nodes[1].generate(1)
        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
        self.sync_blocks()
        newbalance = self.nodes[0].getbalance()
        assert balance - newbalance < Decimal("0.01") #no more than fees lost
        balance = newbalance
        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)
        # Identify the 10btc outputs
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
        nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
        nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
        # Identify the 14.99998btc output
        nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
        # Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])
        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance
        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
        balance = newbalance
        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance
        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)
        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance
        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance
        # Remove using high relay fee again
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance
        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.99")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks()
        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance
        # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
# Standard functional-test entry point.
if __name__ == '__main__':
    AbandonConflictTest().main()
| 47.714286 | 138 | 0.66833 |
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
from test_framework.sbercoinconfig import *
class AbandonConflictTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[1].generate(COINBASE_MATURITY)
self.sync_blocks()
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.sync_mempools()
self.nodes[1].generate(1)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
self.sync_blocks()
newbalance = self.nodes[0].getbalance()
assert balance - newbalance < Decimal("0.01")
balance = newbalance
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
# spend 10btc outputs from txA and txB
inputs.append({"txid": txA, "vout": nA})
inputs.append({"txid": txB, "vout": nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid": txAB1, "vout": nAB})
inputs.append({"txid": txC, "vout": nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [{"txid": txABC2, "vout": 0}]
outputs = {self.nodes[0].getnewaddress(): signed3_change}
signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
balance = newbalance
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Mine double spend from node 1
inputs = []
inputs.append({"txid": txA, "vout": nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.99")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransactionwithwallet(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
self.sync_blocks()
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| true | true |
f71bf12d2f88d3050797e3faff333cb5b247f6af | 4,243 | py | Python | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | # Generated with SMOP 0.41
from libsmop import *
# tsne_p.m
@function
def tsne_p(P=None,labels=None,no_dims=None,*args,**kwargs):
    # NOTE(review): machine-generated by SMOP 0.41 from tsne_p.m.  It relies on
    # libsmop's MATLAB-emulation helpers (dot, arange, copy, end, multiply,
    # bsxfun, ...) and keeps MATLAB semantics (1-based indexing via arange/end).
    # The bare "# tsne_p.m:NN" markers map each statement back to its MATLAB
    # source line -- keep them in sync when editing.
    varargin = tsne_p.varargin
    nargin = tsne_p.nargin
    #TSNE_P Performs symmetric t-SNE on affinity matrix P
    # mappedX = tsne_p(P, labels, no_dims)
    # The function performs symmetric t-SNE on pairwise similarity matrix P
    # to create a low-dimensional map of no_dims dimensions (default = 2).
    # The matrix P is assumed to be symmetric, sum up to 1, and have zeros
    # on the diagonal.
    # The labels of the data are not used by t-SNE itself, however, they
    # are used to color intermediate plots. Please provide an empty labels
    # matrix [] if you don't want to plot results during the optimization.
    # The low-dimensional data representation is returned in mappedX.
    # (C) Laurens van der Maaten, 2010
    # University of California, San Diego
    if logical_not(exist('labels','var')):
        labels=[]
# tsne_p.m:21
    if logical_not(exist('no_dims','var')) or isempty(no_dims):
        no_dims=2
# tsne_p.m:24
    # First check whether we already have an initial solution
    # (no_dims may be passed as an initial map instead of a dimension count).
    if numel(no_dims) > 1:
        initial_solution=copy(true)
# tsne_p.m:29
        ydata=copy(no_dims)
# tsne_p.m:30
        no_dims=size(ydata,2)
# tsne_p.m:31
    else:
        initial_solution=copy(false)
# tsne_p.m:33
    # Initialize some variables
    n=size(P,1)
# tsne_p.m:37
    momentum=0.5
# tsne_p.m:38
    final_momentum=0.8
# tsne_p.m:39
    mom_switch_iter=250
# tsne_p.m:40
    stop_lying_iter=100
# tsne_p.m:41
    max_iter=1000
# tsne_p.m:42
    epsilon=500
# tsne_p.m:43
    min_gain=0.01
# tsne_p.m:44
    # Make sure P-vals are set properly
    P[arange(1,end(),n + 1)]=0
# tsne_p.m:47
    P=dot(0.5,(P + P.T))
# tsne_p.m:48
    P=max(P / sum(ravel(P)),realmin)
# tsne_p.m:49
    const=sum(multiply(ravel(P),log(ravel(P))))
# tsne_p.m:50
    if logical_not(initial_solution):
        P=dot(P,4)
# tsne_p.m:52
    # Initialize the solution
    if logical_not(initial_solution):
        ydata=dot(0.0001,randn(n,no_dims))
# tsne_p.m:57
    y_incs=zeros(size(ydata))
# tsne_p.m:59
    gains=ones(size(ydata))
# tsne_p.m:60
    # Gradient-descent loop over the low-dimensional map points.
    for iter in arange(1,max_iter).reshape(-1):
        # Compute joint probability that point i and j are neighbors
        sum_ydata=sum(ydata ** 2,2)
# tsne_p.m:66
        num=1 / (1 + bsxfun(plus,sum_ydata,bsxfun(plus,sum_ydata.T,dot(- 2,(dot(ydata,ydata.T))))))
# tsne_p.m:67
        num[arange(1,end(),n + 1)]=0
# tsne_p.m:68
        Q=max(num / sum(ravel(num)),realmin)
# tsne_p.m:69
        # Compute the gradients (faster implementation)
        L=multiply((P - Q),num)
# tsne_p.m:72
        y_grads=dot(dot(4,(diag(sum(L,1)) - L)),ydata)
# tsne_p.m:73
        gains=multiply((gains + 0.2),(sign(y_grads) != sign(y_incs))) + multiply((dot(gains,0.8)),(sign(y_grads) == sign(y_incs)))
# tsne_p.m:76
        gains[gains < min_gain]=min_gain
# tsne_p.m:78
        y_incs=dot(momentum,y_incs) - dot(epsilon,(multiply(gains,y_grads)))
# tsne_p.m:79
        ydata=ydata + y_incs
# tsne_p.m:80
        ydata=bsxfun(minus,ydata,mean(ydata,1))
# tsne_p.m:81
        if iter == mom_switch_iter:
            momentum=copy(final_momentum)
# tsne_p.m:85
        if iter == stop_lying_iter and logical_not(initial_solution):
            P=P / 4
# tsne_p.m:88
        # Print out progress
        if logical_not(rem(iter,10)):
            cost=const - sum(multiply(ravel(P),log(ravel(Q))))
# tsne_p.m:93
            disp(concat(['Iteration ',num2str(iter),': error is ',num2str(cost)]))
        # Display scatter plot (maximally first three dimensions)
        if logical_not(rem(iter,10)) and logical_not(isempty(labels)):
            if no_dims == 1:
                scatter(ydata,ydata,9,labels,'filled')
            else:
                if no_dims == 2:
                    scatter(ydata(arange(),1),ydata(arange(),2),9,labels,'filled')
                else:
                    scatter3(ydata(arange(),1),ydata(arange(),2),ydata(arange(),3),40,labels,'filled')
            axis('tight')
            axis('off')
            drawnow
| 27.732026 | 130 | 0.607825 |
from libsmop import *
@function
def tsne_p(P=None,labels=None,no_dims=None,*args,**kwargs):
varargin = tsne_p.varargin
nargin = tsne_p.nargin
# The low-dimensional data representation is returned in mappedX.
# (C) Laurens van der Maaten, 2010
# University of California, San Diego
if logical_not(exist('labels','var')):
labels=[]
# tsne_p.m:21
if logical_not(exist('no_dims','var')) or isempty(no_dims):
no_dims=2
# tsne_p.m:24
# First check whether we already have an initial solution
if numel(no_dims) > 1:
initial_solution=copy(true)
# tsne_p.m:29
ydata=copy(no_dims)
# tsne_p.m:30
no_dims=size(ydata,2)
# tsne_p.m:31
else:
initial_solution=copy(false)
# tsne_p.m:33
# Initialize some variables
n=size(P,1)
# tsne_p.m:37
momentum=0.5
# tsne_p.m:38
final_momentum=0.8
# tsne_p.m:39
mom_switch_iter=250
# tsne_p.m:40
stop_lying_iter=100
# tsne_p.m:41
max_iter=1000
# tsne_p.m:42
epsilon=500
# tsne_p.m:43
min_gain=0.01
# tsne_p.m:44
# Make sure P-vals are set properly
P[arange(1,end(),n + 1)]=0
# tsne_p.m:47
P=dot(0.5,(P + P.T))
# tsne_p.m:48
P=max(P / sum(ravel(P)),realmin)
# tsne_p.m:49
const=sum(multiply(ravel(P),log(ravel(P))))
# tsne_p.m:50
if logical_not(initial_solution):
P=dot(P,4)
# tsne_p.m:52
# Initialize the solution
if logical_not(initial_solution):
ydata=dot(0.0001,randn(n,no_dims))
# tsne_p.m:57
y_incs=zeros(size(ydata))
# tsne_p.m:59
gains=ones(size(ydata))
# tsne_p.m:60
for iter in arange(1,max_iter).reshape(-1):
# Compute joint probability that point i and j are neighbors
sum_ydata=sum(ydata ** 2,2)
# tsne_p.m:66
num=1 / (1 + bsxfun(plus,sum_ydata,bsxfun(plus,sum_ydata.T,dot(- 2,(dot(ydata,ydata.T))))))
# tsne_p.m:67
num[arange(1,end(),n + 1)]=0
# tsne_p.m:68
Q=max(num / sum(ravel(num)),realmin)
# tsne_p.m:69
# Compute the gradients (faster implementation)
L=multiply((P - Q),num)
# tsne_p.m:72
y_grads=dot(dot(4,(diag(sum(L,1)) - L)),ydata)
# tsne_p.m:73
gains=multiply((gains + 0.2),(sign(y_grads) != sign(y_incs))) + multiply((dot(gains,0.8)),(sign(y_grads) == sign(y_incs)))
# tsne_p.m:76
gains[gains < min_gain]=min_gain
# tsne_p.m:78
y_incs=dot(momentum,y_incs) - dot(epsilon,(multiply(gains,y_grads)))
# tsne_p.m:79
ydata=ydata + y_incs
# tsne_p.m:80
ydata=bsxfun(minus,ydata,mean(ydata,1))
# tsne_p.m:81
if iter == mom_switch_iter:
momentum=copy(final_momentum)
# tsne_p.m:85
if iter == stop_lying_iter and logical_not(initial_solution):
P=P / 4
# tsne_p.m:88
# Print out progress
if logical_not(rem(iter,10)):
cost=const - sum(multiply(ravel(P),log(ravel(Q))))
# tsne_p.m:93
disp(concat(['Iteration ',num2str(iter),': error is ',num2str(cost)]))
# Display scatter plot (maximally first three dimensions)
if logical_not(rem(iter,10)) and logical_not(isempty(labels)):
if no_dims == 1:
scatter(ydata,ydata,9,labels,'filled')
else:
if no_dims == 2:
scatter(ydata(arange(),1),ydata(arange(),2),9,labels,'filled')
else:
scatter3(ydata(arange(),1),ydata(arange(),2),ydata(arange(),3),40,labels,'filled')
axis('tight')
axis('off')
drawnow
| true | true |
f71bf176600708ef5ff1a1e657e714c9aea3da7b | 664 | py | Python | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | import pprint as pp
import airflow.utils.dates
from airflow import DAG
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1)
}
with DAG(dag_id="externaltasksensor_dag", default_args=default_args, schedule_interval="@daily") as dag:
sensor = ExternalTaskSensor(
task_id='sensor',
external_dag_id='sleep_dag',
external_task_id='t2'
)
last_task = DummyOperator(task_id="last_task")
sensor >> last_task | 30.181818 | 104 | 0.733434 | import pprint as pp
import airflow.utils.dates
from airflow import DAG
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1)
}
with DAG(dag_id="externaltasksensor_dag", default_args=default_args, schedule_interval="@daily") as dag:
sensor = ExternalTaskSensor(
task_id='sensor',
external_dag_id='sleep_dag',
external_task_id='t2'
)
last_task = DummyOperator(task_id="last_task")
sensor >> last_task | true | true |
f71bf1d55740e24cffa947f3eb30e8b50f0271f3 | 198 | py | Python | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | 4 | 2018-08-27T05:36:37.000Z | 2018-08-29T09:41:50.000Z | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | null | null | null | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | 2 | 2020-05-20T14:50:59.000Z | 2020-08-31T14:44:57.000Z | import asyncio
import ujson
from band import logger, expose
"""
Listen events and write to output
"""
@expose.listener()
async def broadcast(**params):
    """Listener that logs the parameters of every received event."""
    logger.info('Broadcast', params=params)
| 16.5 | 43 | 0.737374 | import asyncio
import ujson
from band import logger, expose
@expose.listener()
async def broadcast(**params):
logger.info('Broadcast', params=params)
| true | true |
f71bf5092e429695b0035e78d005436c626887d2 | 26,148 | py | Python | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 3 | 2020-03-06T21:24:24.000Z | 2021-03-21T06:38:00.000Z | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 40 | 2019-10-14T17:02:54.000Z | 2022-03-09T13:35:54.000Z | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 2 | 2019-10-14T15:42:46.000Z | 2020-03-05T23:29:01.000Z | # Copyright 2020 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for deploying a set of processing services and the events server all at once.
Examples:
An example configuration
>>> deploy = Deployment(
>>> GlobalSettings(host='0.0.0.0'),
>>> EventsDeployment(port=10100, workers=8),
>>> SharedProcessorConfig(workers=8, jvm_args=['-Xms32m', '-Xmx8g'], classpath='blah.jar'),
>>> ProcessorDeployment(implementation='python',
>>> entry_point='mtap.examples.example_processor',
>>> instances=4,
>>> port=10101,
>>> workers=4),
>>> ProcessorDeployment(implementation='java',
>>> entry_point='edu.umn.nlpie.mtap.WordOccurrencesExampleProcessor',
>>> port=10105)
>>> )
>>> deploy.run_servers()
"""
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import time
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import grpc
from mtap import utilities, _config
__all__ = [
'Deployment', 'GlobalSettings', 'SharedProcessorConfig', 'EventsDeployment',
'ProcessorDeployment', 'main', 'deployment_parser', 'ServiceDeploymentException',
]
logger = logging.getLogger(__name__)
PYTHON_EXE = sys.executable
def _get_java() -> str:
try:
return str(pathlib.Path(os.environ['JAVA_HOME']) / 'bin' / 'java')
except KeyError:
return 'java'
JAVA_EXE = _get_java()
def _listen(process: subprocess.Popen) -> int:
for line in process.stdout:
print(line.decode(), end='', flush=True)
return process.wait()
class ServiceDeploymentException(Exception):
    """Raised when a deployed service fails to launch or become ready.

    Carries a human-readable message naming the failed service; behaves
    exactly like :class:`Exception` otherwise.  The boilerplate ``__init__``
    that merely forwarded to ``super().__init__`` has been removed — it added
    no behavior.
    """
class GlobalSettings:
    """Settings shared by the events service and all processors.

    Keyword Args:
        host (Optional[str]): The global host; services fall back to
            "127.0.0.1" when unset.
        mtap_config (Optional[str]): Path to an MTAP config file to load for
            all services.
        log_level (Optional[str]): A python logging level passed to all
            services.
        register (Optional[bool]): Whether services should register with
            service discovery.
    """

    def __init__(self, *,
                 host: Optional[str] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None,
                 register: Optional[bool] = None):
        self.host = host
        self.mtap_config = mtap_config
        self.log_level = log_level
        self.register = register

    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'GlobalSettings':
        """Build a :class:`GlobalSettings` from a configuration dictionary.

        Args:
            conf (Optional[Dict]): Configuration dictionary; ``None`` is
                treated as an empty configuration.

        Returns:
            GlobalSettings: Settings populated from the dictionary.
        """
        settings = conf if conf is not None else {}
        return GlobalSettings(
            host=settings.get('host'),
            mtap_config=settings.get('mtap_config'),
            log_level=settings.get('log_level'),
            register=settings.get('register'),
        )
class SharedProcessorConfig:
    """Configuration shared by every processor service in a deployment.

    Args:
        events_addresses (Optional[List[str]]): Optional gRPC-compatible
            targets for the events service used by all processors.
        workers (Optional[int]): Default number of processing worker threads.
        additional_args (Optional[List[str]]): Extra CLI arguments appended
            to every processor call.
        jvm_args (Optional[List[str]]): JVM arguments for all Java processors.
        java_classpath (Optional[str]): Classpath string passed to all Java
            processors.
        startup_timeout (Optional[int]): Default processor startup timeout;
            30 seconds when unset.
        mp_spawn_method (Optional[str]): A :meth:`multiprocessing.get_context`
            argument used to create the multiprocessing context.
    """

    def __init__(self,
                 events_addresses: Optional[List[str]] = None,
                 workers: Optional[int] = None,
                 additional_args: Optional[List[str]] = None,
                 jvm_args: Optional[List[str]] = None,
                 java_classpath: Optional[str] = None,
                 startup_timeout: Optional[int] = None,
                 mp_spawn_method: Optional[str] = None):
        self.events_addresses = events_addresses
        self.workers = workers
        self.additional_args = additional_args
        self.jvm_args = jvm_args
        self.java_classpath = java_classpath
        # Falsy (unset) timeouts fall back to the 30 second default.
        self.startup_timeout = startup_timeout or 30
        self.mp_spawn_method = mp_spawn_method

    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'SharedProcessorConfig':
        """Build a configuration from its dictionary representation.

        Args:
            conf (Optional[Dict]): Configuration dictionary; ``None`` is
                treated as an empty configuration.

        Returns:
            SharedProcessorConfig built from the dictionary entries.
        """
        return SharedProcessorConfig(**(conf or {}))
class _ServiceDeployment:
def __init__(self,
workers: Optional[int],
register: Optional[bool],
mtap_config: Optional[str],
log_level: Optional[str]):
self.workers = workers
self.register = register
self.mtap_config = mtap_config
self.log_level = log_level
def service_args(self,
host: Optional[str] = None,
port: Optional[int] = None,
register_default: Optional[bool] = None,
host_default: Optional[str] = None,
workers_default: Optional[int] = None,
mtap_config_default: Optional[str] = None,
log_level_default: Optional[str] = None):
call = []
host = host or host_default
if host is not None:
call.extend(['--host', str(host)])
if port is not None:
call.extend(['--port', str(port)])
if self.register or register_default:
call.append('--register')
workers = self.workers or workers_default
if workers is not None:
call.extend(['--workers', str(workers)])
mtap_config = self.mtap_config or mtap_config_default
if mtap_config is not None:
call.extend(['--mtap-config', mtap_config])
log_level = self.log_level or log_level_default
if log_level is not None:
call.extend(['--log-level', log_level])
call.append('--write-address')
return call
class EventsDeployment:
    """Deployment configuration for the events service.

    Keyword Args:
        enabled (bool): Whether an events service should be created.
        addresses (~typing.Optional[~typing.Sequence[str]]): The host addresses of the events
            servers to launch, each in "host:port", ":port", or "host" form.
        workers (~typing.Optional[int]): The number of worker threads the events service should use.
        register (~typing.Optional[bool]): Whether to register the events service with discovery.
        mtap_config (~typing.Optional[str]): Path to an mtap configuration file.
        log_level (~typing.Optional[str]): The log level for the events service.
    """
    def __init__(self, *,
                 enabled: bool = True,
                 addresses: Optional[Sequence[str]] = None,
                 workers: Optional[int] = None,
                 register: Optional[bool] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None):
        self.enabled = enabled
        self.addresses = addresses
        # Per-service CLI flag bundle shared with processor deployments.
        self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
    def create_calls(self, global_settings: GlobalSettings) -> Iterable[List[str]]:
        """Yield the subprocess argument lists for launching each events server.

        Args:
            global_settings (GlobalSettings): Deployment-wide default settings.
        """
        for address in self.addresses:
            host = None
            port = None
            if address:
                # Accept "host:port", ":port" (host left unset), or bare "host".
                splits = address.split(':')
                if len(splits) == 2:
                    host, port = splits
                    if host == '':
                        host = None
                else:
                    host = splits[0]
            call = [PYTHON_EXE, '-m', 'mtap', 'events']
            service_args = self.service_deployment.service_args(
                host=host,
                port=port,
                register_default=global_settings.register,
                host_default=global_settings.host,
                mtap_config_default=global_settings.mtap_config,
                log_level_default=global_settings.log_level
            )
            call.extend(service_args)
            yield call
    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'EventsDeployment':
        """Create the EventsDeployment configuration from a configuration dictionary.

        Args:
            conf (Optional[Dict]): The configuration dictionary.

        Returns:
            EventsDeployment created from the configuration dictionary.

        Raises:
            ValueError: If 'address'/'addresses' is neither a string nor an
                iterable of strings.
        """
        conf = conf or {}
        enabled = conf.get('enabled')
        if enabled is None:
            enabled = False
        address = conf.get('address', None) or conf.get('addresses', None)
        if address is None:
            addresses = []
        elif isinstance(address, str):
            addresses = [address]
        elif isinstance(address, Iterable):
            addresses = list(address)
        else:
            # BUG FIX: ``'…' + type(address)`` raised TypeError (str + type);
            # use the type's name so the intended ValueError is raised.
            raise ValueError('Unrecognized type of address: ' + type(address).__name__)
        return EventsDeployment(enabled=enabled, addresses=addresses,
                                workers=conf.get('workers'), register=conf.get('register'),
                                mtap_config=conf.get('mtap_config'),
                                # BUG FIX: a configured 'log_level' key was
                                # previously ignored; forward it.
                                log_level=conf.get('log_level'))
class ProcessorDeployment:
    """Deployment configuration for an MTAP processor.
    Used to construct the command for launching the processor. The processor should be a Java Class
    with a main method or a Python module with a main block. It should accept the standard MTAP
    processor deployment arguments and launch an MTAP processor using :func:`mtap.run_processor` or
    the equivalent Java method.
    Args:
        implementation (str): Either "java" or "python".
        entry_point (str): Either the java main class, or the python main module.
        enabled (~typing.Optional[bool]): Whether the processor should be launched as part of
            deployment. Default is `True` if `None`.
        instances (~typing.Optional[int]): The number of instances of the processor to launch.
            Default is `1` if `None`.
        host (~typing.Optional[str]): The listening host for the processor service.
        port (~typing.Optional[int]): The listening port for the processor service.
        workers (~typing.Optional[int]): The number of worker threads per instance.
        register (~typing.Optional[bool]):
            Whether the processor should register with the discovery service specified in the MTAP
            configuration
        mtap_config (~typing.Optional[str]): Path to the MTAP configuration file.
        log_level (~typing.Optional[str]): The log level for the processor.
        identifier (~typing.Optional[str]): An optional identifier override to use for registration.
        pre_args (~typing.Optional[~typing.List[str]]):
            Arguments that occur prior to the MTAP service arguments (like host, port, etc).
        additional_args (~typing.Optional[~typing.List[str]]):
            Arguments that occur after the MTAP service arguments.
        startup_timeout (~typing.Optional[int]): Optional override startup timeout.
        mp_spawn_method (~typing.Optional[str]): A :meth:`multiprocessing.get_context` argument to create
            the multiprocessing context.
    """
    def __init__(self,
                 implementation: str,
                 entry_point: str,
                 *, enabled: Optional[bool] = None,
                 instances: Optional[int] = None,
                 host: Optional[str] = None,
                 port: Optional[int] = None,
                 workers: Optional[int] = None,
                 register: Optional[bool] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None,
                 identifier: Optional[str] = None,
                 pre_args: Optional[List[str]] = None,
                 additional_args: Optional[List[str]] = None,
                 startup_timeout: Optional[int] = None,
                 mp_spawn_method: Optional[str] = None):
        # Unspecified (None) means enabled; only an explicit False disables.
        self.enabled = enabled if enabled is not None else True
        self.implementation = implementation
        self.entry_point = entry_point
        # None (or 0) falls back to a single instance.
        self.instances = instances or 1
        if not isinstance(self.instances, int) or self.instances < 1:
            raise ValueError("Instances must be strictly positive integer.")
        self.identifier = identifier
        self.pre_args = pre_args
        self.additional_args = additional_args
        self.host = host
        self.port = port
        # Bundles the common per-service CLI flags (workers/register/config/log).
        self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
        # None here means "use the shared config's startup_timeout".
        self.startup_timeout = startup_timeout
        self.mp_spawn_method = mp_spawn_method
    @staticmethod
    def from_conf(conf: Dict) -> 'ProcessorDeployment':
        """Creates an MTAP processor deployment configuration from a configuration dictionary.
        Args:
            conf (Dict): The configuration dictionary.
        Returns:
            ProcessorDeployment object that can be used to constuct the call for the processor.
        """
        return ProcessorDeployment(**conf)
    def create_calls(self,
                     global_settings: GlobalSettings,
                     shared_config: SharedProcessorConfig) -> Iterable[List[str]]:
        # Yields one subprocess argument list per instance.  Port assignment:
        # an explicit list is used as-is, None lets every instance pick its own
        # port, and a single int allocates `instances` consecutive ports.
        if isinstance(self.port, list):
            ports = self.port
        elif self.port is None:
            ports = [None] * self.instances
        else:
            ports = list(range(self.port, self.port + self.instances))
        for port in ports:
            if self.implementation == 'python':
                # Launch the python module with the current interpreter.
                call = [PYTHON_EXE, '-m', self.entry_point]
                # The per-processor spawn method overrides the shared one.
                mp_spawn_method = shared_config.mp_spawn_method
                if self.mp_spawn_method is not None:
                    mp_spawn_method = self.mp_spawn_method
                if mp_spawn_method is not None:
                    call.extend(['--mp-spawn-method', mp_spawn_method])
            elif self.implementation == 'java':
                # JVM args and classpath must precede the main class name.
                call = [str(JAVA_EXE)]
                if shared_config.jvm_args is not None:
                    call.extend(shared_config.jvm_args)
                if shared_config.java_classpath is not None:
                    call.extend(['-cp', shared_config.java_classpath])
                call.append(self.entry_point)
            else:
                raise ValueError('Unrecognized implementation: ' + self.implementation)
            # Argument order matters to the launched program: pre_args, then
            # the standard MTAP service args, then identifier/events, then any
            # per-processor and shared trailing args.
            if self.pre_args is not None:
                call.extend(self.pre_args)
            service_args = self.service_deployment.service_args(
                host=self.host,
                port=port,
                register_default=global_settings.register,
                host_default=global_settings.host,
                mtap_config_default=global_settings.mtap_config,
                log_level_default=global_settings.log_level,
                workers_default=shared_config.workers
            )
            call.extend(service_args)
            if self.identifier is not None:
                call.extend(['--identifier', self.identifier])
            events_addresses = shared_config.events_addresses
            if events_addresses is not None:
                call.extend(['--events', ','.join(events_addresses)])
            if self.additional_args is not None:
                call.extend(self.additional_args)
            if shared_config.additional_args is not None:
                call.extend(shared_config.additional_args)
            yield call
class Deployment:
    """A automatic deployment configuration which launches a configurable set of MTAP services.
    Args:
        global_settings (~typing.Optional[GlobalSettings]): Settings shared among all services.
        events_deployment (~typing.Optional[EventsDeployment]):
            Deployment settings for the events service.
        shared_processor_config (~typing.Optional[SharedProcessorConfig]):
            Shared configuration settings for all processors.
        processors (vararg ProcessorDeployment): Configurations for individual processors.
    """
    def __init__(self,
                 global_settings: Optional[GlobalSettings] = None,
                 events_deployment: Optional[EventsDeployment] = None,
                 shared_processor_config: Optional[SharedProcessorConfig] = None,
                 *processors: ProcessorDeployment):
        self.global_settings = global_settings
        self.events_deployment = events_deployment
        self.shared_processor_config = shared_processor_config
        self.processors = processors
    @staticmethod
    def load_configuration(conf: Dict) -> 'Deployment':
        """Creates a deployment object from a configuration dictionary.
        Args:
            conf (Dict): The configuration dictionary.
        Returns:
            Deployment object created.
        """
        global_settings = GlobalSettings.from_conf(conf.get('global'))
        events = EventsDeployment.from_conf(conf.get('events_service'))
        shared_processor_config = SharedProcessorConfig.from_conf(conf.get('shared_processor_config'))
        processors_list = conf.get('processors', [])
        processors = [ProcessorDeployment.from_conf(c) for c in processors_list]
        return Deployment(global_settings, events, shared_processor_config, *processors)
    @staticmethod
    def from_yaml_file(conf_path: Union[pathlib.Path, str]) -> 'Deployment':
        """Loads a deployment configuration from a yaml file.
        Args:
            conf_path (str or pathlib.Path): The path to the yaml configuration file.
        Returns:
            Deployment object created from the configuration.
        """
        conf_path = pathlib.Path(conf_path)
        from yaml import load
        try:
            # Prefer the C-accelerated loader when libyaml is available.
            from yaml import CLoader as Loader
        except ImportError:
            from yaml import Loader
        with conf_path.open('rb') as f:
            conf = load(f, Loader=Loader)
        return Deployment.load_configuration(conf)
    def run_servers(self):
        """Starts all of the configured services.
        Raises:
            ServiceDeploymentException: If one of the services fails to launch.
        """
        with _config.Config() as c:
            # Whether launched gRPC channels should honor http proxy env vars.
            enable_proxy = c.get('grpc.enable_proxy', False)
        # (process, output-listener-thread) pairs for everything we launch.
        processes_listeners = []
        events_addresses = []
        def shutdown(kill=False):
            # Stops (optionally kills) every launched child and joins its
            # output-forwarding thread.
            print("Shutting down all processors")
            for p, listener in processes_listeners:
                if kill:
                    p.kill()
                listener.join(timeout=1)
        # Launch the events service(s) first so processors can connect to them.
        if self.events_deployment.enabled:
            for call in self.events_deployment.create_calls(self.global_settings):
                p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                listener, events_address = _listen_test_connectivity(p, "events", 30, enable_proxy)
                events_addresses.append(events_address)
                processes_listeners.append((p, listener))
        # Without service discovery registration, pass the freshly launched
        # events addresses directly to the processors (unless already set).
        if (not self.global_settings.register
                and not self.events_deployment.service_deployment.register
                and self.shared_processor_config.events_addresses is None):
            self.shared_processor_config.events_addresses = events_addresses
        for processor_deployment in self.processors:
            if processor_deployment.enabled:
                for call in processor_deployment.create_calls(self.global_settings,
                                                              self.shared_processor_config):
                    logger.debug('Launching processor with call: %s', call)
                    p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    # Per-processor timeout overrides the shared default.
                    startup_timeout = (processor_deployment.startup_timeout
                                       or self.shared_processor_config.startup_timeout)
                    try:
                        listener, _ = _listen_test_connectivity(p, call, startup_timeout,
                                                                enable_proxy)
                    except ServiceDeploymentException as e:
                        # One failed processor aborts the whole deployment.
                        logger.error(str(e))
                        return shutdown(kill=True)
                    processes_listeners.append((p, listener))
        print('Done deploying all servers.', flush=True)
        # Keep the parent alive until interrupted, then shut everything down.
        try:
            while True:
                time.sleep(60 * 60 * 24)
        except KeyboardInterrupt:
            return shutdown()
def _listen_test_connectivity(p: subprocess.Popen,
                              name: Any,
                              startup_timeout: int,
                              enable_proxy: bool = False) -> Tuple[threading.Thread, str]:
    """Start an output-forwarding thread for *p* and wait until it is reachable.

    Polls the service's written-address file for up to *startup_timeout*
    seconds, then verifies gRPC connectivity to the discovered address.

    Returns:
        The output-listener thread and the service's address.

    Raises:
        ServiceDeploymentException: If the address never appears or the
            channel does not become ready in time.
    """
    output_thread = threading.Thread(target=_listen, args=(p,))
    output_thread.start()
    address = None
    for _ in range(startup_timeout):
        try:
            address = utilities.read_address(str(p.pid))
        except FileNotFoundError:
            # Service hasn't written its address yet; poll once per second.
            time.sleep(1)
        else:
            break
    if address is None:
        raise ServiceDeploymentException('Timed out waiting for {} to launch'.format(name))
    channel_options = [('grpc.enable_http_proxy', enable_proxy)]
    with grpc.insecure_channel(address, options=channel_options) as channel:
        try:
            grpc.channel_ready_future(channel).result(timeout=startup_timeout)
        except grpc.FutureTimeoutError:
            raise ServiceDeploymentException('Failed to launch: {}'.format(name))
    return output_thread, address
def main(args: Optional[Sequence[str]] = None,
         conf: Optional[argparse.Namespace] = None):
    """Entry point for the deployment command-line interface.

    Args:
        args: Command-line arguments to parse; ignored when *conf* is given.
        conf: A pre-parsed namespace, as produced by :func:`deployment_parser`.
    """
    if conf is None:
        conf = deployment_parser().parse_args(args)
    if conf.log_level is not None:
        logging.basicConfig(level=getattr(logging, conf.log_level))
    # BUG FIX: argparse only sets `mode` via the sub-parsers' set_defaults, so
    # invoking with no sub-command used to raise AttributeError here.  Fail
    # gracefully with a message instead.
    mode = getattr(conf, 'mode', None)
    if mode is None:
        print('No mode specified; expected one of: run_servers, write_example',
              file=sys.stderr)
        return
    if mode == 'run_servers':
        deployment = Deployment.from_yaml_file(conf.deploy_config)
        deployment.run_servers()
    if mode == 'write_example':
        example = pathlib.Path(__file__).parent / "examples" / "exampleDeploymentConfiguration.yml"
        shutil.copyfile(str(example), "exampleDeploymentConfiguration.yml")
        print('Writing "exampleDeploymentConfiguration.yml" to ' + str(pathlib.Path.cwd()))
def deployment_parser() -> argparse.ArgumentParser:
    """Create the argument parser for the deployment command-line interface.

    Returns:
        ~argparse.ArgumentParser: Parser producing a namespace that can be
        passed to :func:`main`.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--log-level', metavar='LEVEL',
                        help="The log level to use for the deployment script.")
    subcommands = parser.add_subparsers(title='mode')
    # `run_servers CONFIG_FILE` launches everything in the configuration.
    run_servers_cmd = subcommands.add_parser('run_servers')
    run_servers_cmd.add_argument('deploy_config', metavar='CONFIG_FILE', type=pathlib.Path,
                                 help="A path to the deployment configuration to deploy.")
    run_servers_cmd.set_defaults(mode='run_servers')
    # `write_example` copies a template configuration to the working directory.
    write_example_cmd = subcommands.add_parser('write_example')
    write_example_cmd.set_defaults(mode='write_example')
    return parser
# Script entry point: parse CLI arguments and dispatch to the selected mode.
if __name__ == '__main__':
    main()
| 40.728972 | 105 | 0.618594 |
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import time
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import grpc
from mtap import utilities, _config
__all__ = [
'Deployment', 'GlobalSettings', 'SharedProcessorConfig', 'EventsDeployment',
'ProcessorDeployment', 'main', 'deployment_parser', 'ServiceDeploymentException',
]
logger = logging.getLogger(__name__)
PYTHON_EXE = sys.executable
def _get_java() -> str:
try:
return str(pathlib.Path(os.environ['JAVA_HOME']) / 'bin' / 'java')
except KeyError:
return 'java'
JAVA_EXE = _get_java()
def _listen(process: subprocess.Popen) -> int:
for line in process.stdout:
print(line.decode(), end='', flush=True)
return process.wait()
class ServiceDeploymentException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class GlobalSettings:
def __init__(self, *,
host: Optional[str] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None,
register: Optional[bool] = None):
self.host = host
self.mtap_config = mtap_config
self.log_level = log_level
self.register = register
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'GlobalSettings':
conf = conf or {}
return GlobalSettings(host=conf.get('host'), mtap_config=conf.get('mtap_config'),
log_level=conf.get('log_level'), register=conf.get('register'))
class SharedProcessorConfig:
def __init__(self,
events_addresses: Optional[List[str]] = None,
workers: Optional[int] = None,
additional_args: Optional[List[str]] = None,
jvm_args: Optional[List[str]] = None,
java_classpath: Optional[str] = None,
startup_timeout: Optional[int] = None,
mp_spawn_method: Optional[str] = None):
self.events_addresses = events_addresses
self.workers = workers
self.additional_args = additional_args
self.jvm_args = jvm_args
self.java_classpath = java_classpath
self.startup_timeout = startup_timeout or 30
self.mp_spawn_method = mp_spawn_method
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'SharedProcessorConfig':
conf = conf or {}
return SharedProcessorConfig(**conf)
class _ServiceDeployment:
def __init__(self,
workers: Optional[int],
register: Optional[bool],
mtap_config: Optional[str],
log_level: Optional[str]):
self.workers = workers
self.register = register
self.mtap_config = mtap_config
self.log_level = log_level
def service_args(self,
host: Optional[str] = None,
port: Optional[int] = None,
register_default: Optional[bool] = None,
host_default: Optional[str] = None,
workers_default: Optional[int] = None,
mtap_config_default: Optional[str] = None,
log_level_default: Optional[str] = None):
call = []
host = host or host_default
if host is not None:
call.extend(['--host', str(host)])
if port is not None:
call.extend(['--port', str(port)])
if self.register or register_default:
call.append('--register')
workers = self.workers or workers_default
if workers is not None:
call.extend(['--workers', str(workers)])
mtap_config = self.mtap_config or mtap_config_default
if mtap_config is not None:
call.extend(['--mtap-config', mtap_config])
log_level = self.log_level or log_level_default
if log_level is not None:
call.extend(['--log-level', log_level])
call.append('--write-address')
return call
class EventsDeployment:
def __init__(self, *,
enabled: bool = True,
addresses: Optional[Sequence[str]] = None,
workers: Optional[int] = None,
register: Optional[bool] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None):
self.enabled = enabled
self.addresses = addresses
self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
def create_calls(self, global_settings: GlobalSettings) -> Iterable[List[str]]:
for address in self.addresses:
host = None
port = None
if address:
splits = address.split(':')
if len(splits) == 2:
host, port = splits
if host == '':
host = None
else:
host = splits[0]
call = [PYTHON_EXE, '-m', 'mtap', 'events']
service_args = self.service_deployment.service_args(
host=host,
port=port,
register_default=global_settings.register,
host_default=global_settings.host,
mtap_config_default=global_settings.mtap_config,
log_level_default=global_settings.log_level
)
call.extend(service_args)
yield call
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'EventsDeployment':
conf = conf or {}
enabled = conf.get('enabled')
if enabled is None:
enabled = False
address = conf.get('address', None) or conf.get('addresses', None)
if address is None:
addresses = []
elif isinstance(address, str):
addresses = [address]
elif isinstance(address, Iterable):
addresses = list(address)
else:
raise ValueError('Unrecognized type of address: ' + type(address))
return EventsDeployment(enabled=enabled, addresses=addresses,
workers=conf.get('workers'), register=conf.get('register'),
mtap_config=conf.get('mtap_config'))
class ProcessorDeployment:
def __init__(self,
implementation: str,
entry_point: str,
*, enabled: Optional[bool] = None,
instances: Optional[int] = None,
host: Optional[str] = None,
port: Optional[int] = None,
workers: Optional[int] = None,
register: Optional[bool] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None,
identifier: Optional[str] = None,
pre_args: Optional[List[str]] = None,
additional_args: Optional[List[str]] = None,
startup_timeout: Optional[int] = None,
mp_spawn_method: Optional[str] = None):
self.enabled = enabled if enabled is not None else True
self.implementation = implementation
self.entry_point = entry_point
self.instances = instances or 1
if not isinstance(self.instances, int) or self.instances < 1:
raise ValueError("Instances must be strictly positive integer.")
self.identifier = identifier
self.pre_args = pre_args
self.additional_args = additional_args
self.host = host
self.port = port
self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
self.startup_timeout = startup_timeout
self.mp_spawn_method = mp_spawn_method
@staticmethod
def from_conf(conf: Dict) -> 'ProcessorDeployment':
return ProcessorDeployment(**conf)
def create_calls(self,
global_settings: GlobalSettings,
shared_config: SharedProcessorConfig) -> Iterable[List[str]]:
if isinstance(self.port, list):
ports = self.port
elif self.port is None:
ports = [None] * self.instances
else:
ports = list(range(self.port, self.port + self.instances))
for port in ports:
if self.implementation == 'python':
call = [PYTHON_EXE, '-m', self.entry_point]
mp_spawn_method = shared_config.mp_spawn_method
if self.mp_spawn_method is not None:
mp_spawn_method = self.mp_spawn_method
if mp_spawn_method is not None:
call.extend(['--mp-spawn-method', mp_spawn_method])
elif self.implementation == 'java':
call = [str(JAVA_EXE)]
if shared_config.jvm_args is not None:
call.extend(shared_config.jvm_args)
if shared_config.java_classpath is not None:
call.extend(['-cp', shared_config.java_classpath])
call.append(self.entry_point)
else:
raise ValueError('Unrecognized implementation: ' + self.implementation)
if self.pre_args is not None:
call.extend(self.pre_args)
service_args = self.service_deployment.service_args(
host=self.host,
port=port,
register_default=global_settings.register,
host_default=global_settings.host,
mtap_config_default=global_settings.mtap_config,
log_level_default=global_settings.log_level,
workers_default=shared_config.workers
)
call.extend(service_args)
if self.identifier is not None:
call.extend(['--identifier', self.identifier])
events_addresses = shared_config.events_addresses
if events_addresses is not None:
call.extend(['--events', ','.join(events_addresses)])
if self.additional_args is not None:
call.extend(self.additional_args)
if shared_config.additional_args is not None:
call.extend(shared_config.additional_args)
yield call
class Deployment:
    """Controller that launches an events service and a set of processor
    services as subprocesses from a single deployment configuration.

    :param global_settings: Settings shared by every launched service.
    :param events_deployment: Configuration for the events service.
    :param shared_processor_config: Configuration shared by all processors.
    :param processors: One configuration per processor service to launch.
    """
    def __init__(self,
                 global_settings: Optional[GlobalSettings] = None,
                 events_deployment: Optional[EventsDeployment] = None,
                 shared_processor_config: Optional[SharedProcessorConfig] = None,
                 *processors: ProcessorDeployment):
        self.global_settings = global_settings
        self.events_deployment = events_deployment
        self.shared_processor_config = shared_processor_config
        self.processors = processors
    @staticmethod
    def load_configuration(conf: Dict) -> 'Deployment':
        """Builds a Deployment from an already-parsed configuration mapping.

        :param conf: Mapping with optional ``'global'``, ``'events_service'``,
            ``'shared_processor_config'`` and ``'processors'`` keys.
        :return: The assembled deployment.
        """
        global_settings = GlobalSettings.from_conf(conf.get('global'))
        events = EventsDeployment.from_conf(conf.get('events_service'))
        shared_processor_config = SharedProcessorConfig.from_conf(conf.get('shared_processor_config'))
        processors_list = conf.get('processors', [])
        processors = [ProcessorDeployment.from_conf(c) for c in processors_list]
        return Deployment(global_settings, events, shared_processor_config, *processors)
    @staticmethod
    def from_yaml_file(conf_path: Union[pathlib.Path, str]) -> 'Deployment':
        """Loads a deployment configuration from a YAML file.

        :param conf_path: Path to the YAML configuration file.
        :return: The assembled deployment.
        """
        conf_path = pathlib.Path(conf_path)
        from yaml import load
        # Prefer the C-accelerated loader when PyYAML was built with it.
        try:
            from yaml import CLoader as Loader
        except ImportError:
            from yaml import Loader
        with conf_path.open('rb') as f:
            conf = load(f, Loader=Loader)
        return Deployment.load_configuration(conf)
    def run_servers(self):
        """Launches every enabled service as a subprocess, waits for each to
        become reachable, then blocks until interrupted (Ctrl-C), at which
        point all launched services are shut down.
        """
        with _config.Config() as c:
            enable_proxy = c.get('grpc.enable_proxy', False)
        processes_listeners = []
        events_addresses = []
        def shutdown(kill=False):
            # Stops every launched subprocess (force-kill when requested)
            # and joins its stdout-listener thread.
            print("Shutting down all processors")
            for p, listener in processes_listeners:
                if kill:
                    p.kill()
                listener.join(timeout=1)
        if self.events_deployment.enabled:
            for call in self.events_deployment.create_calls(self.global_settings):
                p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                listener, events_address = _listen_test_connectivity(p, "events", 30, enable_proxy)
                events_addresses.append(events_address)
                processes_listeners.append((p, listener))
        # When nothing registers with service discovery and no explicit events
        # addresses were configured, point processors at the services we just
        # launched.
        if (not self.global_settings.register
                and not self.events_deployment.service_deployment.register
                and self.shared_processor_config.events_addresses is None):
            self.shared_processor_config.events_addresses = events_addresses
        for processor_deployment in self.processors:
            if processor_deployment.enabled:
                for call in processor_deployment.create_calls(self.global_settings,
                                                              self.shared_processor_config):
                    logger.debug('Launching processor with call: %s', call)
                    p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    # Per-processor timeout overrides the shared default.
                    startup_timeout = (processor_deployment.startup_timeout
                                       or self.shared_processor_config.startup_timeout)
                    try:
                        listener, _ = _listen_test_connectivity(p, call, startup_timeout,
                                                                enable_proxy)
                    except ServiceDeploymentException as e:
                        # One failed launch aborts the whole deployment.
                        logger.error(str(e))
                        return shutdown(kill=True)
                    processes_listeners.append((p, listener))
        print('Done deploying all servers.', flush=True)
        try:
            # Sleep in day-long increments until the user interrupts.
            while True:
                time.sleep(60 * 60 * 24)
        except KeyboardInterrupt:
            return shutdown()
def _listen_test_connectivity(p: subprocess.Popen,
                              name: Any,
                              startup_timeout: int,
                              enable_proxy: bool = False) -> Tuple[threading.Thread, str]:
    """Starts a thread forwarding the subprocess's output and waits for the
    launched service to become reachable over gRPC.

    :param p: The launched service subprocess.
    :param name: A label used in error messages (e.g. ``"events"`` or the
        launch command).
    :param startup_timeout: Seconds to wait, both for the address file to
        appear and for the gRPC channel to become ready.
    :param enable_proxy: Whether the gRPC HTTP proxy is allowed.
    :return: The listener thread and the address the service is serving on.
    :raises ServiceDeploymentException: If the address never appears or the
        channel does not become ready within the timeout.
    """
    listener = threading.Thread(target=_listen, args=(p,))
    listener.start()
    address = None
    # The launched service records its address in a file keyed by its PID
    # (read via utilities.read_address); poll once a second until the file
    # exists or the timeout elapses.
    for i in range(startup_timeout):
        try:
            address = utilities.read_address(str(p.pid))
            break
        except FileNotFoundError:
            time.sleep(1)
    if address is None:
        raise ServiceDeploymentException('Timed out waiting for {} to launch'.format(name))
    # Confirm the service actually answers on that address before returning.
    with grpc.insecure_channel(address,
                               options=[('grpc.enable_http_proxy', enable_proxy)]) as channel:
        future = grpc.channel_ready_future(channel)
        try:
            future.result(timeout=startup_timeout)
        except grpc.FutureTimeoutError:
            raise ServiceDeploymentException('Failed to launch: {}'.format(name))
    return listener, address
def main(args: Optional[Sequence[str]] = None,
         conf: Optional[argparse.Namespace] = None):
    """Entry point for the deployment command-line interface.

    :param args: Command-line arguments to parse; ignored when ``conf`` is
        supplied.
    :param conf: A pre-parsed namespace; when ``None`` the arguments are
        parsed with :func:`deployment_parser`.
    """
    namespace = conf if conf is not None else deployment_parser().parse_args(args)
    if namespace.log_level is not None:
        logging.basicConfig(level=getattr(logging, namespace.log_level))
    mode = namespace.mode
    if mode == 'run_servers':
        Deployment.from_yaml_file(namespace.deploy_config).run_servers()
    elif mode == 'write_example':
        source = pathlib.Path(__file__).parent / "examples" / "exampleDeploymentConfiguration.yml"
        shutil.copyfile(str(source), "exampleDeploymentConfiguration.yml")
        print('Writing "exampleDeploymentConfiguration.yml" to ' + str(pathlib.Path.cwd()))
def deployment_parser() -> argparse.ArgumentParser:
    """Creates the argument parser for the deployment CLI.

    :return: A parser exposing the ``run_servers`` and ``write_example``
        sub-commands and a global ``--log-level`` option.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        '--log-level',
        metavar='LEVEL',
        help="The log level to use for the deployment script."
    )
    modes = parser.add_subparsers(title='mode')
    run_servers_cmd = modes.add_parser('run_servers')
    run_servers_cmd.set_defaults(mode='run_servers')
    run_servers_cmd.add_argument(
        'deploy_config',
        metavar='CONFIG_FILE',
        type=pathlib.Path,
        help="A path to the deployment configuration to deploy."
    )
    example_cmd = modes.add_parser('write_example')
    example_cmd.set_defaults(mode='write_example')
    return parser
# Allow direct invocation of the deployment script, e.g.
# ``python deployment.py run_servers deployment.yml``.
if __name__ == '__main__':
    main()
| true | true |
f71bf509bb30658310cb6206ecdb34d8c4c8f548 | 6,084 | py | Python | sphinx/testing/path.py | Symaxion/sphinx | f4f7936b5c3671153c2646387c8258b1c4e25e3c | [
"BSD-2-Clause"
] | 8 | 2019-04-27T01:19:45.000Z | 2020-09-21T03:31:01.000Z | sphinx/testing/path.py | JoeyCluett/sphinx | ff5031c96e90027510ad2d0251972e12da46402c | [
"BSD-2-Clause"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | sphinx/testing/path.py | JoeyCluett/sphinx | ff5031c96e90027510ad2d0251972e12da46402c | [
"BSD-2-Clause"
] | 5 | 2019-04-27T01:19:47.000Z | 2020-09-20T15:15:19.000Z | """
sphinx.testing.path
~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import shutil
import sys
if False:
    # For type annotation
    import builtins  # NOQA
    from typing import Any, Callable, IO, List  # NOQA
# Encoding used for filesystem paths; falls back to the interpreter's default
# encoding when the platform does not report one.
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
class path(str):
    """
    Represents a path which behaves like a string.
    """
    @property
    def parent(self):
        # type: () -> path
        """
        The name of the directory the file or directory is in.
        """
        return self.__class__(os.path.dirname(self))
    def basename(self):
        # type: () -> str
        """
        Returns the final component of the path.
        """
        return os.path.basename(self)
    def abspath(self):
        # type: () -> path
        """
        Returns the absolute path.
        """
        return self.__class__(os.path.abspath(self))
    def isabs(self):
        # type: () -> bool
        """
        Returns ``True`` if the path is absolute.
        """
        return os.path.isabs(self)
    def isdir(self):
        # type: () -> bool
        """
        Returns ``True`` if the path is a directory.
        """
        return os.path.isdir(self)
    def isfile(self):
        # type: () -> bool
        """
        Returns ``True`` if the path is a file.
        """
        return os.path.isfile(self)
    def islink(self):
        # type: () -> bool
        """
        Returns ``True`` if the path is a symbolic link.
        """
        return os.path.islink(self)
    def ismount(self):
        # type: () -> bool
        """
        Returns ``True`` if the path is a mount point.
        """
        return os.path.ismount(self)
    def rmtree(self, ignore_errors=False, onerror=None):
        # type: (bool, Callable) -> None
        """
        Removes the file or directory and any files or directories it may
        contain.
        :param ignore_errors:
            If ``True`` errors are silently ignored, otherwise an exception
            is raised in case an error occurs.
        :param onerror:
            A callback which gets called with the arguments `func`, `path` and
            `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove`
            or :func:`os.rmdir`. `path` is the argument to the function which
            caused it to fail and `exc_info` is a tuple as returned by
            :func:`sys.exc_info`.
        """
        shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
    def copytree(self, destination, symlinks=False):
        # type: (str, bool) -> None
        """
        Recursively copy a directory to the given `destination`. If the given
        `destination` does not exist it will be created.
        :param symlinks:
            If ``True`` symbolic links in the source tree result in symbolic
            links in the destination tree otherwise the contents of the files
            pointed to by the symbolic links are copied.
        """
        shutil.copytree(self, destination, symlinks=symlinks)
    def movetree(self, destination):
        # type: (str) -> None
        """
        Recursively move the file or directory to the given `destination`
        similar to the Unix "mv" command.
        If the `destination` is a file it may be overwritten depending on the
        :func:`os.rename` semantics.
        """
        shutil.move(self, destination)
    move = movetree
    def unlink(self):
        # type: () -> None
        """
        Removes a file.
        """
        os.unlink(self)
    def stat(self):
        # type: () -> Any
        """
        Returns a stat of the file.
        """
        return os.stat(self)
    def utime(self, arg):
        # type: (Any) -> None
        """
        Sets the access and modified times of the file; `arg` is passed
        through to :func:`os.utime`.
        """
        os.utime(self, arg)
    def open(self, mode='r', **kwargs):
        # type: (str, Any) -> IO
        """
        Opens the file and returns the file object.
        """
        return open(self, mode, **kwargs)
    def write_text(self, text, encoding='utf-8', **kwargs):
        # type: (str, str, Any) -> None
        """
        Writes the given `text` to the file.
        """
        with open(self, 'w', encoding=encoding, **kwargs) as f:
            f.write(text)
    def text(self, encoding='utf-8', **kwargs):
        # type: (str, Any) -> str
        """
        Returns the text in the file.
        """
        with open(self, encoding=encoding, **kwargs) as f:
            return f.read()
    def bytes(self):
        # type: () -> builtins.bytes
        """
        Returns the bytes in the file.
        """
        with open(self, mode='rb') as f:
            return f.read()
    def write_bytes(self, bytes, append=False):
        # type: (builtins.bytes, bool) -> None
        """
        Writes the given `bytes` to the file.
        :param append:
            If ``True`` given `bytes` are added at the end of the file.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        with open(self, mode=mode) as f:
            f.write(bytes)
    def exists(self):
        # type: () -> bool
        """
        Returns ``True`` if the path exist.
        """
        return os.path.exists(self)
    def lexists(self):
        # type: () -> bool
        """
        Returns ``True`` if the path exists unless it is a broken symbolic
        link.
        """
        return os.path.lexists(self)
    def makedirs(self, mode=0o777, exist_ok=False):
        # type: (int, bool) -> None
        """
        Recursively create directories.
        """
        os.makedirs(self, mode, exist_ok=exist_ok)
    def joinpath(self, *args):
        # type: (Any) -> path
        """
        Joins the path with the argument given and returns the result.
        """
        return self.__class__(os.path.join(self, *map(self.__class__, args)))
    def listdir(self):
        # type: () -> List[str]
        """
        Returns the entries of the directory.
        """
        return os.listdir(self)
    __div__ = __truediv__ = joinpath
    def __repr__(self):
        # type: () -> str
        """
        Returns e.g. ``path('/tmp/x')`` for debugging output.
        """
        return '%s(%s)' % (self.__class__.__name__, super().__repr__())
| 27.160714 | 78 | 0.534845 | import os
import shutil
import sys
if False:
    # Guard keeps typing-only imports out of the runtime namespace.
    import builtins
    from typing import Any, Callable, IO, List
# Encoding used for filesystem paths, with a fallback to the default encoding.
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
# NOTE(review): this is the comment-stripped duplicate of the ``path`` class
# defined earlier in this dump (the dataset's ``content_no_comment`` column).
# Code is identical to the documented copy with docstrings and type comments
# removed; preserved verbatim as dataset residue.
class path(str):
    # A str subclass exposing filesystem operations as methods.
    @property
    def parent(self):
        # Directory containing this path.
        return self.__class__(os.path.dirname(self))
    def basename(self):
        # Final path component.
        return os.path.basename(self)
    def abspath(self):
        return self.__class__(os.path.abspath(self))
    def isabs(self):
        return os.path.isabs(self)
    def isdir(self):
        return os.path.isdir(self)
    def isfile(self):
        return os.path.isfile(self)
    def islink(self):
        return os.path.islink(self)
    def ismount(self):
        return os.path.ismount(self)
    def rmtree(self, ignore_errors=False, onerror=None):
        # Recursively delete the tree rooted at this path.
        shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
    def copytree(self, destination, symlinks=False):
        # Recursively copy this directory to `destination`.
        shutil.copytree(self, destination, symlinks=symlinks)
    def movetree(self, destination):
        shutil.move(self, destination)
    move = movetree
    def unlink(self):
        os.unlink(self)
    def stat(self):
        return os.stat(self)
    def utime(self, arg):
        os.utime(self, arg)
    def open(self, mode='r', **kwargs):
        return open(self, mode, **kwargs)
    def write_text(self, text, encoding='utf-8', **kwargs):
        with open(self, 'w', encoding=encoding, **kwargs) as f:
            f.write(text)
    def text(self, encoding='utf-8', **kwargs):
        with open(self, encoding=encoding, **kwargs) as f:
            return f.read()
    def bytes(self):
        with open(self, mode='rb') as f:
            return f.read()
    def write_bytes(self, bytes, append=False):
        # `append` selects append vs. truncate mode.
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        with open(self, mode=mode) as f:
            f.write(bytes)
    def exists(self):
        return os.path.exists(self)
    def lexists(self):
        return os.path.lexists(self)
    def makedirs(self, mode=0o777, exist_ok=False):
        os.makedirs(self, mode, exist_ok=exist_ok)
    def joinpath(self, *args):
        # Join with the given components, returning a new `path`.
        return self.__class__(os.path.join(self, *map(self.__class__, args)))
    def listdir(self):
        return os.listdir(self)
    __div__ = __truediv__ = joinpath
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, super().__repr__())
f71bf51348a8a1504b096c6228f4179dd5875c54 | 1,538 | py | Python | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 9 | 2015-02-11T09:35:40.000Z | 2019-04-29T23:57:49.000Z | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 2 | 2016-02-07T18:54:47.000Z | 2017-08-10T01:38:01.000Z | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 1 | 2019-05-13T12:37:15.000Z | 2019-05-13T12:37:15.000Z | from abjad import attach
from abjad import inspect
from abjad import iterate
from abjad.tools import abctools
from abjad.tools import scoretools
class ClefSpannerExpression(abctools.AbjadValueObject):
    r'''A clef spanner expression.

    When called on a music expression, computes the duration-weighted
    average of its written pitches and attaches a bass clef spanner when
    that average is below middle C, otherwise a treble clef spanner.
    '''
    ### CLASS VARIABLES ###
    __slots__ = ()
    ### INITIALIZER ###
    def __init__(self):
        pass
    ### SPECIAL METHODS ###
    def __call__(self, music, name=None):
        r'''Attaches a treble or bass ``consort.ClefSpanner`` to ``music``.

        :param music: An Abjad music expression to annotate.
        :param name: Optional spanner name forwarded to ``attach``.
        '''
        # Fix: the module only does ``from abjad import attach, inspect,
        # iterate`` — the bare ``abjad`` name used by the isinstance checks
        # below was never bound and raised NameError at call time.
        import abjad
        import consort
        leaves = list(iterate(music).by_leaf())
        weights = []
        weighted_pitches = []
        for leaf in leaves:
            weight = float(inspect(leaf).get_duration())
            if isinstance(leaf, abjad.Note):
                pitch = float(leaf.written_pitch)
                weights.append(weight)
                weighted_pitches.append(pitch * weight)
            elif isinstance(leaf, abjad.Chord):
                # Every chord pitch contributes, but the chord's duration is
                # counted once in the denominator (preserves original logic).
                for pitch in leaf.written_pitches:
                    weighted_pitches.append(float(pitch) * weight)
                weights.append(weight)
        if not weights:
            # No pitched leaves (e.g. only rests): previously this divided by
            # zero; default to treble instead of crashing.
            clef_spanner = consort.ClefSpanner('treble')
        else:
            weighted_average = sum(weighted_pitches) / sum(weights)
            # Pitches are in semitones relative to middle C, so a negative
            # mean sits below middle C.
            if weighted_average < 0:
                clef_spanner = consort.ClefSpanner('bass')
            else:
                clef_spanner = consort.ClefSpanner('treble')
        attach(clef_spanner, music, name=name)
| 31.387755 | 67 | 0.605982 | from abjad import attach
from abjad import inspect
from abjad import iterate
from abjad.tools import abctools
from abjad.tools import scoretools
# NOTE(review): this block is the ``content_no_comment`` dataset column for
# the ClefSpannerExpression file above, and the stripping was destructive:
# the ``def __call__(self, music, name=None):`` header, ``import consort``,
# ``__slots__ = ()`` and ``__init__`` are all missing, so the method body now
# sits directly in the class suite and would raise NameError (``music`` and
# ``name`` are unbound) if this class definition were executed.  Preserved
# verbatim as dataset residue; see the intact copy earlier in this dump.
class ClefSpannerExpression(abctools.AbjadValueObject):
        leaves = list(iterate(music).by_leaf())
        weights = []
        weighted_pitches = []
        for leaf in leaves:
            weight = float(inspect(leaf).get_duration())
            if isinstance(leaf, abjad.Note):
                pitch = float(leaf.written_pitch)
                weighted_pitch = pitch * weight
                weights.append(weight)
                weighted_pitches.append(weighted_pitch)
            elif isinstance(leaf, abjad.Chord):
                for pitch in leaf.written_pitches:
                    pitch = float(pitch)
                    weighted_pitch = pitch * weight
                    weighted_pitches.append(weighted_pitch)
                weights.append(weight)
        sum_of_weights = sum(weights)
        sum_of_weighted_pitches = sum(weighted_pitches)
        weighted_average = sum_of_weighted_pitches / sum_of_weights
        if weighted_average < 0:
            clef_spanner = consort.ClefSpanner('bass')
        else:
            clef_spanner = consort.ClefSpanner('treble')
        attach(clef_spanner, music, name=name)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.